code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string | Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002') | Below is the the instruction that describes the task:
### Input:
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
### Response:
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string |
def _process_query(self, query, prepared=False):
""" Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
"""
# Exit condition and POST
if prepared is True:
files = {'query': str(query)}
logger.debug('About to submit the following query {}'.format(query))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
text = query['text']
sentence_coordinates = [
{
"offsetStart": 0,
"offsetEnd": len(text)
}
]
total_nb_sentences = len(sentence_coordinates) # Sentences from text.
sentences_groups = []
if len(text) > self.max_text_length:
res, status_code = self.segment(text)
if status_code == 200:
sentence_coordinates = res['sentences']
total_nb_sentences = len(sentence_coordinates)
else:
logger.error('Error during the segmentation of the text.')
logger.debug(
'Text too long, split in {} sentences; building groups of {} '
'sentences.'.format(
total_nb_sentences, self.sentences_per_group
)
)
sentences_groups = self._group_sentences(
total_nb_sentences,
self.sentences_per_group
)
else:
query['sentence'] = "true"
if total_nb_sentences > 1:
query['sentences'] = sentence_coordinates
if len(sentences_groups) > 0:
for group in sentences_groups:
query['processSentence'] = group
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
if 'entities' in res:
query['entities'] = res[u'entities']
query['language'] = res[u'language']
else:
logger.error(
"Error when processing the query {}".format(query)
)
return None, status_code
else:
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
query['language'] = res[u'language']
if 'entities' in res:
query['entities'] = res[u'entities']
else:
logger.error("Error when processing the query {}".format(query))
return None, status_code
return query, status_code | Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API. | Below is the the instruction that describes the task:
### Input:
Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
### Response:
def _process_query(self, query, prepared=False):
""" Process query recursively, if the text is too long,
it is split and processed bit a bit.
Args:
query (sdict): Text to be processed.
prepared (bool): True when the query is ready to be submitted via
POST request.
Returns:
str: Body ready to be submitted to the API.
"""
# Exit condition and POST
if prepared is True:
files = {'query': str(query)}
logger.debug('About to submit the following query {}'.format(query))
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status == 200:
return self.decode(res), status
else:
logger.debug('Disambiguation failed.')
return None, status
text = query['text']
sentence_coordinates = [
{
"offsetStart": 0,
"offsetEnd": len(text)
}
]
total_nb_sentences = len(sentence_coordinates) # Sentences from text.
sentences_groups = []
if len(text) > self.max_text_length:
res, status_code = self.segment(text)
if status_code == 200:
sentence_coordinates = res['sentences']
total_nb_sentences = len(sentence_coordinates)
else:
logger.error('Error during the segmentation of the text.')
logger.debug(
'Text too long, split in {} sentences; building groups of {} '
'sentences.'.format(
total_nb_sentences, self.sentences_per_group
)
)
sentences_groups = self._group_sentences(
total_nb_sentences,
self.sentences_per_group
)
else:
query['sentence'] = "true"
if total_nb_sentences > 1:
query['sentences'] = sentence_coordinates
if len(sentences_groups) > 0:
for group in sentences_groups:
query['processSentence'] = group
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
if 'entities' in res:
query['entities'] = res[u'entities']
query['language'] = res[u'language']
else:
logger.error(
"Error when processing the query {}".format(query)
)
return None, status_code
else:
res, status_code = self._process_query(query, prepared=True)
if status_code == 200:
query['language'] = res[u'language']
if 'entities' in res:
query['entities'] = res[u'entities']
else:
logger.error("Error when processing the query {}".format(query))
return None, status_code
return query, status_code |
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False | Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise. | Below is the the instruction that describes the task:
### Input:
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
### Response:
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False |
def apply_defaults(self, commands):
""" apply default settings to commands
not static, shadow "self" in eval
"""
for command in commands:
if 'action' in command and "()" in command['action']:
command['action'] = eval("self.{}".format(command['action']))
if command['keys'][0].startswith('-'):
if 'required' not in command:
command['required'] = False | apply default settings to commands
not static, shadow "self" in eval | Below is the the instruction that describes the task:
### Input:
apply default settings to commands
not static, shadow "self" in eval
### Response:
def apply_defaults(self, commands):
""" apply default settings to commands
not static, shadow "self" in eval
"""
for command in commands:
if 'action' in command and "()" in command['action']:
command['action'] = eval("self.{}".format(command['action']))
if command['keys'][0].startswith('-'):
if 'required' not in command:
command['required'] = False |
def write(self, file_path):
"""
Write the audio data to file.
Return ``True`` on success, or ``False`` otherwise.
:param string file_path: the path of the output file to be written
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
.. versionadded:: 1.2.0
"""
if self.__samples is None:
if self.file_path is None:
self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
else:
self.read_samples_from_file()
self.log([u"Writing audio file '%s'...", file_path])
try:
# our value is a float64 in [-1, 1]
# scipy writes the sample as an int16_t, that is, a number in [-32768, 32767]
data = (self.audio_samples * 32768).astype("int16")
scipywavwrite(file_path, self.audio_sample_rate, data)
except Exception as exc:
self.log_exc(u"Error writing audio file to '%s'" % (file_path), exc, True, OSError)
self.log([u"Writing audio file '%s'... done", file_path]) | Write the audio data to file.
Return ``True`` on success, or ``False`` otherwise.
:param string file_path: the path of the output file to be written
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
.. versionadded:: 1.2.0 | Below is the the instruction that describes the task:
### Input:
Write the audio data to file.
Return ``True`` on success, or ``False`` otherwise.
:param string file_path: the path of the output file to be written
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
.. versionadded:: 1.2.0
### Response:
def write(self, file_path):
"""
Write the audio data to file.
Return ``True`` on success, or ``False`` otherwise.
:param string file_path: the path of the output file to be written
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
.. versionadded:: 1.2.0
"""
if self.__samples is None:
if self.file_path is None:
self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
else:
self.read_samples_from_file()
self.log([u"Writing audio file '%s'...", file_path])
try:
# our value is a float64 in [-1, 1]
# scipy writes the sample as an int16_t, that is, a number in [-32768, 32767]
data = (self.audio_samples * 32768).astype("int16")
scipywavwrite(file_path, self.audio_sample_rate, data)
except Exception as exc:
self.log_exc(u"Error writing audio file to '%s'" % (file_path), exc, True, OSError)
self.log([u"Writing audio file '%s'... done", file_path]) |
def _reindex(self):
"""
Index every sentence into a cluster
"""
self._len2split_idx = {}
last_split = -1
for split_idx, split in enumerate(self._splits):
self._len2split_idx.update(
dict(list(zip(list(range(last_split + 1, split)), [split_idx] * (split - (last_split + 1)))))) | Index every sentence into a cluster | Below is the the instruction that describes the task:
### Input:
Index every sentence into a cluster
### Response:
def _reindex(self):
"""
Index every sentence into a cluster
"""
self._len2split_idx = {}
last_split = -1
for split_idx, split in enumerate(self._splits):
self._len2split_idx.update(
dict(list(zip(list(range(last_split + 1, split)), [split_idx] * (split - (last_split + 1)))))) |
def add_f95_to_env(env):
"""Add Builders and construction variables for f95 to an Environment."""
try:
F95Suffixes = env['F95FILESUFFIXES']
except KeyError:
F95Suffixes = ['.f95']
#print("Adding %s to f95 suffixes" % F95Suffixes)
try:
F95PPSuffixes = env['F95PPFILESUFFIXES']
except KeyError:
F95PPSuffixes = []
DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
support_module = 1) | Add Builders and construction variables for f95 to an Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for f95 to an Environment.
### Response:
def add_f95_to_env(env):
"""Add Builders and construction variables for f95 to an Environment."""
try:
F95Suffixes = env['F95FILESUFFIXES']
except KeyError:
F95Suffixes = ['.f95']
#print("Adding %s to f95 suffixes" % F95Suffixes)
try:
F95PPSuffixes = env['F95PPFILESUFFIXES']
except KeyError:
F95PPSuffixes = []
DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
support_module = 1) |
def _ScanEncryptedVolumeNode(self, scan_context, scan_node):
"""Scans an encrypted volume node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
Raises:
BackEndError: if the scan node cannot be unlocked.
ValueError: if the scan context or scan node is invalid.
"""
if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
# TODO: consider changes this when upstream changes have been made.
# Currently pyfsapfs does not support reading from a volume as a device.
# Also see: https://github.com/log2timeline/dfvfs/issues/332
container_file_entry = resolver.Resolver.OpenFileEntry(
scan_node.path_spec, resolver_context=self._resolver_context)
fsapfs_volume = container_file_entry.GetAPFSVolume()
# TODO: unlocking the volume multiple times is inefficient cache volume
# object in scan node and use is_locked = fsapfs_volume.is_locked()
try:
is_locked = not apfs_helper.APFSUnlockVolume(
fsapfs_volume, scan_node.path_spec, resolver.Resolver.key_chain)
except IOError as exception:
raise errors.BackEndError(
'Unable to unlock APFS volume with error: {0!s}'.format(exception))
else:
file_object = resolver.Resolver.OpenFileObject(
scan_node.path_spec, resolver_context=self._resolver_context)
is_locked = not file_object or file_object.is_locked
file_object.close()
if is_locked:
scan_context.LockScanNode(scan_node.path_spec)
# For BitLocker To Go add a scan node for the unencrypted part of
# the volume.
if scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
if path_spec:
scan_context.AddScanNode(path_spec, scan_node.parent_node) | Scans an encrypted volume node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
Raises:
BackEndError: if the scan node cannot be unlocked.
ValueError: if the scan context or scan node is invalid. | Below is the the instruction that describes the task:
### Input:
Scans an encrypted volume node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
Raises:
BackEndError: if the scan node cannot be unlocked.
ValueError: if the scan context or scan node is invalid.
### Response:
def _ScanEncryptedVolumeNode(self, scan_context, scan_node):
"""Scans an encrypted volume node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
Raises:
BackEndError: if the scan node cannot be unlocked.
ValueError: if the scan context or scan node is invalid.
"""
if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
# TODO: consider changes this when upstream changes have been made.
# Currently pyfsapfs does not support reading from a volume as a device.
# Also see: https://github.com/log2timeline/dfvfs/issues/332
container_file_entry = resolver.Resolver.OpenFileEntry(
scan_node.path_spec, resolver_context=self._resolver_context)
fsapfs_volume = container_file_entry.GetAPFSVolume()
# TODO: unlocking the volume multiple times is inefficient cache volume
# object in scan node and use is_locked = fsapfs_volume.is_locked()
try:
is_locked = not apfs_helper.APFSUnlockVolume(
fsapfs_volume, scan_node.path_spec, resolver.Resolver.key_chain)
except IOError as exception:
raise errors.BackEndError(
'Unable to unlock APFS volume with error: {0!s}'.format(exception))
else:
file_object = resolver.Resolver.OpenFileObject(
scan_node.path_spec, resolver_context=self._resolver_context)
is_locked = not file_object or file_object.is_locked
file_object.close()
if is_locked:
scan_context.LockScanNode(scan_node.path_spec)
# For BitLocker To Go add a scan node for the unencrypted part of
# the volume.
if scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
if path_spec:
scan_context.AddScanNode(path_spec, scan_node.parent_node) |
def batch_size(self):
"""int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size.
"""
batch_size = self.get("batch_size", DEFAULT_BATCH_SIZE)
if self.limit is not None:
return min(self.limit, batch_size)
return batch_size | int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size. | Below is the the instruction that describes the task:
### Input:
int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size.
### Response:
def batch_size(self):
"""int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size.
"""
batch_size = self.get("batch_size", DEFAULT_BATCH_SIZE)
if self.limit is not None:
return min(self.limit, batch_size)
return batch_size |
def check_crc(f, inf, desc):
"""Compare result crc to expected value.
"""
exp = inf._md_expect
if exp is None:
return
ucrc = f._md_context.digest()
if ucrc != exp:
print('crc error - %s - exp=%r got=%r' % (desc, exp, ucrc)) | Compare result crc to expected value. | Below is the the instruction that describes the task:
### Input:
Compare result crc to expected value.
### Response:
def check_crc(f, inf, desc):
"""Compare result crc to expected value.
"""
exp = inf._md_expect
if exp is None:
return
ucrc = f._md_context.digest()
if ucrc != exp:
print('crc error - %s - exp=%r got=%r' % (desc, exp, ucrc)) |
def write(connection, skip, directory, force):
"""Write all as BEL."""
os.makedirs(directory, exist_ok=True)
from .manager.bel_manager import BELManagerMixin
import pybel
for idx, name, manager in _iterate_managers(connection, skip):
if not isinstance(manager, BELManagerMixin):
continue
click.secho(name, fg='cyan', bold=True)
path = os.path.join(directory, f'{name}.bel.pickle')
if os.path.exists(path) and not force:
click.echo('๐ already exported')
continue
if not manager.is_populated():
click.echo('๐ unpopulated')
else:
graph = manager.to_bel()
pybel.to_pickle(graph, path)
pybel.to_json_path(graph, os.path.join(directory, f'{name}.bel.json')) | Write all as BEL. | Below is the the instruction that describes the task:
### Input:
Write all as BEL.
### Response:
def write(connection, skip, directory, force):
"""Write all as BEL."""
os.makedirs(directory, exist_ok=True)
from .manager.bel_manager import BELManagerMixin
import pybel
for idx, name, manager in _iterate_managers(connection, skip):
if not isinstance(manager, BELManagerMixin):
continue
click.secho(name, fg='cyan', bold=True)
path = os.path.join(directory, f'{name}.bel.pickle')
if os.path.exists(path) and not force:
click.echo('๐ already exported')
continue
if not manager.is_populated():
click.echo('๐ unpopulated')
else:
graph = manager.to_bel()
pybel.to_pickle(graph, path)
pybel.to_json_path(graph, os.path.join(directory, f'{name}.bel.json')) |
def overlaps(self, box, th=0.0001):
"""
Check whether this box and given box overlaps at least by given threshold.
:param box: Box to compare with
:param th: Threshold above which overlapping should be considered
:returns: True if overlaps
"""
int_box = Box.intersection_box(self, box)
small_box = self if self.smaller(box) else box
return True if int_box.area() / small_box.area() >= th else False | Check whether this box and given box overlaps at least by given threshold.
:param box: Box to compare with
:param th: Threshold above which overlapping should be considered
:returns: True if overlaps | Below is the the instruction that describes the task:
### Input:
Check whether this box and given box overlaps at least by given threshold.
:param box: Box to compare with
:param th: Threshold above which overlapping should be considered
:returns: True if overlaps
### Response:
def overlaps(self, box, th=0.0001):
"""
Check whether this box and given box overlaps at least by given threshold.
:param box: Box to compare with
:param th: Threshold above which overlapping should be considered
:returns: True if overlaps
"""
int_box = Box.intersection_box(self, box)
small_box = self if self.smaller(box) else box
return True if int_box.area() / small_box.area() >= th else False |
def time(self) -> Time:
"""Generate a random time object.
:return: ``datetime.time`` object.
"""
random_time = time(
self.random.randint(0, 23),
self.random.randint(0, 59),
self.random.randint(0, 59),
self.random.randint(0, 999999),
)
return random_time | Generate a random time object.
:return: ``datetime.time`` object. | Below is the the instruction that describes the task:
### Input:
Generate a random time object.
:return: ``datetime.time`` object.
### Response:
def time(self) -> Time:
"""Generate a random time object.
:return: ``datetime.time`` object.
"""
random_time = time(
self.random.randint(0, 23),
self.random.randint(0, 59),
self.random.randint(0, 59),
self.random.randint(0, 999999),
)
return random_time |
def condition_details_has_owner(condition_details, owner):
"""Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise
"""
if 'subconditions' in condition_details:
result = condition_details_has_owner(condition_details['subconditions'], owner)
if result:
return True
elif isinstance(condition_details, list):
for subcondition in condition_details:
result = condition_details_has_owner(subcondition, owner)
if result:
return True
else:
if 'public_key' in condition_details \
and owner == condition_details['public_key']:
return True
return False | Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise | Below is the the instruction that describes the task:
### Input:
Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise
### Response:
def condition_details_has_owner(condition_details, owner):
"""Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise
"""
if 'subconditions' in condition_details:
result = condition_details_has_owner(condition_details['subconditions'], owner)
if result:
return True
elif isinstance(condition_details, list):
for subcondition in condition_details:
result = condition_details_has_owner(subcondition, owner)
if result:
return True
else:
if 'public_key' in condition_details \
and owner == condition_details['public_key']:
return True
return False |
def virtual_networks_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual networks within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.virtual_networks_list_all
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
vnets = __utils__['azurearm.paged_object_to_list'](netconn.virtual_networks.list_all())
for vnet in vnets:
result[vnet['name']] = vnet
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
List all virtual networks within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.virtual_networks_list_all | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
List all virtual networks within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.virtual_networks_list_all
### Response:
def virtual_networks_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual networks within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.virtual_networks_list_all
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
vnets = __utils__['azurearm.paged_object_to_list'](netconn.virtual_networks.list_all())
for vnet in vnets:
result[vnet['name']] = vnet
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result |
def get_param(par, args, m1, m2, s1z, s2z):
"""
Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values
"""
if par == 'mchirp':
parvals = conversions.mchirp_from_mass1_mass2(m1, m2)
elif par == 'mtotal':
parvals = m1 + m2
elif par == 'eta':
parvals = conversions.eta_from_mass1_mass2(m1, m2)
elif par in ['chi_eff', 'effective_spin']:
parvals = conversions.chi_eff(m1, m2, s1z, s2z)
elif par == 'template_duration':
# default to SEOBNRv4 duration function
parvals = pnutils.get_imr_duration(m1, m2, s1z, s2z, args.f_lower,
args.approximant or "SEOBNRv4")
if args.min_duration:
parvals += args.min_duration
elif par == 'tau0':
parvals = conversions.tau0_from_mass1_mass2(m1, m2, args.f_lower)
elif par == 'tau3':
parvals = conversions.tau3_from_mass1_mass2(m1, m2, args.f_lower)
elif par in pnutils.named_frequency_cutoffs.keys():
parvals = pnutils.frequency_cutoff_from_name(par, m1, m2, s1z, s2z)
else:
# try asking for a LALSimulation frequency function
parvals = pnutils.get_freq(par, m1, m2, s1z, s2z)
return parvals | Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values | Below is the the instruction that describes the task:
### Input:
Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values
### Response:
def get_param(par, args, m1, m2, s1z, s2z):
"""
Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values
"""
if par == 'mchirp':
parvals = conversions.mchirp_from_mass1_mass2(m1, m2)
elif par == 'mtotal':
parvals = m1 + m2
elif par == 'eta':
parvals = conversions.eta_from_mass1_mass2(m1, m2)
elif par in ['chi_eff', 'effective_spin']:
parvals = conversions.chi_eff(m1, m2, s1z, s2z)
elif par == 'template_duration':
# default to SEOBNRv4 duration function
parvals = pnutils.get_imr_duration(m1, m2, s1z, s2z, args.f_lower,
args.approximant or "SEOBNRv4")
if args.min_duration:
parvals += args.min_duration
elif par == 'tau0':
parvals = conversions.tau0_from_mass1_mass2(m1, m2, args.f_lower)
elif par == 'tau3':
parvals = conversions.tau3_from_mass1_mass2(m1, m2, args.f_lower)
elif par in pnutils.named_frequency_cutoffs.keys():
parvals = pnutils.frequency_cutoff_from_name(par, m1, m2, s1z, s2z)
else:
# try asking for a LALSimulation frequency function
parvals = pnutils.get_freq(par, m1, m2, s1z, s2z)
return parvals |
def _run_cmd(cmd):
"""Run command specified by :cmd: and return stdout, stderr and code."""
if not os.path.exists(cmd[0]):
cmd[0] = shutil.which(cmd[0])
assert cmd[0] is not None
shebang_parts = parseshebang.parse(cmd[0])
proc = subprocess.Popen(shebang_parts + cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return {
"stdout": stdout,
"stderr": stderr,
"code": proc.returncode
} | Run command specified by :cmd: and return stdout, stderr and code. | Below is the the instruction that describes the task:
### Input:
Run command specified by :cmd: and return stdout, stderr and code.
### Response:
def _run_cmd(cmd):
"""Run command specified by :cmd: and return stdout, stderr and code."""
if not os.path.exists(cmd[0]):
cmd[0] = shutil.which(cmd[0])
assert cmd[0] is not None
shebang_parts = parseshebang.parse(cmd[0])
proc = subprocess.Popen(shebang_parts + cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return {
"stdout": stdout,
"stderr": stderr,
"code": proc.returncode
} |
def on_put(self, request, response, txn_id=None):
"""Responds to PUT request containing events."""
response.body = "{}"
# Check whether repeat txn_id
if not self._is_new(txn_id):
response.status = falcon.HTTP_200
return
request.context["body"] = request.stream.read()
try:
events = json.loads(request.context["body"].decode("utf-8"))["events"]
except(KeyError, ValueError, UnicodeDecodeError):
response.status = falcon.HTTP_400
response.body = "Malformed request body"
return
if self.handler(EventStream(events, self.Api)):
response.status = falcon.HTTP_200
else:
response.status = falcon.HTTP_400 | Responds to PUT request containing events. | Below is the the instruction that describes the task:
### Input:
Responds to PUT request containing events.
### Response:
def on_put(self, request, response, txn_id=None):
"""Responds to PUT request containing events."""
response.body = "{}"
# Check whether repeat txn_id
if not self._is_new(txn_id):
response.status = falcon.HTTP_200
return
request.context["body"] = request.stream.read()
try:
events = json.loads(request.context["body"].decode("utf-8"))["events"]
except(KeyError, ValueError, UnicodeDecodeError):
response.status = falcon.HTTP_400
response.body = "Malformed request body"
return
if self.handler(EventStream(events, self.Api)):
response.status = falcon.HTTP_200
else:
response.status = falcon.HTTP_400 |
def readline(self, size=None):
"""Read a single line from rfile buffer and return it.
Args:
size (int): minimum amount of data to read
Returns:
bytes: One line from rfile.
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
data += self.buffer[:newline_pos]
self.buffer = self.buffer[newline_pos:] | Read a single line from rfile buffer and return it.
Args:
size (int): minimum amount of data to read
Returns:
bytes: One line from rfile. | Below is the the instruction that describes the task:
### Input:
Read a single line from rfile buffer and return it.
Args:
size (int): minimum amount of data to read
Returns:
bytes: One line from rfile.
### Response:
def readline(self, size=None):
"""Read a single line from rfile buffer and return it.
Args:
size (int): minimum amount of data to read
Returns:
bytes: One line from rfile.
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
data += self.buffer[:newline_pos]
self.buffer = self.buffer[newline_pos:] |
def createReference(self, fromnode, tonode, edge_data=None):
"""
Create a reference from fromnode to tonode
"""
if fromnode is None:
fromnode = self
fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
if fromident is None or toident is None:
return
self.msg(4, "createReference", fromnode, tonode, edge_data)
self.graph.add_edge(fromident, toident, edge_data=edge_data) | Create a reference from fromnode to tonode | Below is the the instruction that describes the task:
### Input:
Create a reference from fromnode to tonode
### Response:
def createReference(self, fromnode, tonode, edge_data=None):
"""
Create a reference from fromnode to tonode
"""
if fromnode is None:
fromnode = self
fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
if fromident is None or toident is None:
return
self.msg(4, "createReference", fromnode, tonode, edge_data)
self.graph.add_edge(fromident, toident, edge_data=edge_data) |
def searchTriples(filenames,plant=False):
"""Given a list of exposure numbers, find all the KBOs in that set of exposures"""
print filenames
if opt.none :
return
import MOPfits,os
import MOPdbaccess
import string
import os.path
import pyfits
if len(filenames)!=3:
raise TaskError, "got %d exposures"%(len(expnums))
### Some program Constants
proc_these_files=[]
if not plant:
proc_these_files.append("# Files to be planted and searched\n")
proc_these_files.append("# image fwhm plant\n")
for filename in filenames:
try:
mysql=MOPdbaccess.connect('bucket','cfhls','MYSQL')
bucket=mysql.cursor()
except:
raise TaskError, "mysql failed"
#bucket.execute("SELECT obs_iq_refccd FROM exposure WHERE expnum=%s" , (expnum, ) )
#row=bucket.fetchone()
#mysql.close()
#fwhm=row[0]
#if not fwhm > 0:
fwhm=1.0
if not plant:
#proc_these_files.append("%s %f %s \n" % ( filename[0], fwhm/0.183, 'no'))
pstr='NO'
else:
pstr='YES'
### since we're planting we need a psf. JMPMAKEPSF will
### update the proc-these-files listing
### run the make psf script .. always. This creates proc-these-files
### which is needed by the find.pl script.
command='jmpmakepsf.csh ./ %s %s' % ( filename, pstr )
if opt.verbose:
sys.stderr.write( command )
try:
os.system(command)
except:
raise TaskError, "jmpmakepsf noexec"
if os.access(filename+'.jmpmakepsf.FAILED',os.R_OK) or not os.access(filename+".psf.fits", os.R_OK) :
if plant:
raise TaskError, "jmpmakepsf failed"
# do without plant
else:
plant=False
pstr='NO'
### we're not planting so, lets keep going
### but check that there is a line in proc_these_files
add_line=True
if not os.access('proc-these-files',os.R_OK):
f=open('proc-these-files','w')
for l in proc_these_files:
f.write(l)
f.close()
f=open('proc-these-files','r')
ptf_lines=f.readlines()
f.close()
for ptf_line in ptf_lines:
if ptf_line[0]=='#':
continue
ptf_a=ptf_line.split()
import re
if re.search('%s' % (filename),ptf_a[0]):
### there's already a line for this one
add_line=False
break
if add_line:
f=open('proc-these-files','a')
f.write("%s %f %s \n" % ( filename, fwhm/0.183, 'no'))
f.close()
if opt.none:
return(-1)
prefix=''
if plant:
command="plant.csh ./ "
#command="plant.csh ./ -rmin %s -rmax %s -ang %s -width %s " % ( opt.rmin, opt.rmax, opt.angle, opt.width)
try:
os.system(command)
except:
raise TaskError, 'plant exec. failed'
if not os.access('plant.OK',os.R_OK):
raise TaskError, 'plant failed'
prefix='fk'
#else:
# f=open('proc-these-files','w')
# for line in proc_these_files:
# f.write(line)
# f.flush()
# f.close()
if opt.rerun and os.access('find.OK',os.R_OK):
os.unlink("find.OK")
#command="find.pl -p "+prefix+" -rn %s -rx %s -a %s -aw %s -d ./ " % ( opt.rmin, opt.rmax, opt.angle, opt.width)
command="find.pl -p "+prefix+" -d ./ "
if opt.union :
command+=" -u"
if opt.verbose:
sys.stderr.write( command )
try:
os.system(command)
except:
raise TaskErorr, "execute find"
if not os.access("find.OK",os.R_OK):
raise TaskError, "find failed"
### check the transformation file
command = "checktrans -p "+prefix
try:
os.system(command)
except:
raise TaskError, "execute checktrans"
if not os.access("checktrans.OK",os.R_OK):
raise TaskError, "checktrans failed"
elif os.access("checktrans.FAILED",os.R_OK):
os.unlink("checktrans.FAILED")
if os.access("BAD_TRANS"+prefix,os.R_OK):
raise TaskError,"BAD TRANS"
## check that the transformation in .trans.jmp files look reasonable
import math
for filename in filenames:
try:
for line in open(filename+".trans.jmp"):
for v in line.split():
if math.fabs(float(v)) > 200:
raise TaskError,"BAD TRANS"
except:
raise TaskError, "TRAN_CHECK FAILED"
astrom=prefix+filenames[0]+".cands.comb"
if opt.plant:
for filename in filenames:
try:
ushort(prefix+filename+".fits")
except:
raise TaskError("ushort failed %s" % (prefix+filename+".fits"))
if opt.plant:
astrom=prefix+filenames[0]+".comb.found"
try:
#make sure we have +5 lines in this file
lines=file(astrom).readlines()
if len(lines)<5:
raise TaskError,"Too few Found"
except:
raise TaskError, "Error reading %s" %(astrom)
if os.access(astrom,os.R_OK):
return(1)
else:
return(0) | Given a list of exposure numbers, find all the KBOs in that set of exposures | Below is the the instruction that describes the task:
### Input:
Given a list of exposure numbers, find all the KBOs in that set of exposures
### Response:
def searchTriples(filenames,plant=False):
"""Given a list of exposure numbers, find all the KBOs in that set of exposures"""
print filenames
if opt.none :
return
import MOPfits,os
import MOPdbaccess
import string
import os.path
import pyfits
if len(filenames)!=3:
raise TaskError, "got %d exposures"%(len(expnums))
### Some program Constants
proc_these_files=[]
if not plant:
proc_these_files.append("# Files to be planted and searched\n")
proc_these_files.append("# image fwhm plant\n")
for filename in filenames:
try:
mysql=MOPdbaccess.connect('bucket','cfhls','MYSQL')
bucket=mysql.cursor()
except:
raise TaskError, "mysql failed"
#bucket.execute("SELECT obs_iq_refccd FROM exposure WHERE expnum=%s" , (expnum, ) )
#row=bucket.fetchone()
#mysql.close()
#fwhm=row[0]
#if not fwhm > 0:
fwhm=1.0
if not plant:
#proc_these_files.append("%s %f %s \n" % ( filename[0], fwhm/0.183, 'no'))
pstr='NO'
else:
pstr='YES'
### since we're planting we need a psf. JMPMAKEPSF will
### update the proc-these-files listing
### run the make psf script .. always. This creates proc-these-files
### which is needed by the find.pl script.
command='jmpmakepsf.csh ./ %s %s' % ( filename, pstr )
if opt.verbose:
sys.stderr.write( command )
try:
os.system(command)
except:
raise TaskError, "jmpmakepsf noexec"
if os.access(filename+'.jmpmakepsf.FAILED',os.R_OK) or not os.access(filename+".psf.fits", os.R_OK) :
if plant:
raise TaskError, "jmpmakepsf failed"
# do without plant
else:
plant=False
pstr='NO'
### we're not planting so, lets keep going
### but check that there is a line in proc_these_files
add_line=True
if not os.access('proc-these-files',os.R_OK):
f=open('proc-these-files','w')
for l in proc_these_files:
f.write(l)
f.close()
f=open('proc-these-files','r')
ptf_lines=f.readlines()
f.close()
for ptf_line in ptf_lines:
if ptf_line[0]=='#':
continue
ptf_a=ptf_line.split()
import re
if re.search('%s' % (filename),ptf_a[0]):
### there's already a line for this one
add_line=False
break
if add_line:
f=open('proc-these-files','a')
f.write("%s %f %s \n" % ( filename, fwhm/0.183, 'no'))
f.close()
if opt.none:
return(-1)
prefix=''
if plant:
command="plant.csh ./ "
#command="plant.csh ./ -rmin %s -rmax %s -ang %s -width %s " % ( opt.rmin, opt.rmax, opt.angle, opt.width)
try:
os.system(command)
except:
raise TaskError, 'plant exec. failed'
if not os.access('plant.OK',os.R_OK):
raise TaskError, 'plant failed'
prefix='fk'
#else:
# f=open('proc-these-files','w')
# for line in proc_these_files:
# f.write(line)
# f.flush()
# f.close()
if opt.rerun and os.access('find.OK',os.R_OK):
os.unlink("find.OK")
#command="find.pl -p "+prefix+" -rn %s -rx %s -a %s -aw %s -d ./ " % ( opt.rmin, opt.rmax, opt.angle, opt.width)
command="find.pl -p "+prefix+" -d ./ "
if opt.union :
command+=" -u"
if opt.verbose:
sys.stderr.write( command )
try:
os.system(command)
except:
raise TaskErorr, "execute find"
if not os.access("find.OK",os.R_OK):
raise TaskError, "find failed"
### check the transformation file
command = "checktrans -p "+prefix
try:
os.system(command)
except:
raise TaskError, "execute checktrans"
if not os.access("checktrans.OK",os.R_OK):
raise TaskError, "checktrans failed"
elif os.access("checktrans.FAILED",os.R_OK):
os.unlink("checktrans.FAILED")
if os.access("BAD_TRANS"+prefix,os.R_OK):
raise TaskError,"BAD TRANS"
## check that the transformation in .trans.jmp files look reasonable
import math
for filename in filenames:
try:
for line in open(filename+".trans.jmp"):
for v in line.split():
if math.fabs(float(v)) > 200:
raise TaskError,"BAD TRANS"
except:
raise TaskError, "TRAN_CHECK FAILED"
astrom=prefix+filenames[0]+".cands.comb"
if opt.plant:
for filename in filenames:
try:
ushort(prefix+filename+".fits")
except:
raise TaskError("ushort failed %s" % (prefix+filename+".fits"))
if opt.plant:
astrom=prefix+filenames[0]+".comb.found"
try:
#make sure we have +5 lines in this file
lines=file(astrom).readlines()
if len(lines)<5:
raise TaskError,"Too few Found"
except:
raise TaskError, "Error reading %s" %(astrom)
if os.access(astrom,os.R_OK):
return(1)
else:
return(0) |
def _remove_media(self,directory,files=None):
"""Removes specified files from flickr"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
db=self._loadDB(directory)
# If no files given, use files from DB in dir
if not files:
files=db.keys()
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
for fn in files:
print("%s - Deleting from flickr [local copy intact]"%(fn))
try:
pid=db[fn]['photoid']
except:
logger.debug("%s - Was never in flickr DB"%(fn))
continue
resp=self.flickr.photos_delete(photo_id=pid,format='etree')
if resp.attrib['stat']!='ok':
print("%s - flickr: delete failed with status: %s",\
resp.attrib['stat']);
return False
else:
logger.debug('Removing %s from flickr DB'%(fn))
del db[fn]
self._saveDB(directory,db)
return True | Removes specified files from flickr | Below is the the instruction that describes the task:
### Input:
Removes specified files from flickr
### Response:
def _remove_media(self,directory,files=None):
"""Removes specified files from flickr"""
# Connect if we aren't already
if not self._connectToFlickr():
logger.error("%s - Couldn't connect to flickr")
return False
db=self._loadDB(directory)
# If no files given, use files from DB in dir
if not files:
files=db.keys()
#If only one file given, make it a list
if isinstance(files,basestring):
files=[files]
for fn in files:
print("%s - Deleting from flickr [local copy intact]"%(fn))
try:
pid=db[fn]['photoid']
except:
logger.debug("%s - Was never in flickr DB"%(fn))
continue
resp=self.flickr.photos_delete(photo_id=pid,format='etree')
if resp.attrib['stat']!='ok':
print("%s - flickr: delete failed with status: %s",\
resp.attrib['stat']);
return False
else:
logger.debug('Removing %s from flickr DB'%(fn))
del db[fn]
self._saveDB(directory,db)
return True |
def _append(self, menu):
'''append this menu item to a menu'''
from wx_loader import wx
menu.AppendMenu(-1, self.name, self.wx_menu()) | append this menu item to a menu | Below is the the instruction that describes the task:
### Input:
append this menu item to a menu
### Response:
def _append(self, menu):
'''append this menu item to a menu'''
from wx_loader import wx
menu.AppendMenu(-1, self.name, self.wx_menu()) |
def to_uint(self):
"""Convert vector to an unsigned integer, if possible.
This is only useful for arrays filled with zero/one entries.
"""
num = 0
for i, f in enumerate(self._items):
if f.is_zero():
pass
elif f.is_one():
num += 1 << i
else:
fstr = "expected all functions to be a constant (0 or 1) form"
raise ValueError(fstr)
return num | Convert vector to an unsigned integer, if possible.
This is only useful for arrays filled with zero/one entries. | Below is the the instruction that describes the task:
### Input:
Convert vector to an unsigned integer, if possible.
This is only useful for arrays filled with zero/one entries.
### Response:
def to_uint(self):
"""Convert vector to an unsigned integer, if possible.
This is only useful for arrays filled with zero/one entries.
"""
num = 0
for i, f in enumerate(self._items):
if f.is_zero():
pass
elif f.is_one():
num += 1 << i
else:
fstr = "expected all functions to be a constant (0 or 1) form"
raise ValueError(fstr)
return num |
def smooth(self, smoothing_factor):
"""
return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) | return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object. | Below is the the instruction that describes the task:
### Input:
return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
### Response:
def smooth(self, smoothing_factor):
"""
return a new time series which is a exponential smoothed version of the original data series.
soomth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) |
def _run_pants(self, sock, arguments, environment):
"""Execute a given run with a pants runner."""
self.server.runner_factory(sock, arguments, environment).run() | Execute a given run with a pants runner. | Below is the the instruction that describes the task:
### Input:
Execute a given run with a pants runner.
### Response:
def _run_pants(self, sock, arguments, environment):
"""Execute a given run with a pants runner."""
self.server.runner_factory(sock, arguments, environment).run() |
def get_object_url(self):
"""
Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit` as the view name.
"""
return self.bundle.get_view_url('edit',
self.request.user, {}, self.kwargs) | Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit` as the view name. | Below is the the instruction that describes the task:
### Input:
Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit` as the view name.
### Response:
def get_object_url(self):
"""
Returns the url to link to the object
The get_view_url will be called on the current bundle using
'edit` as the view name.
"""
return self.bundle.get_view_url('edit',
self.request.user, {}, self.kwargs) |
def _get_secret_from_vault(
key, environment=None, stage=None, namespace=None,
wait_exponential_multiplier=50, wait_exponential_max=5000,
stop_max_delay=10000):
"""Retrieves a secret from the secrets vault."""
# Get the encrypted secret from DynamoDB
table_name = _secrets_table_name(environment=environment, stage=stage)
if namespace:
key = "{}:{}".format(namespace, key)
if table_name is None:
logger.warning("Can't produce secrets table name: unable to retrieve "
"secret '{}'".format(key))
return
client = boto3.client('dynamodb')
logger.info("Retriving key '{}' from table '{}'".format(
key, table_name))
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=wait_exponential_multiplier,
wait_exponential_max=wait_exponential_max,
stop_max_delay=stop_max_delay)
def get_item():
try:
return client.get_item(
TableName=table_name,
Key={'id': {'S': key}}).get('Item', {}).get(
'value', {}).get('B')
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
encrypted = get_item()
if encrypted is None:
return
# Decrypt using KMS
client = boto3.client('kms')
try:
value = client.decrypt(CiphertextBlob=encrypted)['Plaintext'].decode()
except ClientError:
logger.error("KMS error when trying to decrypt secret")
traceback.print_exc()
return
try:
value = json.loads(value)
except (TypeError, ValueError):
# It's ok, the client should know how to deal with the value
pass
return value | Retrieves a secret from the secrets vault. | Below is the the instruction that describes the task:
### Input:
Retrieves a secret from the secrets vault.
### Response:
def _get_secret_from_vault(
key, environment=None, stage=None, namespace=None,
wait_exponential_multiplier=50, wait_exponential_max=5000,
stop_max_delay=10000):
"""Retrieves a secret from the secrets vault."""
# Get the encrypted secret from DynamoDB
table_name = _secrets_table_name(environment=environment, stage=stage)
if namespace:
key = "{}:{}".format(namespace, key)
if table_name is None:
logger.warning("Can't produce secrets table name: unable to retrieve "
"secret '{}'".format(key))
return
client = boto3.client('dynamodb')
logger.info("Retriving key '{}' from table '{}'".format(
key, table_name))
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=wait_exponential_multiplier,
wait_exponential_max=wait_exponential_max,
stop_max_delay=stop_max_delay)
def get_item():
try:
return client.get_item(
TableName=table_name,
Key={'id': {'S': key}}).get('Item', {}).get(
'value', {}).get('B')
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
encrypted = get_item()
if encrypted is None:
return
# Decrypt using KMS
client = boto3.client('kms')
try:
value = client.decrypt(CiphertextBlob=encrypted)['Plaintext'].decode()
except ClientError:
logger.error("KMS error when trying to decrypt secret")
traceback.print_exc()
return
try:
value = json.loads(value)
except (TypeError, ValueError):
# It's ok, the client should know how to deal with the value
pass
return value |
def populateFromFile(self, dataUrls, indexFiles):
"""
Populates this variant set using the specified lists of data
files and indexes. These must be in the same order, such that
the jth index file corresponds to the jth data file.
"""
assert len(dataUrls) == len(indexFiles)
for dataUrl, indexFile in zip(dataUrls, indexFiles):
varFile = pysam.VariantFile(dataUrl, index_filename=indexFile)
try:
self._populateFromVariantFile(varFile, dataUrl, indexFile)
finally:
varFile.close() | Populates this variant set using the specified lists of data
files and indexes. These must be in the same order, such that
the jth index file corresponds to the jth data file. | Below is the the instruction that describes the task:
### Input:
Populates this variant set using the specified lists of data
files and indexes. These must be in the same order, such that
the jth index file corresponds to the jth data file.
### Response:
def populateFromFile(self, dataUrls, indexFiles):
"""
Populates this variant set using the specified lists of data
files and indexes. These must be in the same order, such that
the jth index file corresponds to the jth data file.
"""
assert len(dataUrls) == len(indexFiles)
for dataUrl, indexFile in zip(dataUrls, indexFiles):
varFile = pysam.VariantFile(dataUrl, index_filename=indexFile)
try:
self._populateFromVariantFile(varFile, dataUrl, indexFile)
finally:
varFile.close() |
def getattr_path(obj, path):
"""
Get an attribute path, as defined by a string separated by '__'.
getattr_path(foo, 'a__b__c') is roughly equivalent to foo.a.b.c but
will short circuit to return None if something on the path is None.
"""
path = path.split('__')
for name in path:
obj = getattr(obj, name)
if obj is None:
return None
return obj | Get an attribute path, as defined by a string separated by '__'.
getattr_path(foo, 'a__b__c') is roughly equivalent to foo.a.b.c but
will short circuit to return None if something on the path is None. | Below is the the instruction that describes the task:
### Input:
Get an attribute path, as defined by a string separated by '__'.
getattr_path(foo, 'a__b__c') is roughly equivalent to foo.a.b.c but
will short circuit to return None if something on the path is None.
### Response:
def getattr_path(obj, path):
"""
Get an attribute path, as defined by a string separated by '__'.
getattr_path(foo, 'a__b__c') is roughly equivalent to foo.a.b.c but
will short circuit to return None if something on the path is None.
"""
path = path.split('__')
for name in path:
obj = getattr(obj, name)
if obj is None:
return None
return obj |
def get_role_policy(role_name, policy_name, region=None, key=None,
keyid=None, profile=None):
'''
Get a role policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_role_policy myirole mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
_policy = conn.get_role_policy(role_name, policy_name)
# I _hate_ you for not giving me an object boto.
_policy = _policy.get_role_policy_response.policy_document
# Policy is url encoded
_policy = _unquote(_policy)
_policy = salt.utils.json.loads(_policy, object_pairs_hook=odict.OrderedDict)
return _policy
except boto.exception.BotoServerError:
return {} | Get a role policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_role_policy myirole mypolicy | Below is the the instruction that describes the task:
### Input:
Get a role policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_role_policy myirole mypolicy
### Response:
def get_role_policy(role_name, policy_name, region=None, key=None,
keyid=None, profile=None):
'''
Get a role policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_role_policy myirole mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
_policy = conn.get_role_policy(role_name, policy_name)
# I _hate_ you for not giving me an object boto.
_policy = _policy.get_role_policy_response.policy_document
# Policy is url encoded
_policy = _unquote(_policy)
_policy = salt.utils.json.loads(_policy, object_pairs_hook=odict.OrderedDict)
return _policy
except boto.exception.BotoServerError:
return {} |
def partition_horizontal_twice(thelist, numbers):
"""
numbers is split on a comma to n and n2.
Break a list into peices each peice alternating between n and n2 items long
``partition_horizontal_twice(range(14), "3,4")`` gives::
[[0, 1, 2],
[3, 4, 5, 6],
[7, 8, 9],
[10, 11, 12, 13]]
Clear as mud?
"""
n, n2 = numbers.split(',')
try:
n = int(n)
n2 = int(n2)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
newlists = []
while thelist:
newlists.append(thelist[:n])
thelist = thelist[n:]
newlists.append(thelist[:n2])
thelist = thelist[n2:]
return newlists | numbers is split on a comma to n and n2.
Break a list into peices each peice alternating between n and n2 items long
``partition_horizontal_twice(range(14), "3,4")`` gives::
[[0, 1, 2],
[3, 4, 5, 6],
[7, 8, 9],
[10, 11, 12, 13]]
Clear as mud? | Below is the the instruction that describes the task:
### Input:
numbers is split on a comma to n and n2.
Break a list into peices each peice alternating between n and n2 items long
``partition_horizontal_twice(range(14), "3,4")`` gives::
[[0, 1, 2],
[3, 4, 5, 6],
[7, 8, 9],
[10, 11, 12, 13]]
Clear as mud?
### Response:
def partition_horizontal_twice(thelist, numbers):
"""
numbers is split on a comma to n and n2.
Break a list into peices each peice alternating between n and n2 items long
``partition_horizontal_twice(range(14), "3,4")`` gives::
[[0, 1, 2],
[3, 4, 5, 6],
[7, 8, 9],
[10, 11, 12, 13]]
Clear as mud?
"""
n, n2 = numbers.split(',')
try:
n = int(n)
n2 = int(n2)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
newlists = []
while thelist:
newlists.append(thelist[:n])
thelist = thelist[n:]
newlists.append(thelist[:n2])
thelist = thelist[n2:]
return newlists |
def abstract(class_):
"""Mark the class as _abstract_ base class, forbidding its instantiation.
.. note::
Unlike other modifiers, ``@abstract`` can be applied
to all Python classes, not just subclasses of :class:`Object`.
.. versionadded:: 0.0.3
"""
if not inspect.isclass(class_):
raise TypeError("@abstract can only be applied to classes")
abc_meta = None
# if the class is not already using a metaclass specific to ABC,
# we need to change that
class_meta = type(class_)
if class_meta not in (_ABCMetaclass, _ABCObjectMetaclass):
# decide what metaclass to use, depending on whether it's a subclass
# of our universal :class:`Object` or not
if class_meta is type:
abc_meta = _ABCMetaclass # like ABCMeta, but can never instantiate
elif class_meta is ObjectMetaclass:
abc_meta = _ABCObjectMetaclass # ABCMeta mixed with ObjectMetaclass
else:
raise ValueError(
"@abstract cannot be applied to classes with custom metaclass")
class_.__abstract__ = True
return metaclass(abc_meta)(class_) if abc_meta else class_ | Mark the class as _abstract_ base class, forbidding its instantiation.
.. note::
Unlike other modifiers, ``@abstract`` can be applied
to all Python classes, not just subclasses of :class:`Object`.
.. versionadded:: 0.0.3 | Below is the the instruction that describes the task:
### Input:
Mark the class as _abstract_ base class, forbidding its instantiation.
.. note::
Unlike other modifiers, ``@abstract`` can be applied
to all Python classes, not just subclasses of :class:`Object`.
.. versionadded:: 0.0.3
### Response:
def abstract(class_):
"""Mark the class as _abstract_ base class, forbidding its instantiation.
.. note::
Unlike other modifiers, ``@abstract`` can be applied
to all Python classes, not just subclasses of :class:`Object`.
.. versionadded:: 0.0.3
"""
if not inspect.isclass(class_):
raise TypeError("@abstract can only be applied to classes")
abc_meta = None
# if the class is not already using a metaclass specific to ABC,
# we need to change that
class_meta = type(class_)
if class_meta not in (_ABCMetaclass, _ABCObjectMetaclass):
# decide what metaclass to use, depending on whether it's a subclass
# of our universal :class:`Object` or not
if class_meta is type:
abc_meta = _ABCMetaclass # like ABCMeta, but can never instantiate
elif class_meta is ObjectMetaclass:
abc_meta = _ABCObjectMetaclass # ABCMeta mixed with ObjectMetaclass
else:
raise ValueError(
"@abstract cannot be applied to classes with custom metaclass")
class_.__abstract__ = True
return metaclass(abc_meta)(class_) if abc_meta else class_ |
def libvlc_video_get_size(p_mi, num):
'''Get the pixel dimensions of a video.
@param p_mi: media player.
@param num: number of the video (starting from, and most commonly 0).
@return: px pixel width, py pixel height.
'''
f = _Cfunctions.get('libvlc_video_get_size', None) or \
_Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
return f(p_mi, num) | Get the pixel dimensions of a video.
@param p_mi: media player.
@param num: number of the video (starting from, and most commonly 0).
@return: px pixel width, py pixel height. | Below is the the instruction that describes the task:
### Input:
Get the pixel dimensions of a video.
@param p_mi: media player.
@param num: number of the video (starting from, and most commonly 0).
@return: px pixel width, py pixel height.
### Response:
def libvlc_video_get_size(p_mi, num):
'''Get the pixel dimensions of a video.
@param p_mi: media player.
@param num: number of the video (starting from, and most commonly 0).
@return: px pixel width, py pixel height.
'''
f = _Cfunctions.get('libvlc_video_get_size', None) or \
_Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
return f(p_mi, num) |
def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
'check for and remove any pids which have no corresponding process'
if isinstance(pids, (int, float, long)):
pids = [pids]
pids = str2list(pids, map_=unicode)
procs = map(unicode, os.listdir('/proc'))
running = [pid for pid in pids if pid in procs]
logger.warn(
"Found %s pids running: %s" % (len(running),
running))
prefix = prefix.rstrip('.') if prefix else None
for pid in pids:
if prefix:
_prefix = prefix
else:
_prefix = unicode(pid)
# remove non-running procs
if pid in running:
continue
if multi:
pid_file = '%s%s.pid' % (_prefix, pid)
else:
pid_file = '%s.pid' % (_prefix)
path = os.path.join(pid_dir, pid_file)
if os.path.exists(path):
logger.debug("Removing pidfile: %s" % path)
try:
remove_file(path)
except OSError as e:
logger.warn(e)
return running | check for and remove any pids which have no corresponding process | Below is the the instruction that describes the task:
### Input:
check for and remove any pids which have no corresponding process
### Response:
def clear_stale_pids(pids, pid_dir='/tmp', prefix='', multi=False):
'check for and remove any pids which have no corresponding process'
if isinstance(pids, (int, float, long)):
pids = [pids]
pids = str2list(pids, map_=unicode)
procs = map(unicode, os.listdir('/proc'))
running = [pid for pid in pids if pid in procs]
logger.warn(
"Found %s pids running: %s" % (len(running),
running))
prefix = prefix.rstrip('.') if prefix else None
for pid in pids:
if prefix:
_prefix = prefix
else:
_prefix = unicode(pid)
# remove non-running procs
if pid in running:
continue
if multi:
pid_file = '%s%s.pid' % (_prefix, pid)
else:
pid_file = '%s.pid' % (_prefix)
path = os.path.join(pid_dir, pid_file)
if os.path.exists(path):
logger.debug("Removing pidfile: %s" % path)
try:
remove_file(path)
except OSError as e:
logger.warn(e)
return running |
def mean(a, rep=0.75, **kwargs):
"""Compute the average along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
"""
return rfunc(a, ma.mean, rep, **kwargs) | Compute the average along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value | Below is the instruction that describes the task:
### Input:
Compute the average along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
### Response:
def mean(a, rep=0.75, **kwargs):
"""Compute the average along a 1D array like ma.mean,
but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,
then the result is a masked value
"""
return rfunc(a, ma.mean, rep, **kwargs) |
def verifyG1(x, tTilde, y, pi, errorOnFail=True):
"""
Verifies a zero-knowledge proof where p \in G1.
@errorOnFail: Raise an exception if the proof does not hold.
"""
# Unpack the proof
p,c,u = pi
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
assertType(y, GtElement)
assertType(p, G1Element)
# TODO: beta can be pre-computed while waiting for a server response.
Q = generatorG1()
beta = pair(x,tTilde)
# Recompute c'
t1 = Q*u + p*c
t2 = beta**u * y**c
t1.normalize()
cPrime = hashZ(Q,p,beta,y,t1,t2)
# Check computed @c' against server's value @c
if cPrime == c:
return True
if errorOnFail:
raise Exception("zero-knowledge proof failed verification.")
else:
return False | Verifies a zero-knowledge proof where p \in G1.
@errorOnFail: Raise an exception if the proof does not hold. | Below is the instruction that describes the task:
### Input:
Verifies a zero-knowledge proof where p \in G1.
@errorOnFail: Raise an exception if the proof does not hold.
### Response:
def verifyG1(x, tTilde, y, pi, errorOnFail=True):
"""
Verifies a zero-knowledge proof where p \in G1.
@errorOnFail: Raise an exception if the proof does not hold.
"""
# Unpack the proof
p,c,u = pi
# Verify types
assertType(x, G1Element)
assertType(tTilde, G2Element)
assertType(y, GtElement)
assertType(p, G1Element)
# TODO: beta can be pre-computed while waiting for a server response.
Q = generatorG1()
beta = pair(x,tTilde)
# Recompute c'
t1 = Q*u + p*c
t2 = beta**u * y**c
t1.normalize()
cPrime = hashZ(Q,p,beta,y,t1,t2)
# Check computed @c' against server's value @c
if cPrime == c:
return True
if errorOnFail:
raise Exception("zero-knowledge proof failed verification.")
else:
return False |
def fisher_mean(data):
"""
Calculates the Fisher mean and associated parameter from a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
"""
R, Xbar, X, fpars = 0, [0, 0, 0], [], {}
N = len(data)
if N < 2:
return fpars
X = dir2cart(data)
for i in range(len(X)):
for c in range(3):
Xbar[c] += X[i][c]
for c in range(3):
R += Xbar[c]**2
R = np.sqrt(R)
for c in range(3):
Xbar[c] = Xbar[c]/R
dir = cart2dir(Xbar)
fpars["dec"] = dir[0]
fpars["inc"] = dir[1]
fpars["n"] = N
fpars["r"] = R
if N != R:
k = (N - 1.) / (N - R)
fpars["k"] = k
csd = 81./np.sqrt(k)
else:
fpars['k'] = 'inf'
csd = 0.
b = 20.**(1./(N - 1.)) - 1
a = 1 - b * (N - R) / R
if a < -1:
a = -1
a95 = np.degrees(np.arccos(a))
fpars["alpha95"] = a95
fpars["csd"] = csd
if a < 0:
fpars["alpha95"] = 180.0
return fpars | Calculates the Fisher mean and associated parameter from a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence | Below is the instruction that describes the task:
### Input:
Calculates the Fisher mean and associated parameter from a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
### Response:
def fisher_mean(data):
"""
Calculates the Fisher mean and associated parameter from a di_block
Parameters
----------
di_block : a nested list of [dec,inc] or [dec,inc,intensity]
Returns
-------
fpars : dictionary containing the Fisher mean and statistics
dec : mean declination
inc : mean inclination
r : resultant vector length
n : number of data points
k : Fisher k value
csd : Fisher circular standard deviation
alpha95 : Fisher circle of 95% confidence
"""
R, Xbar, X, fpars = 0, [0, 0, 0], [], {}
N = len(data)
if N < 2:
return fpars
X = dir2cart(data)
for i in range(len(X)):
for c in range(3):
Xbar[c] += X[i][c]
for c in range(3):
R += Xbar[c]**2
R = np.sqrt(R)
for c in range(3):
Xbar[c] = Xbar[c]/R
dir = cart2dir(Xbar)
fpars["dec"] = dir[0]
fpars["inc"] = dir[1]
fpars["n"] = N
fpars["r"] = R
if N != R:
k = (N - 1.) / (N - R)
fpars["k"] = k
csd = 81./np.sqrt(k)
else:
fpars['k'] = 'inf'
csd = 0.
b = 20.**(1./(N - 1.)) - 1
a = 1 - b * (N - R) / R
if a < -1:
a = -1
a95 = np.degrees(np.arccos(a))
fpars["alpha95"] = a95
fpars["csd"] = csd
if a < 0:
fpars["alpha95"] = 180.0
return fpars |
def bulk_insert_rows(self, table, rows, target_fields=None, commit_every=5000):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
cursor = conn.cursor()
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close() | A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int | Below is the instruction that describes the task:
### Input:
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
### Response:
def bulk_insert_rows(self, table, rows, target_fields=None, commit_every=5000):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
cursor = conn.cursor()
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close() |
def chipqc(bam_file, sample, out_dir):
"""Attempt code to run ChIPQC bioconductor packate in one sample"""
sample_name = dd.get_sample_name(sample)
logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.")
if utils.file_exists(out_dir):
return _get_output(out_dir)
with tx_tmpdir() as tmp_dir:
rcode = _sample_template(sample, tmp_dir)
if rcode:
# local_sitelib = utils.R_sitelib()
rscript = utils.Rscript_cmd()
do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False)
shutil.move(tmp_dir, out_dir)
return _get_output(out_dir) | Attempt code to run ChIPQC bioconductor packate in one sample | Below is the instruction that describes the task:
### Input:
Attempt code to run ChIPQC bioconductor packate in one sample
### Response:
def chipqc(bam_file, sample, out_dir):
"""Attempt code to run ChIPQC bioconductor packate in one sample"""
sample_name = dd.get_sample_name(sample)
logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.")
if utils.file_exists(out_dir):
return _get_output(out_dir)
with tx_tmpdir() as tmp_dir:
rcode = _sample_template(sample, tmp_dir)
if rcode:
# local_sitelib = utils.R_sitelib()
rscript = utils.Rscript_cmd()
do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False)
shutil.move(tmp_dir, out_dir)
return _get_output(out_dir) |
def _NSPrefix(self, ns):
""" Get xml ns prefix. self.nsMap must be set """
if ns == self.defaultNS:
return ''
prefix = self.nsMap[ns]
return prefix and prefix + ':' or '' | Get xml ns prefix. self.nsMap must be set | Below is the instruction that describes the task:
### Input:
Get xml ns prefix. self.nsMap must be set
### Response:
def _NSPrefix(self, ns):
""" Get xml ns prefix. self.nsMap must be set """
if ns == self.defaultNS:
return ''
prefix = self.nsMap[ns]
return prefix and prefix + ':' or '' |
def update_queue(self):
"""Update queue"""
started = 0
for parent_id, threadlist in list(self.started_threads.items()):
still_running = []
for thread in threadlist:
if thread.isFinished():
end_callback = self.end_callbacks.pop(id(thread))
if thread.results is not None:
# The thread was executed successfully
end_callback(thread.results)
thread.setParent(None)
thread = None
else:
still_running.append(thread)
started += 1
threadlist = None
if still_running:
self.started_threads[parent_id] = still_running
else:
self.started_threads.pop(parent_id)
logger.debug("Updating queue:")
logger.debug(" started: %d" % started)
logger.debug(" pending: %d" % len(self.pending_threads))
if self.pending_threads and started < self.max_simultaneous_threads:
thread, parent_id = self.pending_threads.pop(0)
thread.finished.connect(self.update_queue)
threadlist = self.started_threads.get(parent_id, [])
self.started_threads[parent_id] = threadlist+[thread]
logger.debug("===>starting: %r" % thread)
thread.start() | Update queue | Below is the instruction that describes the task:
### Input:
Update queue
### Response:
def update_queue(self):
"""Update queue"""
started = 0
for parent_id, threadlist in list(self.started_threads.items()):
still_running = []
for thread in threadlist:
if thread.isFinished():
end_callback = self.end_callbacks.pop(id(thread))
if thread.results is not None:
# The thread was executed successfully
end_callback(thread.results)
thread.setParent(None)
thread = None
else:
still_running.append(thread)
started += 1
threadlist = None
if still_running:
self.started_threads[parent_id] = still_running
else:
self.started_threads.pop(parent_id)
logger.debug("Updating queue:")
logger.debug(" started: %d" % started)
logger.debug(" pending: %d" % len(self.pending_threads))
if self.pending_threads and started < self.max_simultaneous_threads:
thread, parent_id = self.pending_threads.pop(0)
thread.finished.connect(self.update_queue)
threadlist = self.started_threads.get(parent_id, [])
self.started_threads[parent_id] = threadlist+[thread]
logger.debug("===>starting: %r" % thread)
thread.start() |
def get_subdomain_entry(self, fqn, accepted=True, cur=None):
"""
Given a fully-qualified subdomain, get its (latest) subdomain record.
Raises SubdomainNotFound if there is no such subdomain
"""
get_cmd = "SELECT * FROM {} WHERE fully_qualified_subdomain=? {} ORDER BY sequence DESC, parent_zonefile_index DESC LIMIT 1;".format(self.subdomain_table, 'AND accepted=1' if accepted else '')
cursor = None
if cur is None:
cursor = self.conn.cursor()
else:
cursor = cur
db_query_execute(cursor, get_cmd, (fqn,))
try:
rowdata = cursor.fetchone()
assert rowdata
except Exception as e:
raise SubdomainNotFound(fqn)
return self._extract_subdomain(rowdata) | Given a fully-qualified subdomain, get its (latest) subdomain record.
Raises SubdomainNotFound if there is no such subdomain | Below is the instruction that describes the task:
### Input:
Given a fully-qualified subdomain, get its (latest) subdomain record.
Raises SubdomainNotFound if there is no such subdomain
### Response:
def get_subdomain_entry(self, fqn, accepted=True, cur=None):
"""
Given a fully-qualified subdomain, get its (latest) subdomain record.
Raises SubdomainNotFound if there is no such subdomain
"""
get_cmd = "SELECT * FROM {} WHERE fully_qualified_subdomain=? {} ORDER BY sequence DESC, parent_zonefile_index DESC LIMIT 1;".format(self.subdomain_table, 'AND accepted=1' if accepted else '')
cursor = None
if cur is None:
cursor = self.conn.cursor()
else:
cursor = cur
db_query_execute(cursor, get_cmd, (fqn,))
try:
rowdata = cursor.fetchone()
assert rowdata
except Exception as e:
raise SubdomainNotFound(fqn)
return self._extract_subdomain(rowdata) |
def advance_recurring_todo(p_todo, p_offset=None, p_strict=False):
"""
Given a Todo item, return a new instance of a Todo item with the dates
shifted according to the recurrence rule.
Strict means that the real due date is taken as a offset, not today or a
future date to determine the offset.
When the todo item has no due date, then the date is used passed by the
caller (defaulting to today).
When no recurrence tag is present, an exception is raised.
"""
todo = Todo(p_todo.source())
pattern = todo.tag_value('rec')
if not pattern:
raise NoRecurrenceException()
elif pattern.startswith('+'):
p_strict = True
# strip off the +
pattern = pattern[1:]
if p_strict:
offset = p_todo.due_date() or p_offset or date.today()
else:
offset = p_offset or date.today()
length = todo.length()
new_due = relative_date_to_date(pattern, offset)
if not new_due:
raise NoRecurrenceException()
# pylint: disable=E1103
todo.set_tag(config().tag_due(), new_due.isoformat())
if todo.start_date():
new_start = new_due - timedelta(length)
todo.set_tag(config().tag_start(), new_start.isoformat())
todo.set_creation_date(date.today())
return todo | Given a Todo item, return a new instance of a Todo item with the dates
shifted according to the recurrence rule.
Strict means that the real due date is taken as a offset, not today or a
future date to determine the offset.
When the todo item has no due date, then the date is used passed by the
caller (defaulting to today).
When no recurrence tag is present, an exception is raised. | Below is the instruction that describes the task:
### Input:
Given a Todo item, return a new instance of a Todo item with the dates
shifted according to the recurrence rule.
Strict means that the real due date is taken as a offset, not today or a
future date to determine the offset.
When the todo item has no due date, then the date is used passed by the
caller (defaulting to today).
When no recurrence tag is present, an exception is raised.
### Response:
def advance_recurring_todo(p_todo, p_offset=None, p_strict=False):
"""
Given a Todo item, return a new instance of a Todo item with the dates
shifted according to the recurrence rule.
Strict means that the real due date is taken as a offset, not today or a
future date to determine the offset.
When the todo item has no due date, then the date is used passed by the
caller (defaulting to today).
When no recurrence tag is present, an exception is raised.
"""
todo = Todo(p_todo.source())
pattern = todo.tag_value('rec')
if not pattern:
raise NoRecurrenceException()
elif pattern.startswith('+'):
p_strict = True
# strip off the +
pattern = pattern[1:]
if p_strict:
offset = p_todo.due_date() or p_offset or date.today()
else:
offset = p_offset or date.today()
length = todo.length()
new_due = relative_date_to_date(pattern, offset)
if not new_due:
raise NoRecurrenceException()
# pylint: disable=E1103
todo.set_tag(config().tag_due(), new_due.isoformat())
if todo.start_date():
new_start = new_due - timedelta(length)
todo.set_tag(config().tag_start(), new_start.isoformat())
todo.set_creation_date(date.today())
return todo |
def add_key_value(self, key, value):
"""Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
"""
key = self._metadata_map.get(key, key)
if key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']:
self._group_data[key] = self._utils.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
elif key == 'file_content':
# file content arg is not part of Group JSON
pass
else:
self._group_data[key] = value | Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data. | Below is the instruction that describes the task:
### Input:
Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
### Response:
def add_key_value(self, key, value):
"""Add custom field to Group object.
.. note:: The key must be the exact name required by the batch schema.
Example::
document = tcex.batch.group('Document', 'My Document')
document.add_key_value('fileName', 'something.pdf')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
"""
key = self._metadata_map.get(key, key)
if key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']:
self._group_data[key] = self._utils.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
elif key == 'file_content':
# file content arg is not part of Group JSON
pass
else:
self._group_data[key] = value |
def run_da(self, rs, overwrite=True, skip_existing=False, path=None,
chunksize=None):
"""Compute timestamps for current populations."""
if path is None:
path = str(self.S.store.filepath.parent)
kwargs = dict(rs=rs, overwrite=overwrite, path=path,
timeslice=self.timeslice, skip_existing=skip_existing)
if chunksize is not None:
kwargs['chunksize'] = chunksize
header = ' - Mixture Simulation:'
# Donor timestamps hash is from the input RandomState
self._calc_hash_da(rs)
print('%s Donor + Acceptor timestamps - %s' %
(header, ctime()), flush=True)
self.S.simulate_timestamps_mix_da(
max_rates_d = self.em_rates_d,
max_rates_a = self.em_rates_a,
populations = self.populations,
bg_rate_d = self.bg_rate_d,
bg_rate_a = self.bg_rate_a,
**kwargs)
print('\n%s Completed. %s' % (header, ctime()), flush=True) | Compute timestamps for current populations. | Below is the instruction that describes the task:
### Input:
Compute timestamps for current populations.
### Response:
def run_da(self, rs, overwrite=True, skip_existing=False, path=None,
chunksize=None):
"""Compute timestamps for current populations."""
if path is None:
path = str(self.S.store.filepath.parent)
kwargs = dict(rs=rs, overwrite=overwrite, path=path,
timeslice=self.timeslice, skip_existing=skip_existing)
if chunksize is not None:
kwargs['chunksize'] = chunksize
header = ' - Mixture Simulation:'
# Donor timestamps hash is from the input RandomState
self._calc_hash_da(rs)
print('%s Donor + Acceptor timestamps - %s' %
(header, ctime()), flush=True)
self.S.simulate_timestamps_mix_da(
max_rates_d = self.em_rates_d,
max_rates_a = self.em_rates_a,
populations = self.populations,
bg_rate_d = self.bg_rate_d,
bg_rate_a = self.bg_rate_a,
**kwargs)
print('\n%s Completed. %s' % (header, ctime()), flush=True) |
def consume(self, length):
"""
>>> OutBuffer().add(b"spam").consume(2).getvalue() == b"am"
True
@type length: int
@returns: self
"""
self.buff = io.BytesIO(self.getvalue()[length:])
return self | >>> OutBuffer().add(b"spam").consume(2).getvalue() == b"am"
True
@type length: int
@returns: self | Below is the instruction that describes the task:
### Input:
>>> OutBuffer().add(b"spam").consume(2).getvalue() == b"am"
True
@type length: int
@returns: self
### Response:
def consume(self, length):
"""
>>> OutBuffer().add(b"spam").consume(2).getvalue() == b"am"
True
@type length: int
@returns: self
"""
self.buff = io.BytesIO(self.getvalue()[length:])
return self |
def walk(dispatcher, node, definition=None):
"""
The default, standalone walk function following the standard
argument ordering for the unparsing walkers.
Arguments:
dispatcher
a Dispatcher instance, defined earlier in this module. This
instance will dispatch out the correct callable for the various
object types encountered throughout this recursive function.
node
the starting Node from asttypes.
definition
a standalone definition tuple to start working on the node with;
if none is provided, an initial definition will be looked up
using the dispatcher with the node for the generation of output.
While the dispatcher object is able to provide the lookup directly,
this extra definition argument allow more flexibility in having
Token subtypes being able to provide specific definitions also that
may be required, such as the generation of optional rendering
output.
"""
# The inner walk function - this is actually exposed to the token
# rule objects so they can also make use of it to process the node
# with the dispatcher.
nodes = []
sourcepath_stack = [NotImplemented]
def _walk(dispatcher, node, definition=None, token=None):
if not isinstance(node, Node):
for fragment in dispatcher.token(
token, nodes[-1], node, sourcepath_stack):
yield fragment
return
push = bool(node.sourcepath)
if push:
sourcepath_stack.append(node.sourcepath)
nodes.append(node)
if definition is None:
definition = dispatcher.get_optimized_definition(node)
for rule in definition:
for chunk in rule(_walk, dispatcher, node):
yield chunk
nodes.pop(-1)
if push:
sourcepath_stack.pop(-1)
# Format layout markers are not handled immediately in the walk -
# they will simply be buffered so that a collection of them can be
# handled at once.
def process_layouts(layout_rule_chunks, last_chunk, chunk):
before_text = last_chunk.text if last_chunk else None
after_text = chunk.text if chunk else None
# the text that was yielded by the previous layout handler
prev_text = None
# While Layout rules in a typical definition are typically
# interspersed with Tokens, certain assumptions with how the
# Layouts are specified within there will fail when Tokens fail
# to generate anything for any reason. However, the dispatcher
# instance will be able to accept and resolve a tuple of Layouts
# to some handler function, so that a form of normalization can
# be done. For instance, an (Indent, Newline, Dedent) can
# simply be resolved to no operations. To achieve this, iterate
# through the layout_rule_chunks and generate a normalized form
# for the final handling to happen.
# the preliminary stack that will be cleared whenever a
# normalized layout rule chunk is generated.
lrcs_stack = []
# first pass: generate both the normalized/finalized lrcs.
for lrc in layout_rule_chunks:
lrcs_stack.append(lrc)
# check every single chunk from left to right...
for idx in range(len(lrcs_stack)):
rule = tuple(lrc.rule for lrc in lrcs_stack[idx:])
handler = dispatcher.layout(rule)
if handler is not NotImplemented:
# not manipulating lrsc_stack from within the same
# for loop that it is being iterated upon
break
else:
# which continues back to the top of the outer for loop
continue
# So a handler is found from inside the rules; extend the
# chunks from the stack that didn't get normalized, and
# generate a new layout rule chunk.
lrcs_stack[:] = lrcs_stack[:idx]
lrcs_stack.append(LayoutChunk(
rule, handler,
layout_rule_chunks[idx].node,
))
# second pass: now the processing can be done.
for lr_chunk in lrcs_stack:
gen = lr_chunk.handler(
dispatcher, lr_chunk.node, before_text, after_text, prev_text)
if not gen:
continue
for chunk_from_layout in gen:
yield chunk_from_layout
prev_text = chunk_from_layout.text
# The top level walker implementation
def walk():
last_chunk = None
layout_rule_chunks = []
for chunk in _walk(dispatcher, node, definition):
if isinstance(chunk, LayoutChunk):
layout_rule_chunks.append(chunk)
else:
# process layout rule chunks that had been cached.
for chunk_from_layout in process_layouts(
layout_rule_chunks, last_chunk, chunk):
yield chunk_from_layout
layout_rule_chunks[:] = []
yield chunk
last_chunk = chunk
# process the remaining layout rule chunks.
for chunk_from_layout in process_layouts(
layout_rule_chunks, last_chunk, None):
yield chunk_from_layout
for chunk in walk():
yield chunk | The default, standalone walk function following the standard
argument ordering for the unparsing walkers.
Arguments:
dispatcher
a Dispatcher instance, defined earlier in this module. This
instance will dispatch out the correct callable for the various
object types encountered throughout this recursive function.
node
the starting Node from asttypes.
definition
a standalone definition tuple to start working on the node with;
if none is provided, an initial definition will be looked up
using the dispatcher with the node for the generation of output.
While the dispatcher object is able to provide the lookup directly,
this extra definition argument allow more flexibility in having
Token subtypes being able to provide specific definitions also that
may be required, such as the generation of optional rendering
output. | Below is the instruction that describes the task:
### Input:
The default, standalone walk function following the standard
argument ordering for the unparsing walkers.
Arguments:
dispatcher
a Dispatcher instance, defined earlier in this module. This
instance will dispatch out the correct callable for the various
object types encountered throughout this recursive function.
node
the starting Node from asttypes.
definition
a standalone definition tuple to start working on the node with;
if none is provided, an initial definition will be looked up
using the dispatcher with the node for the generation of output.
While the dispatcher object is able to provide the lookup directly,
this extra definition argument allow more flexibility in having
Token subtypes being able to provide specific definitions also that
may be required, such as the generation of optional rendering
output.
### Response:
def walk(dispatcher, node, definition=None):
    """
    The default, standalone walk function following the standard
    argument ordering for the unparsing walkers.

    Arguments:

    dispatcher
        a Dispatcher instance, defined earlier in this module. This
        instance will dispatch out the correct callable for the various
        object types encountered throughout this recursive function.
    node
        the starting Node from asttypes.
    definition
        a standalone definition tuple to start working on the node with;
        if none is provided, an initial definition will be looked up
        using the dispatcher with the node for the generation of output.

    While the dispatcher object is able to provide the lookup directly,
    this extra definition argument allow more flexibility in having
    Token subtypes being able to provide specific definitions also that
    may be required, such as the generation of optional rendering
    output.
    """
    # NOTE(review): indentation was reconstructed from a
    # whitespace-stripped dump -- verify against the upstream source.

    # The inner walk function - this is actually exposed to the token
    # rule objects so they can also make use of it to process the node
    # with the dispatcher.
    nodes = []
    # NotImplemented acts as the "no sourcepath yet" sentinel at the
    # bottom of the stack so there is always a top element to read.
    sourcepath_stack = [NotImplemented]

    def _walk(dispatcher, node, definition=None, token=None):
        # Non-Node values are leaf fragments; hand them to the token
        # handler together with the current (innermost) Node.
        if not isinstance(node, Node):
            for fragment in dispatcher.token(
                    token, nodes[-1], node, sourcepath_stack):
                yield fragment
            return
        # Track the sourcepath of this node, if it declares one.
        push = bool(node.sourcepath)
        if push:
            sourcepath_stack.append(node.sourcepath)
        nodes.append(node)
        if definition is None:
            definition = dispatcher.get_optimized_definition(node)
        for rule in definition:
            for chunk in rule(_walk, dispatcher, node):
                yield chunk
        nodes.pop(-1)
        if push:
            sourcepath_stack.pop(-1)

    # Format layout markers are not handled immediately in the walk -
    # they will simply be buffered so that a collection of them can be
    # handled at once.
    def process_layouts(layout_rule_chunks, last_chunk, chunk):
        before_text = last_chunk.text if last_chunk else None
        after_text = chunk.text if chunk else None
        # the text that was yielded by the previous layout handler
        prev_text = None

        # While Layout rules in a typical definition are typically
        # interspersed with Tokens, certain assumptions with how the
        # Layouts are specified within there will fail when Tokens fail
        # to generate anything for any reason. However, the dispatcher
        # instance will be able to accept and resolve a tuple of Layouts
        # to some handler function, so that a form of normalization can
        # be done. For instance, an (Indent, Newline, Dedent) can
        # simply be resolved to no operations. To achieve this, iterate
        # through the layout_rule_chunks and generate a normalized form
        # for the final handling to happen.

        # the preliminary stack that will be cleared whenever a
        # normalized layout rule chunk is generated.
        lrcs_stack = []
        # first pass: generate both the normalized/finalized lrcs.
        for lrc in layout_rule_chunks:
            lrcs_stack.append(lrc)
            # check every single chunk from left to right...
            for idx in range(len(lrcs_stack)):
                rule = tuple(lrc.rule for lrc in lrcs_stack[idx:])
                handler = dispatcher.layout(rule)
                if handler is not NotImplemented:
                    # not manipulating lrsc_stack from within the same
                    # for loop that it is being iterated upon
                    break
            else:
                # which continues back to the top of the outer for loop
                continue
            # So a handler is found from inside the rules; extend the
            # chunks from the stack that didn't get normalized, and
            # generate a new layout rule chunk.
            lrcs_stack[:] = lrcs_stack[:idx]
            lrcs_stack.append(LayoutChunk(
                rule, handler,
                layout_rule_chunks[idx].node,
            ))

        # second pass: now the processing can be done.
        for lr_chunk in lrcs_stack:
            gen = lr_chunk.handler(
                dispatcher, lr_chunk.node, before_text, after_text, prev_text)
            if not gen:
                continue
            for chunk_from_layout in gen:
                yield chunk_from_layout
                prev_text = chunk_from_layout.text

    # The top level walker implementation
    def walk():
        last_chunk = None
        layout_rule_chunks = []
        for chunk in _walk(dispatcher, node, definition):
            if isinstance(chunk, LayoutChunk):
                layout_rule_chunks.append(chunk)
            else:
                # process layout rule chunks that had been cached.
                for chunk_from_layout in process_layouts(
                        layout_rule_chunks, last_chunk, chunk):
                    yield chunk_from_layout
                layout_rule_chunks[:] = []
                yield chunk
                last_chunk = chunk
        # process the remaining layout rule chunks.
        for chunk_from_layout in process_layouts(
                layout_rule_chunks, last_chunk, None):
            yield chunk_from_layout

    for chunk in walk():
        yield chunk
def diff(self, path_a, path_b):
""" Performs a deep comparison of path_a/ and path_b/
For each child, it yields (rv, child) where rv:
-1 if doesn't exist in path_b (destination)
0 if they are different
1 if it doesn't exist in path_a (source)
"""
path_a = path_a.rstrip("/")
path_b = path_b.rstrip("/")
if not self.exists(path_a) or not self.exists(path_b):
return
if not self.equal(path_a, path_b):
yield 0, "/"
seen = set()
len_a = len(path_a)
len_b = len(path_b)
# first, check what's missing & changed in dst
for child_a, level in self.tree(path_a, 0, True):
child_sub = child_a[len_a + 1:]
child_b = os.path.join(path_b, child_sub)
if not self.exists(child_b):
yield -1, child_sub
else:
if not self.equal(child_a, child_b):
yield 0, child_sub
seen.add(child_sub)
# now, check what's new in dst
for child_b, level in self.tree(path_b, 0, True):
child_sub = child_b[len_b + 1:]
if child_sub not in seen:
yield 1, child_sub | Performs a deep comparison of path_a/ and path_b/
For each child, it yields (rv, child) where rv:
-1 if doesn't exist in path_b (destination)
0 if they are different
1 if it doesn't exist in path_a (source) | Below is the instruction that describes the task:
### Input:
Performs a deep comparison of path_a/ and path_b/
For each child, it yields (rv, child) where rv:
-1 if doesn't exist in path_b (destination)
0 if they are different
1 if it doesn't exist in path_a (source)
### Response:
def diff(self, path_a, path_b):
    """Deeply compare the trees rooted at ``path_a`` and ``path_b``.

    For each child, yields ``(rv, child)`` where ``rv`` is:
      * -1 when the child does not exist in ``path_b`` (destination),
      *  0 when the two children differ,
      *  1 when the child does not exist in ``path_a`` (source).
    """
    path_a, path_b = path_a.rstrip("/"), path_b.rstrip("/")
    # Nothing to report unless both roots exist.
    if not (self.exists(path_a) and self.exists(path_b)):
        return
    if not self.equal(path_a, path_b):
        yield 0, "/"
    prefix_a, prefix_b = len(path_a), len(path_b)
    visited = set()
    # Pass 1: walk the source tree, reporting missing or changed entries.
    for src_child, _level in self.tree(path_a, 0, True):
        rel = src_child[prefix_a + 1:]
        dst_child = os.path.join(path_b, rel)
        if not self.exists(dst_child):
            yield -1, rel
        else:
            if not self.equal(src_child, dst_child):
                yield 0, rel
            visited.add(rel)
    # Pass 2: walk the destination tree, reporting entries new there.
    for dst_child, _level in self.tree(path_b, 0, True):
        rel = dst_child[prefix_b + 1:]
        if rel not in visited:
            yield 1, rel
def resource_create_ticket(self, token, id, scopes, **kwargs):
"""
Create a ticket form permission to resource.
https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi
:param str token: user access token
:param str id: resource id
:param list scopes: scopes access is wanted
:param dict claims: (optional)
:rtype: dict
"""
data = dict(resource_id=id, resource_scopes=scopes, **kwargs)
return self._realm.client.post(
self.well_known['permission_endpoint'],
data=self._dumps([data]),
headers=self.get_headers(token)
) | Create a ticket form permission to resource.
https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi
:param str token: user access token
:param str id: resource id
:param list scopes: scopes access is wanted
:param dict claims: (optional)
:rtype: dict | Below is the instruction that describes the task:
### Input:
Create a ticket form permission to resource.
https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi
:param str token: user access token
:param str id: resource id
:param list scopes: scopes access is wanted
:param dict claims: (optional)
:rtype: dict
### Response:
def resource_create_ticket(self, token, id, scopes, **kwargs):
    """
    Create a ticket for permission to a resource.

    https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi

    :param str token: user access token
    :param str id: resource id
    :param list scopes: scopes access is wanted
    :param dict claims: (optional)
    :rtype: dict
    """
    # Build the single permission-request record the endpoint expects.
    ticket = dict(resource_id=id, resource_scopes=scopes, **kwargs)
    endpoint = self.well_known['permission_endpoint']
    # The API takes a JSON *list* of permission requests.
    return self._realm.client.post(
        endpoint,
        data=self._dumps([ticket]),
        headers=self.get_headers(token),
    )
def get_last_nonce(app, key, nonce):
"""
Get the last_nonce used by the given key from the SQLAlchemy database.
Update the last_nonce to nonce at the same time.
:param str key: the public key the nonce belongs to
:param int nonce: the last nonce used by this key
"""
uk = ses.query(um.UserKey).filter(um.UserKey.key==key)\
.filter(um.UserKey.last_nonce<nonce * 1000).first()
if not uk:
return None
lastnonce = copy.copy(uk.last_nonce)
# TODO Update DB record in same query as above, if possible
uk.last_nonce = nonce * 1000
try:
ses.commit()
except Exception as e:
current_app.logger.exception(e)
ses.rollback()
ses.flush()
return lastnonce | Get the last_nonce used by the given key from the SQLAlchemy database.
Update the last_nonce to nonce at the same time.
:param str key: the public key the nonce belongs to
:param int nonce: the last nonce used by this key | Below is the instruction that describes the task:
### Input:
Get the last_nonce used by the given key from the SQLAlchemy database.
Update the last_nonce to nonce at the same time.
:param str key: the public key the nonce belongs to
:param int nonce: the last nonce used by this key
### Response:
def get_last_nonce(app, key, nonce):
    """
    Get the last_nonce used by the given key from the SQLAlchemy database.
    Update the last_nonce to nonce at the same time.

    :param str key: the public key the nonce belongs to
    :param int nonce: the last nonce used by this key
    """
    # Only match a row whose stored nonce is strictly older than the
    # incoming one; stored values are scaled by 1000 (presumably a
    # millisecond-resolution convention -- TODO confirm with schema).
    uk = ses.query(um.UserKey).filter(um.UserKey.key==key)\
        .filter(um.UserKey.last_nonce<nonce * 1000).first()
    if not uk:
        # No such key, or the supplied nonce is not newer than the stored one.
        return None
    # Preserve the previous value before it is overwritten below.
    lastnonce = copy.copy(uk.last_nonce)
    # TODO Update DB record in same query as above, if possible
    uk.last_nonce = nonce * 1000
    try:
        ses.commit()
    except Exception as e:
        # Best-effort persistence: log the failure and roll back, but still
        # return the previously stored nonce to the caller.
        current_app.logger.exception(e)
        ses.rollback()
        ses.flush()
    return lastnonce
def clusters(self):
"""Instance depends on the API version:
* 2018-01-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ClustersOperations>`
"""
api_version = self._get_api_version('clusters')
if api_version == '2018-01-01-preview':
from .v2018_01_01_preview.operations import ClustersOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-01-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ClustersOperations>` | Below is the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-01-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ClustersOperations>`
### Response:
def clusters(self):
    """Instance depends on the API version:

    * 2018-01-01-preview: :class:`ClustersOperations<azure.mgmt.eventhub.v2018_01_01_preview.operations.ClustersOperations>`
    """
    api_version = self._get_api_version('clusters')
    # Guard clause: only the preview API version carries this operation group.
    if api_version != '2018-01-01-preview':
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    from .v2018_01_01_preview.operations import ClustersOperations as OperationClass
    return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
conf,
nesting_key=None):
'''
Get pillar data from Vault for the configuration ``conf``.
'''
comps = conf.split()
paths = [comp for comp in comps if comp.startswith('path=')]
if not paths:
log.error('"%s" is not a valid Vault ext_pillar config', conf)
return {}
vault_pillar = {}
try:
path = paths[0].replace('path=', '')
path = path.format(**{'minion': minion_id})
url = 'v1/{0}'.format(path)
response = __utils__['vault.make_request']('GET', url)
if response.status_code == 200:
vault_pillar = response.json().get('data', {})
else:
log.info('Vault secret not found for: %s', path)
except KeyError:
log.error('No such path in Vault: %s', path)
if nesting_key:
vault_pillar = {nesting_key: vault_pillar}
return vault_pillar | Get pillar data from Vault for the configuration ``conf``. | Below is the instruction that describes the task:
### Input:
Get pillar data from Vault for the configuration ``conf``.
### Response:
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               conf,
               nesting_key=None):
    '''
    Get pillar data from Vault for the configuration ``conf``.
    '''
    # The config line is a whitespace-separated list; we only honour the
    # first "path=..." component.
    paths = [part for part in conf.split() if part.startswith('path=')]
    if not paths:
        log.error('"%s" is not a valid Vault ext_pillar config', conf)
        return {}
    vault_pillar = {}
    try:
        path = paths[0].replace('path=', '')
        # Allow per-minion templating, e.g. path=secret/{minion}.
        path = path.format(**{'minion': minion_id})
        response = __utils__['vault.make_request']('GET', 'v1/{0}'.format(path))
        if response.status_code == 200:
            vault_pillar = response.json().get('data', {})
        else:
            log.info('Vault secret not found for: %s', path)
    except KeyError:
        log.error('No such path in Vault: %s', path)
    # Optionally namespace the result under a single pillar key.
    if nesting_key:
        vault_pillar = {nesting_key: vault_pillar}
    return vault_pillar
def nodeSatisfiesValues(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool:
""" `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v.
"""
if nc.values is None:
return True
else:
if any(_nodeSatisfiesValue(cntxt, n, vsv) for vsv in nc.values):
return True
else:
cntxt.fail_reason = f"Node: {cntxt.n3_mapper.n3(n)} not in value set:\n\t " \
f"{as_json(cntxt.type_last(nc), indent=None)[:60]}..."
return False | `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v. | Below is the instruction that describes the task:
### Input:
`5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_
For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v.
### Response:
def nodeSatisfiesValues(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool:
    """ `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_

    For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v.
    """
    # A constraint without a value set is trivially satisfied.
    if nc.values is None:
        return True
    if any(_nodeSatisfiesValue(cntxt, n, vsv) for vsv in nc.values):
        return True
    # Record a human-readable failure reason on the evaluation context.
    cntxt.fail_reason = f"Node: {cntxt.n3_mapper.n3(n)} not in value set:\n\t " \
                        f"{as_json(cntxt.type_last(nc), indent=None)[:60]}..."
    return False
def set_image(self, image):
"""
Update the current comparison (real) image
"""
if isinstance(image, np.ndarray):
image = util.Image(image)
if isinstance(image, util.NullImage):
self.model_as_data = True
else:
self.model_as_data = False
self.image = image
self._data = self.image.get_padded_image(self.pad)
# set up various slicers and Tiles associated with the image and pad
self.oshape = util.Tile(self._data.shape)
self.ishape = self.oshape.pad(-self.pad)
self.inner = self.ishape.slicer
for c in self.comps:
c.set_shape(self.oshape, self.ishape)
self._model = np.zeros(self._data.shape, dtype=np.float64)
self._residuals = np.zeros(self._data.shape, dtype=np.float64)
self.calculate_model() | Update the current comparison (real) image | Below is the instruction that describes the task:
### Input:
Update the current comparison (real) image
### Response:
def set_image(self, image):
    """
    Update the current comparison (real) image
    """
    # Promote raw arrays to the Image wrapper type.
    if isinstance(image, np.ndarray):
        image = util.Image(image)
    # A NullImage means the generated model itself stands in for the data.
    self.model_as_data = isinstance(image, util.NullImage)
    self.image = image
    self._data = self.image.get_padded_image(self.pad)
    # set up various slicers and Tiles associated with the image and pad
    self.oshape = util.Tile(self._data.shape)
    self.ishape = self.oshape.pad(-self.pad)
    self.inner = self.ishape.slicer
    for comp in self.comps:
        comp.set_shape(self.oshape, self.ishape)
    self._model = np.zeros(self._data.shape, dtype=np.float64)
    self._residuals = np.zeros(self._data.shape, dtype=np.float64)
    self.calculate_model()
def directories(self):
""" Return the names of directories to be created. """
directories_description = [
self.project_name,
self.project_name + '/conf',
self.project_name + '/static',
]
return directories_description | Return the names of directories to be created. | Below is the instruction that describes the task:
### Input:
Return the names of directories to be created.
### Response:
def directories(self):
    """Return the names of directories to be created."""
    root = self.project_name
    # Project root plus its fixed conf/ and static/ subdirectories.
    return [root, root + '/conf', root + '/static']
def _get_collection_restrictions(collection):
"""Get all restrictions for a given collection, users and fireroles."""
try:
from invenio.dbquery import run_sql
from invenio.access_control_firerole import compile_role_definition
except ImportError:
from invenio.modules.access.firerole import compile_role_definition
from invenio.legacy.dbquery import run_sql
res = run_sql(
'SELECT r.firerole_def_src, email '
'FROM accROLE as r '
'JOIN accROLE_accACTION_accARGUMENT ON r.id=id_accROLE '
'JOIN accARGUMENT AS a ON a.id=id_accARGUMENT '
'JOIN user_accROLE AS u ON r.id=u.id_accROLE '
'JOIN user ON user.id=u.id_user '
'WHERE a.keyword="collection" AND '
'a.value=%s AND '
'id_accACTION=(select id from accACTION where name="viewrestrcoll")',
(collection, ), run_on_slave=True
)
fireroles = set()
users = set()
for f, u in res:
fireroles.add(compile_role_definition(f))
users.add(u)
return {'fireroles': list(fireroles), 'users': users} | Get all restrictions for a given collection, users and fireroles. | Below is the instruction that describes the task:
### Input:
Get all restrictions for a given collection, users and fireroles.
### Response:
def _get_collection_restrictions(collection):
    """Get all restrictions for a given collection, users and fireroles."""
    # Import lazily: module paths moved between legacy and modular Invenio.
    try:
        from invenio.dbquery import run_sql
        from invenio.access_control_firerole import compile_role_definition
    except ImportError:
        from invenio.modules.access.firerole import compile_role_definition
        from invenio.legacy.dbquery import run_sql
    query = (
        'SELECT r.firerole_def_src, email '
        'FROM accROLE as r '
        'JOIN accROLE_accACTION_accARGUMENT ON r.id=id_accROLE '
        'JOIN accARGUMENT AS a ON a.id=id_accARGUMENT '
        'JOIN user_accROLE AS u ON r.id=u.id_accROLE '
        'JOIN user ON user.id=u.id_user '
        'WHERE a.keyword="collection" AND '
        'a.value=%s AND '
        'id_accACTION=(select id from accACTION where name="viewrestrcoll")'
    )
    rows = run_sql(query, (collection, ), run_on_slave=True)
    # Deduplicate compiled fireroles and user emails via set comprehensions.
    fireroles = {compile_role_definition(firerole) for firerole, _email in rows}
    users = {email for _firerole, email in rows}
    return {'fireroles': list(fireroles), 'users': users}
def is_in_interval(n, l, r, border = 'included'):
"""
Checks whether a number is inside the interval l, r.
"""
if 'included' == border:
return (n >= l) and (n <= r)
elif 'excluded' == border:
return (n > l) and (n < r)
else:
raise ValueError('borders must be either \'included\' or \'excluded\'') | Checks whether a number is inside the interval l, r. | Below is the instruction that describes the task:
### Input:
Checks whether a number is inside the interval l, r.
### Response:
def is_in_interval(n, l, r, border='included'):
    """
    Checks whether a number is inside the interval l, r.
    """
    if border == 'included':
        return l <= n <= r
    if border == 'excluded':
        return l < n < r
    raise ValueError("borders must be either 'included' or 'excluded'")
def save_file(data_file, data, dry_run=None):
"""Writes JSON data to data file."""
if dry_run:
return
with open(data_file, 'w', encoding='utf-8') as f:
if sys.version_info > (3, 0):
f.write(json.dumps(data))
else:
f.write(json.dumps(data).decode('utf-8')) | Writes JSON data to data file. | Below is the instruction that describes the task:
### Input:
Writes JSON data to data file.
### Response:
def save_file(data_file, data, dry_run=None):
    """Writes JSON data to data file."""
    if dry_run:
        return
    with open(data_file, 'w', encoding='utf-8') as f:
        payload = json.dumps(data)
        # On Python 2, json.dumps yields a byte string; decode it so the
        # text-mode file receives unicode.
        if not sys.version_info > (3, 0):
            payload = payload.decode('utf-8')
        f.write(payload)
def export(self):
"""
Returns a representation of the Mechanism Name which is suitable for direct string
comparison against other exported Mechanism Names. Its form is defined in the GSSAPI
specification (RFC 2743). It can also be re-imported by constructing a :class:`Name` with
the `name_type` param set to :const:`gssapi.C_NT_EXPORT_NAME`.
:returns: an exported bytestring representation of this mechanism name
:rtype: bytes
"""
minor_status = ffi.new('OM_uint32[1]')
output_buffer = ffi.new('gss_buffer_desc[1]')
retval = C.gss_export_name(
minor_status,
self._name[0],
output_buffer
)
try:
if GSS_ERROR(retval):
if minor_status[0] and self._mech_type:
raise _exception_for_status(retval, minor_status[0], self._mech_type)
else:
raise _exception_for_status(retval, minor_status[0])
return _buf_to_str(output_buffer[0])
finally:
if output_buffer[0].length != 0:
C.gss_release_buffer(minor_status, output_buffer) | Returns a representation of the Mechanism Name which is suitable for direct string
comparison against other exported Mechanism Names. Its form is defined in the GSSAPI
specification (RFC 2743). It can also be re-imported by constructing a :class:`Name` with
the `name_type` param set to :const:`gssapi.C_NT_EXPORT_NAME`.
:returns: an exported bytestring representation of this mechanism name
:rtype: bytes | Below is the instruction that describes the task:
### Input:
Returns a representation of the Mechanism Name which is suitable for direct string
comparison against other exported Mechanism Names. Its form is defined in the GSSAPI
specification (RFC 2743). It can also be re-imported by constructing a :class:`Name` with
the `name_type` param set to :const:`gssapi.C_NT_EXPORT_NAME`.
:returns: an exported bytestring representation of this mechanism name
:rtype: bytes
### Response:
def export(self):
    """
    Returns a representation of the Mechanism Name which is suitable for direct string
    comparison against other exported Mechanism Names. Its form is defined in the GSSAPI
    specification (RFC 2743). It can also be re-imported by constructing a :class:`Name` with
    the `name_type` param set to :const:`gssapi.C_NT_EXPORT_NAME`.

    :returns: an exported bytestring representation of this mechanism name
    :rtype: bytes
    """
    minor_status = ffi.new('OM_uint32[1]')
    output_buffer = ffi.new('gss_buffer_desc[1]')
    retval = C.gss_export_name(minor_status, self._name[0], output_buffer)
    try:
        if GSS_ERROR(retval):
            # Prefer a mechanism-specific exception when we have both a
            # minor status and a known mechanism type.
            if minor_status[0] and self._mech_type:
                raise _exception_for_status(retval, minor_status[0], self._mech_type)
            raise _exception_for_status(retval, minor_status[0])
        return _buf_to_str(output_buffer[0])
    finally:
        # Always hand the library-allocated buffer back to GSSAPI.
        if output_buffer[0].length != 0:
            C.gss_release_buffer(minor_status, output_buffer)
def toggle(self, key):
""" Toggles a boolean key """
val = self[key]
assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val)
self.pref_update(key, not val) | Toggles a boolean key | Below is the instruction that describes the task:
### Input:
Toggles a boolean key
### Response:
def toggle(self, key):
    """Flip the boolean preference stored under ``key``."""
    current = self[key]
    assert isinstance(current, bool), 'key[%r] = %r is not a bool' % (key, current)
    self.pref_update(key, not current)
def access_to_sympy(self, var_name, access):
"""
Transform a (multidimensional) variable access to a flattend sympy expression.
Also works with flat array accesses.
"""
base_sizes = self.variables[var_name][1]
expr = sympy.Number(0)
for dimension, a in enumerate(access):
base_size = reduce(operator.mul, base_sizes[dimension+1:], sympy.Integer(1))
expr += base_size*a
return expr | Transform a (multidimensional) variable access to a flattend sympy expression.
Also works with flat array accesses. | Below is the instruction that describes the task:
### Input:
Transform a (multidimensional) variable access to a flattend sympy expression.
Also works with flat array accesses.
### Response:
def access_to_sympy(self, var_name, access):
    """
    Transform a (multidimensional) variable access to a flattend sympy expression.

    Also works with flat array accesses.
    """
    base_sizes = self.variables[var_name][1]
    expr = sympy.Number(0)
    for dim, idx in enumerate(access):
        # Row-major flattening: each index is weighted by the product of
        # all trailing dimension sizes.
        stride = reduce(operator.mul, base_sizes[dim + 1:], sympy.Integer(1))
        expr = expr + stride * idx
    return expr
def interp_like(self, other, method='linear', assume_sorted=False,
kwargs={}):
"""Interpolate this object onto the coordinates of another object,
filling the out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to an 1d array-like, which provides coordinates upon
which to index the variables in this dataset.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
Returns
-------
interpolated: xr.Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
Notes
-----
scipy is required.
If the dataset has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
See Also
--------
Dataset.interp
Dataset.reindex_like
"""
coords = alignment.reindex_like_indexers(self, other)
numeric_coords = OrderedDict()
object_coords = OrderedDict()
for k, v in coords.items():
if v.dtype.kind in 'uifcMm':
numeric_coords[k] = v
else:
object_coords[k] = v
ds = self
if object_coords:
# We do not support interpolation along object coordinate.
# reindex instead.
ds = self.reindex(object_coords)
return ds.interp(numeric_coords, method, assume_sorted, kwargs) | Interpolate this object onto the coordinates of another object,
filling the out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to an 1d array-like, which provides coordinates upon
which to index the variables in this dataset.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
Returns
-------
interpolated: xr.Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
Notes
-----
scipy is required.
If the dataset has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
See Also
--------
Dataset.interp
Dataset.reindex_like | Below is the instruction that describes the task:
### Input:
Interpolate this object onto the coordinates of another object,
filling the out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to an 1d array-like, which provides coordinates upon
which to index the variables in this dataset.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
Returns
-------
interpolated: xr.Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
Notes
-----
scipy is required.
If the dataset has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
See Also
--------
Dataset.interp
Dataset.reindex_like
### Response:
def interp_like(self, other, method='linear', assume_sorted=False,
                kwargs=None):
    """Interpolate this object onto the coordinates of another object,
    filling the out of range values with NaN.

    Parameters
    ----------
    other : Dataset or DataArray
        Object with an 'indexes' attribute giving a mapping from dimension
        names to an 1d array-like, which provides coordinates upon
        which to index the variables in this dataset.
    method: string, optional.
        {'linear', 'nearest'} for multidimensional array,
        {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
        for 1-dimensional array. 'linear' is used by default.
    assume_sorted: boolean, optional
        If False, values of coordinates that are interpolated over can be
        in any order and they are sorted first. If True, interpolated
        coordinates are assumed to be an array of monotonically increasing
        values.
    kwargs: dictionary, optional
        Additional keyword passed to scipy's interpolator. Defaults to
        an empty dict.

    Returns
    -------
    interpolated: xr.Dataset
        Another dataset by interpolating this dataset's data along the
        coordinates of the other object.

    Notes
    -----
    scipy is required.
    If the dataset has object-type coordinates, reindex is used for these
    coordinates instead of the interpolation.

    See Also
    --------
    Dataset.interp
    Dataset.reindex_like
    """
    # Fix: the default was a shared mutable ``{}`` (classic mutable-default
    # pitfall); use a None sentinel instead. Behavior is unchanged for all
    # callers.
    if kwargs is None:
        kwargs = {}
    coords = alignment.reindex_like_indexers(self, other)

    numeric_coords = OrderedDict()
    object_coords = OrderedDict()
    for k, v in coords.items():
        # Interpolation is only meaningful along numeric/datetime dtypes.
        if v.dtype.kind in 'uifcMm':
            numeric_coords[k] = v
        else:
            object_coords[k] = v

    ds = self
    if object_coords:
        # We do not support interpolation along object coordinate.
        # reindex instead.
        ds = self.reindex(object_coords)
    return ds.interp(numeric_coords, method, assume_sorted, kwargs)
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by)) | Full-text search using selected `term`. | Below is the instruction that describes the task:
### Input:
Full-text search using selected `term`.
### Response:
def search_bm25(cls, term, weights=None, with_score=False,
                score_alias='score', explicit_ordering=False):
    """Full-text search using selected `term`."""
    # Build the ranking expression from the requested column weights.
    if not weights:
        rank = SQL('rank')
    elif isinstance(weights, dict):
        weight_args = [
            weights.get(field, weights.get(field.name, 1.0))
            for field in cls._meta.sorted_fields
            if isinstance(field, SearchField) and not field.unindexed]
        rank = fn.bm25(cls._meta.entity, *weight_args)
    else:
        rank = fn.bm25(cls._meta.entity, *weights)

    if with_score:
        selection = (cls, rank.alias(score_alias))
        # Unless explicitly asked otherwise, order by the aliased score.
        order_by = rank if explicit_ordering else SQL(score_alias)
    else:
        selection = ()
        order_by = rank

    query = cls.select(*selection)
    query = query.where(cls.match(FTS5Model.clean_query(term)))
    return query.order_by(order_by)
def present(name,
            source,
            aliases=None,
            public=None,
            auto_update=None,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Ensure an image exists; if it is missing, copy or create it from source.

    name :
        An alias of the image, this is used to check if the image exists and
        it will be added as alias to the image on copy/create.

    source :
        Source dict.

        For an LXD to LXD copy:

        .. code-block: yaml

            source:
                type: lxd
                name: ubuntu/xenial/amd64 # This can also be a fingerprint.
                remote_addr: https://images.linuxcontainers.org:8443
                cert: ~/.config/lxd/client.crt
                key: ~/.config/lxd/client.key
                verify_cert: False

        .. attention:

            For this kind of remote you also need to provide:
            - a https:// remote_addr
            - a cert and key
            - verify_cert

        From file:

        .. code-block: yaml

            source:
                type: file
                filename: salt://lxd/files/busybox.tar.xz
                saltenv: base

        From simplestreams:

        .. code-block: yaml

            source:
                type: simplestreams
                server: https://cloud-images.ubuntu.com/releases
                name: xenial/amd64

        From an URL:

        .. code-block: yaml

            source:
                type: url
                url: https://dl.stgraber.org/lxd

    aliases :
        List of aliases to append, can be empty.

    public :
        Make this image publicly available on this instance?
        None on source_type LXD means copy source.
        None on source_type file means False.

    auto_update :
        Try to auto-update from the original source?
        None on source_type LXD means copy source.
        source_type file does not have auto-update.

    remote_addr :
        An URL to a remote server; you also have to give cert and key if
        you provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the certificate; this is True by default, but in
        most cases you want to set it to False, as LXD normally uses
        self-signed certificates.
    '''
    if aliases is None:
        aliases = []

    # Create a copy of aliases, since we're modifying it here
    aliases = aliases[:]
    ret = {
        'name': name,
        'source': source,
        'aliases': aliases,
        'public': public,
        'auto_update': auto_update,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    image = None
    try:
        image = __salt__['lxd.image_get_by_alias'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Image not found
        pass

    if image is None:
        if __opts__['test']:
            # Test is on, just return that we would create the image
            msg = 'Would create the image "{0}"'.format(name)
            ret['changes'] = {'created': msg}
            return _unchanged(ret, msg)

        try:
            # Dispatch on the declared source type; exactly one of these
            # branches is expected to match.
            if source['type'] == 'lxd':
                image = __salt__['lxd.image_copy_lxd'](
                    source['name'],
                    src_remote_addr=source['remote_addr'],
                    src_cert=source['cert'],
                    src_key=source['key'],
                    src_verify_cert=source.get('verify_cert', True),
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=public,
                    auto_update=auto_update,
                    _raw=True
                )

            if source['type'] == 'file':
                if 'saltenv' not in source:
                    source['saltenv'] = __env__
                image = __salt__['lxd.image_from_file'](
                    source['filename'],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    saltenv=source['saltenv'],
                    _raw=True
                )

            if source['type'] == 'simplestreams':
                image = __salt__['lxd.image_from_simplestreams'](
                    source['server'],
                    source['name'],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    auto_update=False if auto_update is None else auto_update,
                    _raw=True
                )

            if source['type'] == 'url':
                image = __salt__['lxd.image_from_url'](
                    source['url'],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    auto_update=False if auto_update is None else auto_update,
                    _raw=True
                )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    # Sync aliases
    if name not in aliases:
        aliases.append(name)

    old_aliases = set([six.text_type(a['name']) for a in image.aliases])
    new_aliases = set(map(six.text_type, aliases))

    alias_changes = []
    # Removed aliases
    for k in old_aliases.difference(new_aliases):
        if not __opts__['test']:
            __salt__['lxd.image_alias_delete'](image, k)
            alias_changes.append('Removed alias "{0}"'.format(k))
        else:
            alias_changes.append('Would remove alias "{0}"'.format(k))

    # New aliases
    for k in new_aliases.difference(old_aliases):
        if not __opts__['test']:
            __salt__['lxd.image_alias_add'](image, k, '')
            alias_changes.append('Added alias "{0}"'.format(k))
        else:
            alias_changes.append('Would add alias "{0}"'.format(k))

    if alias_changes:
        ret['changes']['aliases'] = alias_changes

    # Set public
    if public is not None and image.public != public:
        if not __opts__['test']:
            ret['changes']['public'] = \
                'Setting the image public to {0!s}'.format(public)
            image.public = public
            __salt__['lxd.pylxd_save_object'](image)
        else:
            ret['changes']['public'] = \
                'Would set public to {0!s}'.format(public)

    if __opts__['test'] and ret['changes']:
        return _unchanged(
            ret,
            'Would do {0} changes'.format(len(ret['changes'].keys()))
        )
return _success(ret, '{0} changes'.format(len(ret['changes'].keys()))) | Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normally uses self-signed certificates. | Below is the instruction that describes the task:
### Input:
Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
### Response:
def present(name,
source,
aliases=None,
public=None,
auto_update=None,
remote_addr=None,
cert=None,
key=None,
verify_cert=True):
'''
Ensure an image exists, copy it else from source
name :
An alias of the image, this is used to check if the image exists and
it will be added as alias to the image on copy/create.
source :
Source dict.
For an LXD to LXD copy:
.. code-block: yaml
source:
type: lxd
name: ubuntu/xenial/amd64 # This can also be a fingerprint.
remote_addr: https://images.linuxcontainers.org:8443
cert: ~/.config/lxd/client.crt
key: ~/.config/lxd/client.key
verify_cert: False
.. attention:
For this kind of remote you also need to provide:
- a https:// remote_addr
- a cert and key
- verify_cert
From file:
.. code-block: yaml
source:
type: file
filename: salt://lxd/files/busybox.tar.xz
saltenv: base
From simplestreams:
.. code-block: yaml
source:
type: simplestreams
server: https://cloud-images.ubuntu.com/releases
name: xenial/amd64
From an URL:
.. code-block: yaml
source:
type: url
url: https://dl.stgraber.org/lxd
aliases :
List of aliases to append, can be empty.
public :
Make this image public available on this instance?
None on source_type LXD means copy source
None on source_type file means False
auto_update :
Try to auto-update from the original source?
None on source_type LXD means copy source
source_type file does not have auto-update.
remote_addr :
An URL to a remote Server, you also have to give cert and key if you
provide remote_addr!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Zertifikate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
'''
if aliases is None:
aliases = []
# Create a copy of aliases, since we're modifying it here
aliases = aliases[:]
ret = {
'name': name,
'source': source,
'aliases': aliases,
'public': public,
'auto_update': auto_update,
'remote_addr': remote_addr,
'cert': cert,
'key': key,
'verify_cert': verify_cert,
'changes': {}
}
image = None
try:
image = __salt__['lxd.image_get_by_alias'](
name, remote_addr, cert, key, verify_cert, _raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
except SaltInvocationError as e:
# Image not found
pass
if image is None:
if __opts__['test']:
# Test is on, just return that we would create the image
msg = 'Would create the image "{0}"'.format(name)
ret['changes'] = {'created': msg}
return _unchanged(ret, msg)
try:
if source['type'] == 'lxd':
image = __salt__['lxd.image_copy_lxd'](
source['name'],
src_remote_addr=source['remote_addr'],
src_cert=source['cert'],
src_key=source['key'],
src_verify_cert=source.get('verify_cert', True),
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=public,
auto_update=auto_update,
_raw=True
)
if source['type'] == 'file':
if 'saltenv' not in source:
source['saltenv'] = __env__
image = __salt__['lxd.image_from_file'](
source['filename'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
saltenv=source['saltenv'],
_raw=True
)
if source['type'] == 'simplestreams':
image = __salt__['lxd.image_from_simplestreams'](
source['server'],
source['name'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
auto_update=False if auto_update is None else auto_update,
_raw=True
)
if source['type'] == 'url':
image = __salt__['lxd.image_from_url'](
source['url'],
remote_addr=remote_addr,
cert=cert,
key=key,
verify_cert=verify_cert,
aliases=aliases,
public=False if public is None else public,
auto_update=False if auto_update is None else auto_update,
_raw=True
)
except CommandExecutionError as e:
return _error(ret, six.text_type(e))
# Sync aliases
if name not in aliases:
aliases.append(name)
old_aliases = set([six.text_type(a['name']) for a in image.aliases])
new_aliases = set(map(six.text_type, aliases))
alias_changes = []
# Removed aliases
for k in old_aliases.difference(new_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_delete'](image, k)
alias_changes.append('Removed alias "{0}"'.format(k))
else:
alias_changes.append('Would remove alias "{0}"'.format(k))
# New aliases
for k in new_aliases.difference(old_aliases):
if not __opts__['test']:
__salt__['lxd.image_alias_add'](image, k, '')
alias_changes.append('Added alias "{0}"'.format(k))
else:
alias_changes.append('Would add alias "{0}"'.format(k))
if alias_changes:
ret['changes']['aliases'] = alias_changes
# Set public
if public is not None and image.public != public:
if not __opts__['test']:
ret['changes']['public'] = \
'Setting the image public to {0!s}'.format(public)
image.public = public
__salt__['lxd.pylxd_save_object'](image)
else:
ret['changes']['public'] = \
'Would set public to {0!s}'.format(public)
if __opts__['test'] and ret['changes']:
return _unchanged(
ret,
'Would do {0} changes'.format(len(ret['changes'].keys()))
)
return _success(ret, '{0} changes'.format(len(ret['changes'].keys()))) |
def add_protein_to_organisms(self, orgprot_list):
    '''
    Protein factory method.

    Iterates through a list of "org_accession,protein_accession" id
    strings, matching each against the CDS features of every organism's
    GenBank record.  On a match, a new Protein object is created and
    stored in that Organism's protein list.

    Args
        orgprot_list: list of comma-separated id strings (the original
            note said SearchIO hit objects, but the code splits each
            entry on ',' -- confirm with the caller).
    '''
    # NOTE: Python 2 code (print statements, "except Exc, e" syntax).
    for org in self.organisms:
        handle = open(org.genome_path, "rU")
        print 'adding proteins to organism', org.accession
        try:
            seq_record = SeqIO.read(handle, "genbank")
            feature_list = []
            for id in orgprot_list:
                # Each entry is "org_accession,protein_accession".
                org_id = id.split(',')[0]
                prot_id = id.split(',')[1]
                if org.accession == org_id:
                    for feature in seq_record.features:
                        if feature.type == 'CDS':
                            feat_prot_acc = feature.qualifiers['protein_id'][0]
                            if feat_prot_acc == prot_id:
                                #print 'appending', hit_prot_acc
                                org.proteins.append(Protein(feature))
            # Free the parsed record before moving to the next genome.
            del(seq_record)
        except ValueError,e:
            print 'error for ', org.accession, str(e)
        except AssertionError,e:
            print 'error for ', org.accession, str(e)
        except UnboundLocalError,e:
            print 'error for ', org.accession, str(e)
        except KeyError,e:
            print 'error for ', org.accession, str(e)
        handle.close()
handle.close() | Protein factory method.
Iterates through a list of SearchIO hit objects, matches
the accession against SeqRecord features for each organism.
If there is a match, the new Protein object is created and
stored in the protein list of that Organism.
Args
orgprot_list: a list of Biopython SearchIO hit objects (I think). | Below is the instruction that describes the task:
### Input:
Protein factory method.
Iterates through a list of SearchIO hit objects, matches
the accession against SeqRecord features for each organism.
If there is a match, the new Protein object is created and
stored in the protein list of that Organism.
Args
orgprot_list: a list Biopython SearchIO hit objects (I think).
### Response:
def add_protein_to_organisms(self, orgprot_list):
'''
Protein factory method.
Iterates through a list of SearchIO hit objects, matches
the accession against SeqRecord features for each organism.
If there is a match, the new Protein object is created and
stored in the protein list of that Organism.
Args
orgprot_list: a list Biopython SearchIO hit objects (I think).
'''
for org in self.organisms:
handle = open(org.genome_path, "rU")
print 'adding proteins to organism', org.accession
try:
seq_record = SeqIO.read(handle, "genbank")
feature_list = []
for id in orgprot_list:
org_id = id.split(',')[0]
prot_id = id.split(',')[1]
if org.accession == org_id:
for feature in seq_record.features:
if feature.type == 'CDS':
feat_prot_acc = feature.qualifiers['protein_id'][0]
if feat_prot_acc == prot_id:
#print 'appending', hit_prot_acc
org.proteins.append(Protein(feature))
del(seq_record)
except ValueError,e:
print 'error for ', org.accession, str(e)
except AssertionError,e:
print 'error for ', org.accession, str(e)
except UnboundLocalError,e:
print 'error for ', org.accession, str(e)
except KeyError,e:
print 'error for ', org.accession, str(e)
handle.close() |
def callback_handler_install(self, prompt, callback):
    u'''bool readline_callback_handler_install ( string prompt, callback callback)

    Initializes the readline callback interface and terminal, prints the
    prompt, and returns immediately.
    '''
    # Remember the callback so the readline machinery can invoke it later.
    self.callback = callback
self.readline_setup(prompt) | u'''bool readline_callback_handler_install ( string prompt, callback callback)
Initializes the readline callback interface and terminal, prints the prompt and returns immediately | Below is the instruction that describes the task:
### Input:
u'''bool readline_callback_handler_install ( string prompt, callback callback)
Initializes the readline callback interface and terminal, prints the prompt and returns immediately
### Response:
def callback_handler_install(self, prompt, callback):
u'''bool readline_callback_handler_install ( string prompt, callback callback)
Initializes the readline callback interface and terminal, prints the prompt and returns immediately
'''
self.callback = callback
self.readline_setup(prompt) |
def _inject():
    """Copy functions from OpenGL.GL into the _pyopengl namespace."""
    NS = _pyopengl2.__dict__
    # Resolve each required GL function via PyOpenGL and bind it under
    # our own name in the _pyopengl2 module namespace.
    for glname, ourname in _pyopengl2._functions_to_import:
        func = _get_function_from_pyopengl(glname)
NS[ourname] = func | Copy functions from OpenGL.GL into _pyopengl namespace. | Below is the the instruction that describes the task:
### Input:
Copy functions from OpenGL.GL into _pyopengl namespace.
### Response:
def _inject():
""" Copy functions from OpenGL.GL into _pyopengl namespace.
"""
NS = _pyopengl2.__dict__
for glname, ourname in _pyopengl2._functions_to_import:
func = _get_function_from_pyopengl(glname)
NS[ourname] = func |
def show_system_monitor_output_switch_status_switch_state(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload for the show-system-monitor RPC and invokes
    the callback with it.  ``switch_state`` is required in ``kwargs``;
    ``callback`` is optional and defaults to ``self._callback``.
    """
    config = ET.Element("config")
    show_system_monitor = ET.Element("show_system_monitor")
    # NOTE: rebinds config to the new root element (generated-code quirk).
    config = show_system_monitor
    output = ET.SubElement(show_system_monitor, "output")
    switch_status = ET.SubElement(output, "switch-status")
    switch_state = ET.SubElement(switch_status, "switch-state")
    switch_state.text = kwargs.pop('switch_state')
    callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_system_monitor_output_switch_status_switch_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_system_monitor = ET.Element("show_system_monitor")
config = show_system_monitor
output = ET.SubElement(show_system_monitor, "output")
switch_status = ET.SubElement(output, "switch-status")
switch_state = ET.SubElement(switch_status, "switch-state")
switch_state.text = kwargs.pop('switch_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def usermacro_updateglobal(globalmacroid, value, **kwargs):
    '''
    Update existing global usermacro.

    :param globalmacroid: id of the global usermacro
    :param value: new value of the global usermacro
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: ID of the updated global usermacro.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.usermacro_updateglobal 1 'public'
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            params = {}
            method = 'usermacro.updateglobal'
            params['globalmacroid'] = globalmacroid
            params['value'] = value
            params = _params_extend(params, _ignore_name=True, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['globalmacroids'][0]
        else:
            # No connection args: fall through to the empty-dict return
            # via the KeyError handler below.
            raise KeyError
    except KeyError:
return ret | Update existing global usermacro.
:param globalmacroid: id of the host usermacro
:param value: new value of the host usermacro
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the update global usermacro.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_updateglobal 1 'public' | Below is the instruction that describes the task:
### Input:
Update existing global usermacro.
:param globalmacroid: id of the host usermacro
:param value: new value of the host usermacro
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the update global usermacro.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_updateglobal 1 'public'
### Response:
def usermacro_updateglobal(globalmacroid, value, **kwargs):
'''
Update existing global usermacro.
:param globalmacroid: id of the host usermacro
:param value: new value of the host usermacro
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
return: ID of the update global usermacro.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_updateglobal 1 'public'
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
params = {}
method = 'usermacro.updateglobal'
params['globalmacroid'] = globalmacroid
params['value'] = value
params = _params_extend(params, _ignore_name=True, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['globalmacroids'][0]
else:
raise KeyError
except KeyError:
return ret |
def parse_extra_model_fields(extra_model_fields):
    """
    Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model
    and instantiating the extra fields. Returns a mapping of
    model_key -> fields where model_key is a pair of (app_label,
    model_name) and fields is a list of (field_name, field_instance) pairs.
    """
    # model_key -> [(field_name, field_instance), ...]
    fields = defaultdict(list)
    for entry in extra_model_fields:
        # entry is (field_path, field_class_path, args, kwargs).
        model_key, field_name = parse_field_path(entry[0])
        field_class = import_field(entry[1])
        field_args, field_kwargs = entry[2:]
        try:
            field = field_class(*field_args, **field_kwargs)
        except TypeError as e:
            # Surface bad constructor arguments as a configuration error.
            raise ImproperlyConfigured(
                "The EXTRA_MODEL_FIELDS setting contains arguments for the "
                "field '%s' which could not be applied: %s" % (entry[1], e))
        fields[model_key].append((field_name, field))
return fields | Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model
and instantiating the extra fields. Returns a sequence of tuples of
the form (model_key, fields) where model_key is a pair of app_label,
model_name and fields is a list of (field_name, field_instance) pairs. | Below is the instruction that describes the task:
### Input:
Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model
and instantiating the extra fields. Returns a sequence of tuples of
the form (model_key, fields) where model_key is a pair of app_label,
model_name and fields is a list of (field_name, field_instance) pairs.
### Response:
def parse_extra_model_fields(extra_model_fields):
"""
Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model
and instantiating the extra fields. Returns a sequence of tuples of
the form (model_key, fields) where model_key is a pair of app_label,
model_name and fields is a list of (field_name, field_instance) pairs.
"""
fields = defaultdict(list)
for entry in extra_model_fields:
model_key, field_name = parse_field_path(entry[0])
field_class = import_field(entry[1])
field_args, field_kwargs = entry[2:]
try:
field = field_class(*field_args, **field_kwargs)
except TypeError as e:
raise ImproperlyConfigured(
"The EXTRA_MODEL_FIELDS setting contains arguments for the "
"field '%s' which could not be applied: %s" % (entry[1], e))
fields[model_key].append((field_name, field))
return fields |
def offset(self, location, dy=0):
    """ Returns a new ``Region`` offset from this one by ``location``.

    Width and height remain the same.  ``location`` may be a Location
    instance or a plain dx value (with ``dy`` as the second offset).
    """
    if not isinstance(location, Location):
        # Assume variables passed were dx,dy
        location = Location(location, dy)
    r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen()
    if r is None:
        raise ValueError("Specified region is not visible on any screen")
        # NOTE(review): unreachable -- the raise above always fires first.
        return None
return r | Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same | Below is the instruction that describes the task:
### Input:
Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
### Response:
def offset(self, location, dy=0):
""" Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
"""
if not isinstance(location, Location):
# Assume variables passed were dx,dy
location = Location(location, dy)
r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen()
if r is None:
raise ValueError("Specified region is not visible on any screen")
return None
return r |
def post_check_request(self, check, subscribers):
    """
    Issues a check execution request.

    :param check: name of the check to execute.
    :param subscribers: subscriber value; wrapped in a one-element list
        in the request payload.
    """
    data = {
        'check': check,
        'subscribers': [subscribers]
    }
    self._request('POST', '/request', data=json.dumps(data))
return True | Issues a check execution request. | Below is the the instruction that describes the task:
### Input:
Issues a check execution request.
### Response:
def post_check_request(self, check, subscribers):
"""
Issues a check execution request.
"""
data = {
'check': check,
'subscribers': [subscribers]
}
self._request('POST', '/request', data=json.dumps(data))
return True |
def _add_hash(source):
    """Add a leading hash '#' at the beginning of every line in the source."""
    # Right-strip each line so no trailing whitespace survives commenting.
    source = '\n'.join('# ' + line.rstrip()
                       for line in source.splitlines())
return source | Add a leading hash '#' at the beginning of every line in the source. | Below is the the instruction that describes the task:
### Input:
Add a leading hash '#' at the beginning of every line in the source.
### Response:
def _add_hash(source):
"""Add a leading hash '#' at the beginning of every line in the source."""
source = '\n'.join('# ' + line.rstrip()
for line in source.splitlines())
return source |
def text(self, cls = 'current', retaintokenisation=False, previousdelimiter="",strict=False, correctionhandling=CorrectionHandling.CURRENT, normalize_spaces=False):
    """See :meth:`AbstractElement.text`"""
    # Map the legacy 'original' text class onto ORIGINAL correction handling.
    if cls == 'original': correctionhandling = CorrectionHandling.ORIGINAL #backward compatibility
    if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
        # Prefer the post-correction text (New/Current children).
        for e in self:
            if isinstance(e, New) or isinstance(e, Current):
                s = previousdelimiter + e.text(cls, retaintokenisation,"", strict, correctionhandling)
                if normalize_spaces:
                    return norm_spaces(s)
                else:
                    return s
    if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
        # Fall back to (or explicitly request) the pre-correction text.
        for e in self:
            if isinstance(e, Original):
                s = previousdelimiter + e.text(cls, retaintokenisation,"", strict, correctionhandling)
                if normalize_spaces:
                    return norm_spaces(s)
                else:
                    return s
raise NoSuchText | See :meth:`AbstractElement.text` | Below is the the instruction that describes the task:
### Input:
See :meth:`AbstractElement.text`
### Response:
def text(self, cls = 'current', retaintokenisation=False, previousdelimiter="",strict=False, correctionhandling=CorrectionHandling.CURRENT, normalize_spaces=False):
"""See :meth:`AbstractElement.text`"""
if cls == 'original': correctionhandling = CorrectionHandling.ORIGINAL #backward compatibility
if correctionhandling in (CorrectionHandling.CURRENT, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, New) or isinstance(e, Current):
s = previousdelimiter + e.text(cls, retaintokenisation,"", strict, correctionhandling)
if normalize_spaces:
return norm_spaces(s)
else:
return s
if correctionhandling in (CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER):
for e in self:
if isinstance(e, Original):
s = previousdelimiter + e.text(cls, retaintokenisation,"", strict, correctionhandling)
if normalize_spaces:
return norm_spaces(s)
else:
return s
raise NoSuchText |
def _rule_compare(rule1, rule2):
    '''
    Compare the common keys between security group rules against each other.

    Returns False as soon as a key present in both rules differs.
    '''
    commonkeys = set(rule1.keys()).intersection(rule2.keys())
    for key in commonkeys:
        if rule1[key] != rule2[key]:
            return False
return True | Compare the common keys between security group rules against eachother | Below is the the instruction that describes the task:
### Input:
Compare the common keys between security group rules against eachother
### Response:
def _rule_compare(rule1, rule2):
'''
Compare the common keys between security group rules against eachother
'''
commonkeys = set(rule1.keys()).intersection(rule2.keys())
for key in commonkeys:
if rule1[key] != rule2[key]:
return False
return True |
def get_tasks(self):
    """Returns an ordered dictionary {task_name: task} of all tasks within this workflow.

    :return: Ordered dictionary with key being task_name (str) and an
        instance of a corresponding task from this workflow
    :rtype: OrderedDict
    """
    tasks = collections.OrderedDict()
    # Preserve dependency order so iteration follows execution order.
    for dep in self.ordered_dependencies:
        tasks[dep.name] = dep.task
return tasks | Returns an ordered dictionary {task_name: task} of all tasks within this workflow.
:return: Ordered dictionary with key being task_name (str) and an instance of a corresponding task from this
workflow
:rtype: OrderedDict | Below is the instruction that describes the task:
### Input:
Returns an ordered dictionary {task_name: task} of all tasks within this workflow.
:return: Ordered dictionary with key being task_name (str) and an instance of a corresponding task from this
workflow
:rtype: OrderedDict
### Response:
def get_tasks(self):
"""Returns an ordered dictionary {task_name: task} of all tasks within this workflow.
:return: Ordered dictionary with key being task_name (str) and an instance of a corresponding task from this
workflow
:rtype: OrderedDict
"""
tasks = collections.OrderedDict()
for dep in self.ordered_dependencies:
tasks[dep.name] = dep.task
return tasks |
def loudness(self, gain_db=-10.0, reference_level=65.0):
'''Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
'''
if not is_number(gain_db):
raise ValueError('gain_db must be a number.')
if not is_number(reference_level):
raise ValueError('reference_level must be a number')
if reference_level > 75 or reference_level < 50:
raise ValueError('reference_level must be between 50 and 75')
effect_args = [
'loudness',
'{:f}'.format(gain_db),
'{:f}'.format(reference_level)
]
self.effects.extend(effect_args)
self.effects_log.append('loudness')
return self | Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain | Below is the instruction that describes the task:
### Input:
Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
### Response:
def loudness(self, gain_db=-10.0, reference_level=65.0):
'''Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
'''
if not is_number(gain_db):
raise ValueError('gain_db must be a number.')
if not is_number(reference_level):
raise ValueError('reference_level must be a number')
if reference_level > 75 or reference_level < 50:
raise ValueError('reference_level must be between 50 and 75')
effect_args = [
'loudness',
'{:f}'.format(gain_db),
'{:f}'.format(reference_level)
]
self.effects.extend(effect_args)
self.effects_log.append('loudness')
return self |
def set_amount(self, amount):
"""
Set transaction amount
"""
if amount:
try:
self.IsoMessage.FieldData(4, int(amount))
except ValueError:
self.IsoMessage.FieldData(4, 0)
    self.rebuild() | Set transaction amount | Below is the instruction that describes the task:
### Input:
Set transaction amount
### Response:
def set_amount(self, amount):
"""
Set transaction amount
"""
if amount:
try:
self.IsoMessage.FieldData(4, int(amount))
except ValueError:
self.IsoMessage.FieldData(4, 0)
self.rebuild() |
def hash_pair(first: Keccak256, second: Optional[Keccak256]) -> Keccak256:
""" Computes the keccak hash of the elements ordered topologically.
Since a merkle proof will not include all the elements, but only the path
starting from the leaves up to the root, the order of the elements is not
known by the proof checker. The topological order is used as a
deterministic way of ordering the elements making sure the smart contract
verification and the python code are compatible.
"""
assert first is not None
if second is None:
return first
if first > second:
return sha3(second + first)
return sha3(first + second) | Computes the keccak hash of the elements ordered topologically.
Since a merkle proof will not include all the elements, but only the path
starting from the leaves up to the root, the order of the elements is not
known by the proof checker. The topological order is used as a
deterministic way of ordering the elements making sure the smart contract
verification and the python code are compatible. | Below is the instruction that describes the task:
### Input:
Computes the keccak hash of the elements ordered topologically.
Since a merkle proof will not include all the elements, but only the path
starting from the leaves up to the root, the order of the elements is not
known by the proof checker. The topological order is used as a
deterministic way of ordering the elements making sure the smart contract
verification and the python code are compatible.
### Response:
def hash_pair(first: Keccak256, second: Optional[Keccak256]) -> Keccak256:
""" Computes the keccak hash of the elements ordered topologically.
Since a merkle proof will not include all the elements, but only the path
starting from the leaves up to the root, the order of the elements is not
known by the proof checker. The topological order is used as a
deterministic way of ordering the elements making sure the smart contract
verification and the python code are compatible.
"""
assert first is not None
if second is None:
return first
if first > second:
return sha3(second + first)
return sha3(first + second) |
def convert_gru(builder, layer, input_names, output_names, keras_layer):
"""Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.output_dim
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
if keras_layer.consume_less not in ['cpu', 'gpu']:
raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less)
# Keras: Z R O
# CoreML: Z R O
W_h, W_x, b = ([], [], [])
if keras_layer.consume_less == 'cpu':
W_x.append(keras_layer.get_weights()[0].T)
W_x.append(keras_layer.get_weights()[3].T)
W_x.append(keras_layer.get_weights()[6].T)
W_h.append(keras_layer.get_weights()[1].T)
W_h.append(keras_layer.get_weights()[4].T)
W_h.append(keras_layer.get_weights()[7].T)
b.append(keras_layer.get_weights()[2])
b.append(keras_layer.get_weights()[5])
b.append(keras_layer.get_weights()[8])
else:
print('consume less not implemented')
# Set actication type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.inner_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_gru(
name = layer,
W_h = W_h, W_x = W_x, b = b,
input_size = input_size,
hidden_size = hidden_size,
input_names = input_names,
output_names = output_names,
activation = activation_str,
inner_activation = inner_activation_str,
output_all=output_all,
reverse_input = reverse_input) | Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
    A neural network builder object. | Below is the instruction that describes the task:
### Input:
Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
### Response:
def convert_gru(builder, layer, input_names, output_names, keras_layer):
"""Convert a GRU layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
hidden_size = keras_layer.output_dim
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
if keras_layer.consume_less not in ['cpu', 'gpu']:
raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less)
# Keras: Z R O
# CoreML: Z R O
W_h, W_x, b = ([], [], [])
if keras_layer.consume_less == 'cpu':
W_x.append(keras_layer.get_weights()[0].T)
W_x.append(keras_layer.get_weights()[3].T)
W_x.append(keras_layer.get_weights()[6].T)
W_h.append(keras_layer.get_weights()[1].T)
W_h.append(keras_layer.get_weights()[4].T)
W_h.append(keras_layer.get_weights()[7].T)
b.append(keras_layer.get_weights()[2])
b.append(keras_layer.get_weights()[5])
b.append(keras_layer.get_weights()[8])
else:
print('consume less not implemented')
# Set actication type
inner_activation_str = _get_recurrent_activation_name_from_keras(keras_layer.inner_activation)
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_gru(
name = layer,
W_h = W_h, W_x = W_x, b = b,
input_size = input_size,
hidden_size = hidden_size,
input_names = input_names,
output_names = output_names,
activation = activation_str,
inner_activation = inner_activation_str,
output_all=output_all,
reverse_input = reverse_input) |
def bifurcation_partition(bif_point):
'''Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number.'''
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return max(n, m) / min(n, m) | Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number. | Below is the instruction that describes the task:
### Input:
Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number.
### Response:
def bifurcation_partition(bif_point):
'''Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number.'''
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return max(n, m) / min(n, m) |
def print_tally(self):
"""
Prints the final tally to stdout.
"""
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
        self.skip_count, self.create_count, self.update_count, self.delete_count)) | Prints the final tally to stdout. | Below is the instruction that describes the task:
### Input:
Prints the final tally to stdout.
### Response:
def print_tally(self):
"""
Prints the final tally to stdout.
"""
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
self.skip_count, self.create_count, self.update_count, self.delete_count)) |
def contains_value(self, value):
"""
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
"""
check_not_none(value, "value can't be None")
return self._encode_invoke_on_target_partition(replicated_map_contains_value_codec, value=self._to_data(value)) | Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value. | Below is the instruction that describes the task:
### Input:
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
### Response:
def contains_value(self, value):
"""
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
"""
check_not_none(value, "value can't be None")
return self._encode_invoke_on_target_partition(replicated_map_contains_value_codec, value=self._to_data(value)) |
def _h_function(self,h):
""" private method for the spherical variogram "h" function
Parameters
----------
h : (float or numpy.ndarray)
distance(s)
Returns
-------
h_function : float or numpy.ndarray
the value of the "h" function implied by the SphVario
"""
hh = h / self.a
h = self.contribution * (1.0 - (hh * (1.5 - (0.5 * hh * hh))))
h[hh > 1.0] = 0.0
return h | private method for the spherical variogram "h" function
Parameters
----------
h : (float or numpy.ndarray)
distance(s)
Returns
-------
h_function : float or numpy.ndarray
    the value of the "h" function implied by the SphVario | Below is the instruction that describes the task:
### Input:
private method for the spherical variogram "h" function
Parameters
----------
h : (float or numpy.ndarray)
distance(s)
Returns
-------
h_function : float or numpy.ndarray
the value of the "h" function implied by the SphVario
### Response:
def _h_function(self,h):
""" private method for the spherical variogram "h" function
Parameters
----------
h : (float or numpy.ndarray)
distance(s)
Returns
-------
h_function : float or numpy.ndarray
the value of the "h" function implied by the SphVario
"""
hh = h / self.a
h = self.contribution * (1.0 - (hh * (1.5 - (0.5 * hh * hh))))
h[hh > 1.0] = 0.0
return h |
def validate(self):
"""Validate that the BinaryComposition is correctly representable."""
_validate_operator_name(self.operator, BinaryComposition.SUPPORTED_OPERATORS)
if not isinstance(self.left, Expression):
raise TypeError(u'Expected Expression left, got: {} {} {}'.format(
type(self.left).__name__, self.left, self))
if not isinstance(self.right, Expression):
raise TypeError(u'Expected Expression right, got: {} {}'.format(
            type(self.right).__name__, self.right)) | Validate that the BinaryComposition is correctly representable. | Below is the instruction that describes the task:
### Input:
Validate that the BinaryComposition is correctly representable.
### Response:
def validate(self):
"""Validate that the BinaryComposition is correctly representable."""
_validate_operator_name(self.operator, BinaryComposition.SUPPORTED_OPERATORS)
if not isinstance(self.left, Expression):
raise TypeError(u'Expected Expression left, got: {} {} {}'.format(
type(self.left).__name__, self.left, self))
if not isinstance(self.right, Expression):
raise TypeError(u'Expected Expression right, got: {} {}'.format(
type(self.right).__name__, self.right)) |
def makeCoreValuesSubqueryCondition(engine, column, values: List[Union[int, str]]):
""" Make Core Values Subquery
:param engine: The database engine, used to determine the dialect
:param column: The column, eg TableItem.__table__.c.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(engine):
return column.in_(values)
if not isMssqlDialect(engine):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
return column.in_(sql) | Make Core Values Subquery
:param engine: The database engine, used to determine the dialect
:param column: The column, eg TableItem.__table__.c.colName
:param values: A list of string or int values | Below is the instruction that describes the task:
### Input:
Make Core Values Subquery
:param engine: The database engine, used to determine the dialect
:param column: The column, eg TableItem.__table__.c.colName
:param values: A list of string or int values
### Response:
def makeCoreValuesSubqueryCondition(engine, column, values: List[Union[int, str]]):
""" Make Core Values Subquery
:param engine: The database engine, used to determine the dialect
:param column: The column, eg TableItem.__table__.c.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(engine):
return column.in_(values)
if not isMssqlDialect(engine):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
return column.in_(sql) |
def model_field_attr(model, model_field, attr):
"""
Returns the specified attribute for the specified field on the model class.
"""
fields = dict([(field.name, field) for field in model._meta.fields])
    return getattr(fields[model_field], attr) | Returns the specified attribute for the specified field on the model class. | Below is the instruction that describes the task:
### Input:
Returns the specified attribute for the specified field on the model class.
### Response:
def model_field_attr(model, model_field, attr):
"""
Returns the specified attribute for the specified field on the model class.
"""
fields = dict([(field.name, field) for field in model._meta.fields])
return getattr(fields[model_field], attr) |
def rdn_to_dn(changes: Changeset, name: str, base_dn: str) -> Changeset:
""" Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN.
"""
dn = changes.get_value_as_single('dn')
if dn is not None:
return changes
value = changes.get_value_as_single(name)
if value is None:
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is None" % name)
if isinstance(value, list):
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is a list" % name)
assert base_dn is not None
split_base = str2dn(base_dn)
split_new_dn = [[(name, value, 1)]] + split_base
new_dn = dn2str(split_new_dn)
return changes.set('dn', new_dn) | Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN. | Below is the instruction that describes the task:
### Input:
Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN.
### Response:
def rdn_to_dn(changes: Changeset, name: str, base_dn: str) -> Changeset:
""" Convert the rdn to a fully qualified DN for the specified LDAP
connection.
:param changes: The changes object to lookup.
:param name: rdn to convert.
:param base_dn: The base_dn to lookup.
:return: fully qualified DN.
"""
dn = changes.get_value_as_single('dn')
if dn is not None:
return changes
value = changes.get_value_as_single(name)
if value is None:
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is None" % name)
if isinstance(value, list):
raise tldap.exceptions.ValidationError(
"Cannot use %s in dn as it is a list" % name)
assert base_dn is not None
split_base = str2dn(base_dn)
split_new_dn = [[(name, value, 1)]] + split_base
new_dn = dn2str(split_new_dn)
return changes.set('dn', new_dn) |
def remove_eoc_marker(self, text, next_text):
"""Remove end of cell marker when next cell has an explicit start marker"""
if self.cell_marker_start:
return text
if self.is_code() and text[-1] == self.comment + ' -':
# remove end of cell marker when redundant with next explicit marker
if not next_text or next_text[0].startswith(self.comment + ' + {'):
text = text[:-1]
# When we do not need the end of cell marker, number of blank lines is the max
# between that required at the end of the cell, and that required before the next cell.
if self.lines_to_end_of_cell_marker and (self.lines_to_next_cell is None or
self.lines_to_end_of_cell_marker > self.lines_to_next_cell):
self.lines_to_next_cell = self.lines_to_end_of_cell_marker
else:
# Insert blank lines at the end of the cell
blank_lines = self.lines_to_end_of_cell_marker
if blank_lines is None:
# two blank lines when required by pep8
blank_lines = pep8_lines_between_cells(text[:-1], next_text, self.ext)
blank_lines = 0 if blank_lines < 2 else 2
text = text[:-1] + [''] * blank_lines + text[-1:]
    return text | Remove end of cell marker when next cell has an explicit start marker | Below is the instruction that describes the task:
### Input:
Remove end of cell marker when next cell has an explicit start marker
### Response:
def remove_eoc_marker(self, text, next_text):
"""Remove end of cell marker when next cell has an explicit start marker"""
if self.cell_marker_start:
return text
if self.is_code() and text[-1] == self.comment + ' -':
# remove end of cell marker when redundant with next explicit marker
if not next_text or next_text[0].startswith(self.comment + ' + {'):
text = text[:-1]
# When we do not need the end of cell marker, number of blank lines is the max
# between that required at the end of the cell, and that required before the next cell.
if self.lines_to_end_of_cell_marker and (self.lines_to_next_cell is None or
self.lines_to_end_of_cell_marker > self.lines_to_next_cell):
self.lines_to_next_cell = self.lines_to_end_of_cell_marker
else:
# Insert blank lines at the end of the cell
blank_lines = self.lines_to_end_of_cell_marker
if blank_lines is None:
# two blank lines when required by pep8
blank_lines = pep8_lines_between_cells(text[:-1], next_text, self.ext)
blank_lines = 0 if blank_lines < 2 else 2
text = text[:-1] + [''] * blank_lines + text[-1:]
return text |
def _LogInvalidRunLevels(states, valid):
"""Log any invalid run states found."""
invalid = set()
for state in states:
if state not in valid:
invalid.add(state)
if invalid:
logging.warning("Invalid init runlevel(s) encountered: %s",
", ".join(invalid)) | Log any invalid run states found. | Below is the the instruction that describes the task:
### Input:
Log any invalid run states found.
### Response:
def _LogInvalidRunLevels(states, valid):
"""Log any invalid run states found."""
invalid = set()
for state in states:
if state not in valid:
invalid.add(state)
if invalid:
logging.warning("Invalid init runlevel(s) encountered: %s",
", ".join(invalid)) |
def main():
"""Parse arguments and print generated documentation to stdout."""
parser = argparse.ArgumentParser()
parser.add_argument('protofilepath')
args = parser.parse_args()
out_file = compile_protofile(args.protofilepath)
with open(out_file, 'rb') as proto_file:
# pylint: disable=no-member
file_descriptor_set = descriptor_pb2.FileDescriptorSet.FromString(
proto_file.read()
)
# pylint: enable=no-member
for file_descriptor in file_descriptor_set.file:
# Build dict of location tuples
locations = {}
for location in file_descriptor.source_code_info.location:
locations[tuple(location.path)] = location
# Add comment to top
print(make_comment('This file was automatically generated from {} and '
'should not be edited directly.'
.format(args.protofilepath)))
# Generate documentation
for index, message_desc in enumerate(file_descriptor.message_type):
generate_message_doc(message_desc, locations, (4, index))
for index, enum_desc in enumerate(file_descriptor.enum_type):
                generate_enum_doc(enum_desc, locations, (5, index)) | Parse arguments and print generated documentation to stdout. | Below is the instruction that describes the task:
### Input:
Parse arguments and print generated documentation to stdout.
### Response:
def main():
"""Parse arguments and print generated documentation to stdout."""
parser = argparse.ArgumentParser()
parser.add_argument('protofilepath')
args = parser.parse_args()
out_file = compile_protofile(args.protofilepath)
with open(out_file, 'rb') as proto_file:
# pylint: disable=no-member
file_descriptor_set = descriptor_pb2.FileDescriptorSet.FromString(
proto_file.read()
)
# pylint: enable=no-member
for file_descriptor in file_descriptor_set.file:
# Build dict of location tuples
locations = {}
for location in file_descriptor.source_code_info.location:
locations[tuple(location.path)] = location
# Add comment to top
print(make_comment('This file was automatically generated from {} and '
'should not be edited directly.'
.format(args.protofilepath)))
# Generate documentation
for index, message_desc in enumerate(file_descriptor.message_type):
generate_message_doc(message_desc, locations, (4, index))
for index, enum_desc in enumerate(file_descriptor.enum_type):
generate_enum_doc(enum_desc, locations, (5, index)) |
def create_bmi_model(self, engine, bmi_class=None, wrapper_kwargs=None):
"""initialize a bmi mode using an optional class"""
if wrapper_kwargs is None:
wrapper_kwargs = {}
if bmi_class is None:
wrapper_class = bmi.wrapper.BMIWrapper
else:
wrapper_class = self.import_from_string(bmi_class)
try:
"""most models use engine as a first argument"""
model = wrapper_class(
engine,
**wrapper_kwargs
)
except TypeError as e:
"""but old python engines are engines, so they don't, but they should """
logger.warn(
'Model wrapper %s does not accept engine as a first argument',
wrapper_class
)
model = wrapper_class(
**wrapper_kwargs
)
    return model | initialize a bmi mode using an optional class | Below is the instruction that describes the task:
### Input:
initialize a bmi mode using an optional class
### Response:
def create_bmi_model(self, engine, bmi_class=None, wrapper_kwargs=None):
"""initialize a bmi mode using an optional class"""
if wrapper_kwargs is None:
wrapper_kwargs = {}
if bmi_class is None:
wrapper_class = bmi.wrapper.BMIWrapper
else:
wrapper_class = self.import_from_string(bmi_class)
try:
"""most models use engine as a first argument"""
model = wrapper_class(
engine,
**wrapper_kwargs
)
except TypeError as e:
"""but old python engines are engines, so they don't, but they should """
logger.warn(
'Model wrapper %s does not accept engine as a first argument',
wrapper_class
)
model = wrapper_class(
**wrapper_kwargs
)
return model |
def create(
meta_schema,
validators=(),
version=None,
default_types=None,
type_checker=None,
id_of=_id_of,
):
"""
Create a new validator class.
Arguments:
meta_schema (collections.Mapping):
the meta schema for the new validator class
validators (collections.Mapping):
a mapping from names to callables, where each callable will
validate the schema property with the given name.
Each callable should take 4 arguments:
1. a validator instance,
2. the value of the property being validated within the
instance
3. the instance
4. the schema
version (str):
an identifier for the version that this validator class will
validate. If provided, the returned validator class will have its
``__name__`` set to include the version, and also will have
`jsonschema.validators.validates` automatically called for the
given version.
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, a `jsonschema.TypeChecker` will be created with
a set of default types typical of JSON Schema drafts.
default_types (collections.Mapping):
.. deprecated:: 3.0.0
Please use the type_checker argument instead.
If set, it provides mappings of JSON types to Python types that
will be converted to functions and redefined in this object's
`jsonschema.TypeChecker`.
id_of (callable):
A function that given a schema, returns its ID.
Returns:
a new `jsonschema.IValidator` class
"""
if default_types is not None:
if type_checker is not None:
raise TypeError(
"Do not specify default_types when providing a type checker.",
)
_created_with_default_types = True
warn(
(
"The default_types argument is deprecated. "
"Use the type_checker argument instead."
),
DeprecationWarning,
stacklevel=2,
)
type_checker = _types.TypeChecker(
type_checkers=_generate_legacy_type_checks(default_types),
)
else:
default_types = _DEPRECATED_DEFAULT_TYPES
if type_checker is None:
_created_with_default_types = False
type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
_created_with_default_types = False
else:
_created_with_default_types = None
@add_metaclass(_DefaultTypesDeprecatingMetaClass)
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
TYPE_CHECKER = type_checker
ID_OF = staticmethod(id_of)
DEFAULT_TYPES = property(_DEFAULT_TYPES)
_DEFAULT_TYPES = dict(default_types)
_CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
def __init__(
self,
schema,
types=(),
resolver=None,
format_checker=None,
):
if types:
warn(
(
"The types argument is deprecated. Provide "
"a type_checker to jsonschema.validators.extend "
"instead."
),
DeprecationWarning,
stacklevel=2,
)
self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
_generate_legacy_type_checks(types),
)
if resolver is None:
resolver = RefResolver.from_schema(schema, id_of=id_of)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise exceptions.SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
if _schema is True:
return
elif _schema is False:
yield exceptions.ValidationError(
"False schema does not allow %r" % (instance,),
validator=None,
validator_value=None,
instance=instance,
schema=_schema,
)
return
scope = id_of(_schema)
if scope:
self.resolver.push_scope(scope)
try:
ref = _schema.get(u"$ref")
if ref is not None:
validators = [(u"$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != u"$ref":
error.schema_path.appendleft(k)
yield error
finally:
if scope:
self.resolver.pop_scope()
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
try:
return self.TYPE_CHECKER.is_type(instance, type)
except exceptions.UndefinedTypeCheck:
raise exceptions.UnknownType(type, instance, self.schema)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
Validator.__name__ = version.title().replace(" ", "") + "Validator"
return Validator | Create a new validator class.
Arguments:
meta_schema (collections.Mapping):
the meta schema for the new validator class
validators (collections.Mapping):
a mapping from names to callables, where each callable will
validate the schema property with the given name.
Each callable should take 4 arguments:
1. a validator instance,
2. the value of the property being validated within the
instance
3. the instance
4. the schema
version (str):
an identifier for the version that this validator class will
validate. If provided, the returned validator class will have its
``__name__`` set to include the version, and also will have
`jsonschema.validators.validates` automatically called for the
given version.
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, a `jsonschema.TypeChecker` will be created with
a set of default types typical of JSON Schema drafts.
default_types (collections.Mapping):
.. deprecated:: 3.0.0
Please use the type_checker argument instead.
If set, it provides mappings of JSON types to Python types that
will be converted to functions and redefined in this object's
`jsonschema.TypeChecker`.
id_of (callable):
A function that given a schema, returns its ID.
Returns:
a new `jsonschema.IValidator` class | Below is the instruction that describes the task:
### Input:
Create a new validator class.
Arguments:
meta_schema (collections.Mapping):
the meta schema for the new validator class
validators (collections.Mapping):
a mapping from names to callables, where each callable will
validate the schema property with the given name.
Each callable should take 4 arguments:
1. a validator instance,
2. the value of the property being validated within the
instance
3. the instance
4. the schema
version (str):
an identifier for the version that this validator class will
validate. If provided, the returned validator class will have its
``__name__`` set to include the version, and also will have
`jsonschema.validators.validates` automatically called for the
given version.
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, a `jsonschema.TypeChecker` will be created with
a set of default types typical of JSON Schema drafts.
default_types (collections.Mapping):
.. deprecated:: 3.0.0
Please use the type_checker argument instead.
If set, it provides mappings of JSON types to Python types that
will be converted to functions and redefined in this object's
`jsonschema.TypeChecker`.
id_of (callable):
A function that given a schema, returns its ID.
Returns:
a new `jsonschema.IValidator` class
### Response:
def create(
meta_schema,
validators=(),
version=None,
default_types=None,
type_checker=None,
id_of=_id_of,
):
"""
Create a new validator class.
Arguments:
meta_schema (collections.Mapping):
the meta schema for the new validator class
validators (collections.Mapping):
a mapping from names to callables, where each callable will
validate the schema property with the given name.
Each callable should take 4 arguments:
1. a validator instance,
2. the value of the property being validated within the
instance
3. the instance
4. the schema
version (str):
an identifier for the version that this validator class will
validate. If provided, the returned validator class will have its
``__name__`` set to include the version, and also will have
`jsonschema.validators.validates` automatically called for the
given version.
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, a `jsonschema.TypeChecker` will be created with
a set of default types typical of JSON Schema drafts.
default_types (collections.Mapping):
.. deprecated:: 3.0.0
Please use the type_checker argument instead.
If set, it provides mappings of JSON types to Python types that
will be converted to functions and redefined in this object's
`jsonschema.TypeChecker`.
id_of (callable):
A function that given a schema, returns its ID.
Returns:
a new `jsonschema.IValidator` class
"""
if default_types is not None:
if type_checker is not None:
raise TypeError(
"Do not specify default_types when providing a type checker.",
)
_created_with_default_types = True
warn(
(
"The default_types argument is deprecated. "
"Use the type_checker argument instead."
),
DeprecationWarning,
stacklevel=2,
)
type_checker = _types.TypeChecker(
type_checkers=_generate_legacy_type_checks(default_types),
)
else:
default_types = _DEPRECATED_DEFAULT_TYPES
if type_checker is None:
_created_with_default_types = False
type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
_created_with_default_types = False
else:
_created_with_default_types = None
@add_metaclass(_DefaultTypesDeprecatingMetaClass)
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
TYPE_CHECKER = type_checker
ID_OF = staticmethod(id_of)
DEFAULT_TYPES = property(_DEFAULT_TYPES)
_DEFAULT_TYPES = dict(default_types)
_CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
def __init__(
self,
schema,
types=(),
resolver=None,
format_checker=None,
):
if types:
warn(
(
"The types argument is deprecated. Provide "
"a type_checker to jsonschema.validators.extend "
"instead."
),
DeprecationWarning,
stacklevel=2,
)
self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
_generate_legacy_type_checks(types),
)
if resolver is None:
resolver = RefResolver.from_schema(schema, id_of=id_of)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise exceptions.SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
if _schema is True:
return
elif _schema is False:
yield exceptions.ValidationError(
"False schema does not allow %r" % (instance,),
validator=None,
validator_value=None,
instance=instance,
schema=_schema,
)
return
scope = id_of(_schema)
if scope:
self.resolver.push_scope(scope)
try:
ref = _schema.get(u"$ref")
if ref is not None:
validators = [(u"$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != u"$ref":
error.schema_path.appendleft(k)
yield error
finally:
if scope:
self.resolver.pop_scope()
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
try:
return self.TYPE_CHECKER.is_type(instance, type)
except exceptions.UndefinedTypeCheck:
raise exceptions.UnknownType(type, instance, self.schema)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
Validator.__name__ = version.title().replace(" ", "") + "Validator"
return Validator |
def print_all_commands(self, *, no_pager=False):
"""Print help for all commands.
Commands are sorted in alphabetical order and wrapping is done
based on the width of the terminal.
"""
formatter = self.parent_parser._get_formatter()
command_names = sorted(self.parent_parser.subparsers.choices.keys())
max_name_len = max([len(name) for name in command_names]) + 1
commands = ""
for name in command_names:
command = self.parent_parser.subparsers.choices[name]
extra_padding = max_name_len - len(name)
command_line = '%s%s%s' % (
name, ' ' * extra_padding, command.description)
while len(command_line) > formatter._width:
lines = textwrap.wrap(command_line, formatter._width)
commands += "%s\n" % lines[0]
if len(lines) > 1:
lines[1] = (' ' * max_name_len) + lines[1]
command_line = ' '.join(lines[1:])
else:
command_line = None
if command_line:
commands += "%s\n" % command_line
if no_pager:
print(commands[:-1])
else:
print_with_pager(commands[:-1]) | Print help for all commands.
Commands are sorted in alphabetical order and wrapping is done
based on the width of the terminal. | Below is the instruction that describes the task:
### Input:
Print help for all commands.
Commands are sorted in alphabetical order and wrapping is done
based on the width of the terminal.
### Response:
def print_all_commands(self, *, no_pager=False):
"""Print help for all commands.
Commands are sorted in alphabetical order and wrapping is done
based on the width of the terminal.
"""
formatter = self.parent_parser._get_formatter()
command_names = sorted(self.parent_parser.subparsers.choices.keys())
max_name_len = max([len(name) for name in command_names]) + 1
commands = ""
for name in command_names:
command = self.parent_parser.subparsers.choices[name]
extra_padding = max_name_len - len(name)
command_line = '%s%s%s' % (
name, ' ' * extra_padding, command.description)
while len(command_line) > formatter._width:
lines = textwrap.wrap(command_line, formatter._width)
commands += "%s\n" % lines[0]
if len(lines) > 1:
lines[1] = (' ' * max_name_len) + lines[1]
command_line = ' '.join(lines[1:])
else:
command_line = None
if command_line:
commands += "%s\n" % command_line
if no_pager:
print(commands[:-1])
else:
print_with_pager(commands[:-1]) |
async def issuer_create_credential(wallet_handle: int,
cred_offer_json: str,
cred_req_json: str,
cred_values_json: str,
rev_reg_id: Optional[str],
blob_storage_reader_handle: Optional[int]) -> (str, Optional[str], Optional[str]):
"""
Check Cred Request for the given Cred Offer and issue Credential for the given Cred Request.
Cred Request must match Cred Offer. The credential definition and revocation registry definition
referenced in Cred Offer and Cred Request must be already created and stored into the wallet.
Information for this credential revocation will be stored in the wallet as part of revocation registry under
generated cred_revoc_id local for this wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_offer_json: a cred offer created by issuer_create_credential_offer
:param cred_req_json: a credential request created by prover_create_credential_req
:param cred_values_json: a credential containing attribute values for each of requested attribute names.
Example:
{
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value1", "encoded": "value1_as_int" }
}
:param rev_reg_id: (Optional) id of revocation registry definition stored in the wallet
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that
will allow to read revocation tails
:return:
cred_json: Credential json containing signed credential values
{
"schema_id": string,
"cred_def_id": string,
"rev_reg_def_id", Optional<string>,
"values": <see cred_values_json above>,
// Fields below can depend on Cred Def type
"signature": <signature>,
"signature_correctness_proof": <signature_correctness_proof>
}
cred_revoc_id: local id for revocation info (Can be used for revocation of this cred)
revoc_reg_delta_json: Revocation registry delta json with a newly issued credential
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential: >>> wallet_handle: %r, cred_offer_json: %r, cred_req_json: %r,"
" cred_values_json: %r, rev_reg_id: %r, blob_storage_reader_handle: %r",
wallet_handle,
cred_offer_json,
cred_req_json,
cred_values_json,
rev_reg_id,
blob_storage_reader_handle)
if not hasattr(issuer_create_credential, "cb"):
logger.debug("issuer_create_credential: Creating callback")
issuer_create_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_req_json = c_char_p(cred_req_json.encode('utf-8'))
c_cred_values_json = c_char_p(cred_values_json.encode('utf-8'))
c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8')) if rev_reg_id is not None else None
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle) if blob_storage_reader_handle else -1
(cred_json, cred_revoc_id, revoc_reg_delta_json) = await do_call('indy_issuer_create_credential',
c_wallet_handle,
c_cred_offer_json,
c_cred_req_json,
c_cred_values_json,
c_rev_reg_id,
c_blob_storage_reader_handle,
issuer_create_credential.cb)
cred_json = cred_json.decode()
cred_revoc_id = cred_revoc_id.decode() if cred_revoc_id else None
revoc_reg_delta_json = revoc_reg_delta_json.decode() if revoc_reg_delta_json else None
res = (cred_json, cred_revoc_id, revoc_reg_delta_json)
logger.debug("issuer_create_credential: <<< res: %r", res)
return res | Check Cred Request for the given Cred Offer and issue Credential for the given Cred Request.
Cred Request must match Cred Offer. The credential definition and revocation registry definition
referenced in Cred Offer and Cred Request must be already created and stored into the wallet.
Information for this credential revocation will be stored in the wallet as part of revocation registry under
generated cred_revoc_id local for this wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_offer_json: a cred offer created by issuer_create_credential_offer
:param cred_req_json: a credential request created by prover_create_credential_req
:param cred_values_json: a credential containing attribute values for each of requested attribute names.
Example:
{
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value1", "encoded": "value1_as_int" }
}
:param rev_reg_id: (Optional) id of revocation registry definition stored in the wallet
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that
will allow to read revocation tails
:return:
cred_json: Credential json containing signed credential values
{
"schema_id": string,
"cred_def_id": string,
"rev_reg_def_id", Optional<string>,
"values": <see cred_values_json above>,
// Fields below can depend on Cred Def type
"signature": <signature>,
"signature_correctness_proof": <signature_correctness_proof>
}
cred_revoc_id: local id for revocation info (Can be used for revocation of this cred)
revoc_reg_delta_json: Revocation registry delta json with a newly issued credential | Below is the instruction that describes the task:
### Input:
Check Cred Request for the given Cred Offer and issue Credential for the given Cred Request.
Cred Request must match Cred Offer. The credential definition and revocation registry definition
referenced in Cred Offer and Cred Request must be already created and stored into the wallet.
Information for this credential revocation will be stored in the wallet as part of revocation registry under
generated cred_revoc_id local for this wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_offer_json: a cred offer created by issuer_create_credential_offer
:param cred_req_json: a credential request created by prover_create_credential_req
:param cred_values_json: a credential containing attribute values for each of requested attribute names.
Example:
{
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value1", "encoded": "value1_as_int" }
}
:param rev_reg_id: (Optional) id of revocation registry definition stored in the wallet
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that
will allow to read revocation tails
:return:
cred_json: Credential json containing signed credential values
{
"schema_id": string,
"cred_def_id": string,
"rev_reg_def_id", Optional<string>,
"values": <see cred_values_json above>,
// Fields below can depend on Cred Def type
"signature": <signature>,
"signature_correctness_proof": <signature_correctness_proof>
}
cred_revoc_id: local id for revocation info (Can be used for revocation of this cred)
revoc_reg_delta_json: Revocation registry delta json with a newly issued credential
### Response:
async def issuer_create_credential(wallet_handle: int,
cred_offer_json: str,
cred_req_json: str,
cred_values_json: str,
rev_reg_id: Optional[str],
blob_storage_reader_handle: Optional[int]) -> (str, Optional[str], Optional[str]):
"""
Check Cred Request for the given Cred Offer and issue Credential for the given Cred Request.
Cred Request must match Cred Offer. The credential definition and revocation registry definition
referenced in Cred Offer and Cred Request must be already created and stored into the wallet.
Information for this credential revocation will be stored in the wallet as part of revocation registry under
generated cred_revoc_id local for this wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_offer_json: a cred offer created by issuer_create_credential_offer
:param cred_req_json: a credential request created by prover_create_credential_req
:param cred_values_json: a credential containing attribute values for each of requested attribute names.
Example:
{
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value1", "encoded": "value1_as_int" }
}
:param rev_reg_id: (Optional) id of revocation registry definition stored in the wallet
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that
will allow to read revocation tails
:return:
cred_json: Credential json containing signed credential values
{
"schema_id": string,
"cred_def_id": string,
"rev_reg_def_id", Optional<string>,
"values": <see cred_values_json above>,
// Fields below can depend on Cred Def type
"signature": <signature>,
"signature_correctness_proof": <signature_correctness_proof>
}
cred_revoc_id: local id for revocation info (Can be used for revocation of this cred)
revoc_reg_delta_json: Revocation registry delta json with a newly issued credential
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential: >>> wallet_handle: %r, cred_offer_json: %r, cred_req_json: %r,"
" cred_values_json: %r, rev_reg_id: %r, blob_storage_reader_handle: %r",
wallet_handle,
cred_offer_json,
cred_req_json,
cred_values_json,
rev_reg_id,
blob_storage_reader_handle)
if not hasattr(issuer_create_credential, "cb"):
logger.debug("issuer_create_credential: Creating callback")
issuer_create_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_req_json = c_char_p(cred_req_json.encode('utf-8'))
c_cred_values_json = c_char_p(cred_values_json.encode('utf-8'))
c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8')) if rev_reg_id is not None else None
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle) if blob_storage_reader_handle else -1
(cred_json, cred_revoc_id, revoc_reg_delta_json) = await do_call('indy_issuer_create_credential',
c_wallet_handle,
c_cred_offer_json,
c_cred_req_json,
c_cred_values_json,
c_rev_reg_id,
c_blob_storage_reader_handle,
issuer_create_credential.cb)
cred_json = cred_json.decode()
cred_revoc_id = cred_revoc_id.decode() if cred_revoc_id else None
revoc_reg_delta_json = revoc_reg_delta_json.decode() if revoc_reg_delta_json else None
res = (cred_json, cred_revoc_id, revoc_reg_delta_json)
logger.debug("issuer_create_credential: <<< res: %r", res)
return res |
def chunks(seq, chunk_size):
# type: (Sequence[T], int) -> Iterable[Sequence[T]]
""" Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk.
"""
return (seq[i:i + chunk_size] for i in range(0, len(seq), chunk_size)) | Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk. | Below is the instruction that describes the task:
### Input:
Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk.
### Response:
def chunks(seq, chunk_size):
# type: (Sequence[T], int) -> Iterable[Sequence[T]]
""" Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk.
"""
return (seq[i:i + chunk_size] for i in range(0, len(seq), chunk_size)) |
def offset(self, value):
"""
Allows for skipping a specified number of results in query. Useful
for pagination.
"""
self._query = self._query.skip(value)
return self | Allows for skipping a specified number of results in query. Useful
for pagination. | Below is the instruction that describes the task:
### Input:
Allows for skipping a specified number of results in query. Useful
for pagination.
### Response:
def offset(self, value):
"""
Allows for skipping a specified number of results in query. Useful
for pagination.
"""
self._query = self._query.skip(value)
return self |
def _begin_write(session: UpdateSession,
loop: asyncio.AbstractEventLoop,
rootfs_file_path: str):
""" Start the write process. """
session.set_progress(0)
session.set_stage(Stages.WRITING)
write_future = asyncio.ensure_future(loop.run_in_executor(
None, file_actions.write_update, rootfs_file_path,
session.set_progress))
def write_done(fut):
exc = fut.exception()
if exc:
session.set_error(getattr(exc, 'short', str(type(exc))),
str(exc))
else:
session.set_stage(Stages.DONE)
write_future.add_done_callback(write_done) | Start the write process. | Below is the instruction that describes the task:
### Input:
Start the write process.
### Response:
def _begin_write(session: UpdateSession,
loop: asyncio.AbstractEventLoop,
rootfs_file_path: str):
""" Start the write process. """
session.set_progress(0)
session.set_stage(Stages.WRITING)
write_future = asyncio.ensure_future(loop.run_in_executor(
None, file_actions.write_update, rootfs_file_path,
session.set_progress))
def write_done(fut):
exc = fut.exception()
if exc:
session.set_error(getattr(exc, 'short', str(type(exc))),
str(exc))
else:
session.set_stage(Stages.DONE)
write_future.add_done_callback(write_done) |
def load_into(self, obj, data, inplace=True, *args, **kwargs):
"""Load data and update existing object.
:param obj: Object to update with deserialized data.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Updated object.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
if obj is None:
raise ValueError('Load target should not be None')
if data is MISSING:
return
if data is None:
self._fail('required')
if not is_mapping(data):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
data1 = {}
for name, field in iteritems(self.fields):
try:
if name in data:
# Load new data
value = field.load_into(obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs)
else:
# Retrieve data from existing object
value = field.load(name, {
name: field.dump(name, obj, *args, **kwargs)
})
if value is not MISSING:
data1[name] = value
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
if self.allow_extra_fields is False:
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
errors_builder.add_error(name, self._error_messages['unknown'])
elif isinstance(self.allow_extra_fields, Field):
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
try:
loaded = self.allow_extra_fields.load_into(
obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs
)
if loaded != MISSING:
data1[name] = loaded
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
errors_builder.raise_errors()
data2 = super(Object, self).load(data1, *args, **kwargs)
if self.immutable or not inplace:
result = data2
if self.constructor:
result = self.constructor(**result)
else:
for name, value in iteritems(data2):
field = self.fields.get(name, self.allow_extra_fields)
if not isinstance(field, Field):
continue
field.set_value(name, obj, value, *args, **kwargs)
result = obj
return result | Load data and update existing object.
:param obj: Object to update with deserialized data.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Updated object.
:raises: :exc:`~lollipop.errors.ValidationError` | Below is the instruction that describes the task:
### Input:
Load data and update existing object.
:param obj: Object to update with deserialized data.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Updated object.
:raises: :exc:`~lollipop.errors.ValidationError`
### Response:
def load_into(self, obj, data, inplace=True, *args, **kwargs):
"""Load data and update existing object.
:param obj: Object to update with deserialized data.
:param data: Raw data to get value to deserialize from.
:param bool inplace: If True update data inplace;
otherwise - create new data.
:param kwargs: Same keyword arguments as for :meth:`Type.load`.
:returns: Updated object.
:raises: :exc:`~lollipop.errors.ValidationError`
"""
if obj is None:
raise ValueError('Load target should not be None')
if data is MISSING:
return
if data is None:
self._fail('required')
if not is_mapping(data):
self._fail('invalid', data=data)
errors_builder = ValidationErrorBuilder()
data1 = {}
for name, field in iteritems(self.fields):
try:
if name in data:
# Load new data
value = field.load_into(obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs)
else:
# Retrieve data from existing object
value = field.load(name, {
name: field.dump(name, obj, *args, **kwargs)
})
if value is not MISSING:
data1[name] = value
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
if self.allow_extra_fields is False:
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
errors_builder.add_error(name, self._error_messages['unknown'])
elif isinstance(self.allow_extra_fields, Field):
field_names = [name for name, _ in iteritems(self.fields)]
for name in data:
if name not in field_names:
try:
loaded = self.allow_extra_fields.load_into(
obj, name, data,
inplace=not self.immutable and inplace,
*args, **kwargs
)
if loaded != MISSING:
data1[name] = loaded
except ValidationError as ve:
errors_builder.add_error(name, ve.messages)
errors_builder.raise_errors()
data2 = super(Object, self).load(data1, *args, **kwargs)
if self.immutable or not inplace:
result = data2
if self.constructor:
result = self.constructor(**result)
else:
for name, value in iteritems(data2):
field = self.fields.get(name, self.allow_extra_fields)
if not isinstance(field, Field):
continue
field.set_value(name, obj, value, *args, **kwargs)
result = obj
return result |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.