code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def scan_binary(patch_file, project, sha256hash, apikey):
"""
Sends Binary (sha256hash) to Virus Total API
"""
v_api = virus_total.VirusTotal()
while True:
binary_report = v_api.binary_report(sha256hash, apikey)
response_code = binary_report['response_code']
# report does not exist, need to scan
if response_code == 0:
logger.info('Performing new scan of %s.', patch_file)
scan_file = v_api.scan_file(patch_file, apikey)
logger.info('VirusTotal Response: %s', scan_file['verbose_msg'])
logger.info('Report will be rendered at: %s', scan_file['permalink'])
binary_report = v_api.binary_report(sha256hash, apikey)
# Item is still queued
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 1:
logger.info('Report found, job complete.')
break
positives = binary_report['positives']
if positives == 0:
negative_report(binary_report, sha256hash, project, patch_file)
else:
positive_report(binary_report, sha256hash, project, patch_file) | Sends Binary (sha256hash) to Virus Total API | Below is the instruction that describes the task:
### Input:
Sends Binary (sha256hash) to Virus Total API
### Response:
def scan_binary(patch_file, project, sha256hash, apikey):
"""
Sends Binary (sha256hash) to Virus Total API
"""
v_api = virus_total.VirusTotal()
while True:
binary_report = v_api.binary_report(sha256hash, apikey)
response_code = binary_report['response_code']
# report does not exist, need to scan
if response_code == 0:
logger.info('Performing new scan of %s.', patch_file)
scan_file = v_api.scan_file(patch_file, apikey)
logger.info('VirusTotal Response: %s', scan_file['verbose_msg'])
logger.info('Report will be rendered at: %s', scan_file['permalink'])
binary_report = v_api.binary_report(sha256hash, apikey)
# Item is still queued
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 1:
logger.info('Report found, job complete.')
break
positives = binary_report['positives']
if positives == 0:
negative_report(binary_report, sha256hash, project, patch_file)
else:
positive_report(binary_report, sha256hash, project, patch_file) |
def stop(self):
"""
Stops this bot.
Returns as soon as all running threads have finished processing.
"""
self.log.debug('Stopping bot {}'.format(self._name))
self._stop = True
for t in self._threads:
t.join()
self.log.debug('Stopping bot {} finished. All threads joined.'.format(self._name)) | Stops this bot.
Returns as soon as all running threads have finished processing. | Below is the instruction that describes the task:
### Input:
Stops this bot.
Returns as soon as all running threads have finished processing.
### Response:
def stop(self):
"""
Stops this bot.
Returns as soon as all running threads have finished processing.
"""
self.log.debug('Stopping bot {}'.format(self._name))
self._stop = True
for t in self._threads:
t.join()
self.log.debug('Stopping bot {} finished. All threads joined.'.format(self._name)) |
def hash_md5(self):
"""Calculate md5 fingerprint.
Shamelessly copied from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python
For specification, see RFC4716, section 4."""
fp_plain = hashlib.md5(self._decoded_key).hexdigest()
return "MD5:" + ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])) | Calculate md5 fingerprint.
Shamelessly copied from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python
For specification, see RFC4716, section 4. | Below is the instruction that describes the task:
### Input:
Calculate md5 fingerprint.
Shamelessly copied from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python
For specification, see RFC4716, section 4.
### Response:
def hash_md5(self):
"""Calculate md5 fingerprint.
Shamelessly copied from http://stackoverflow.com/questions/6682815/deriving-an-ssh-fingerprint-from-a-public-key-in-python
For specification, see RFC4716, section 4."""
fp_plain = hashlib.md5(self._decoded_key).hexdigest()
return "MD5:" + ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])) |
def pull(self, action, image_name, **kwargs):
"""
Pulls an image for a container configuration
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param image_name: Image name.
:type image_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
"""
config_id = action.config_id
registry, __, image = config_id.config_name.rpartition('/')
if registry and '.' in registry and registry not in self._login_registries:
self.login(action, registry, insecure_registry=kwargs.get('insecure_registry'))
log.info("Pulling image %s:%s.", config_id.config_name, config_id.instance_name)
res = action.client.pull(repository=config_id.config_name, tag=config_id.instance_name, **kwargs)
log.debug("Done pulling image %s:%s.", config_id.config_name, config_id.instance_name)
self._policy.images[action.client_name].refresh_repo(config_id.config_name)
log.debug("Refreshed image cache for repo %s.", config_id.config_name)
return res | Pulls an image for a container configuration
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param image_name: Image name.
:type image_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | Below is the instruction that describes the task:
### Input:
Pulls an image for a container configuration
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param image_name: Image name.
:type image_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
### Response:
def pull(self, action, image_name, **kwargs):
"""
Pulls an image for a container configuration
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param image_name: Image name.
:type image_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
"""
config_id = action.config_id
registry, __, image = config_id.config_name.rpartition('/')
if registry and '.' in registry and registry not in self._login_registries:
self.login(action, registry, insecure_registry=kwargs.get('insecure_registry'))
log.info("Pulling image %s:%s.", config_id.config_name, config_id.instance_name)
res = action.client.pull(repository=config_id.config_name, tag=config_id.instance_name, **kwargs)
log.debug("Done pulling image %s:%s.", config_id.config_name, config_id.instance_name)
self._policy.images[action.client_name].refresh_repo(config_id.config_name)
log.debug("Refreshed image cache for repo %s.", config_id.config_name)
return res |
def parsedata(self, packet):
'''parse the data section of a packet, it can range from 0 to many bytes'''
data = []
datalength = ord(packet[3])
position = 4
while position < datalength + 4:
data.append(packet[position])
position += 1
return data | parse the data section of a packet, it can range from 0 to many bytes | Below is the instruction that describes the task:
### Input:
parse the data section of a packet, it can range from 0 to many bytes
### Response:
def parsedata(self, packet):
'''parse the data section of a packet, it can range from 0 to many bytes'''
data = []
datalength = ord(packet[3])
position = 4
while position < datalength + 4:
data.append(packet[position])
position += 1
return data |
def _original_vocab(tmp_dir):
"""Returns a set containing the original vocabulary.
This is important for comparing with published results.
Args:
tmp_dir: directory containing dataset.
Returns:
a set of strings
"""
vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/"
"vocab-2016-09-10.txt")
vocab_filename = os.path.basename(vocab_url + ".en")
vocab_filepath = os.path.join(tmp_dir, vocab_filename)
if not os.path.exists(vocab_filepath):
generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url)
return set([
text_encoder.native_to_unicode(l.strip())
for l in tf.gfile.Open(vocab_filepath)
]) | Returns a set containing the original vocabulary.
This is important for comparing with published results.
Args:
tmp_dir: directory containing dataset.
Returns:
a set of strings | Below is the instruction that describes the task:
### Input:
Returns a set containing the original vocabulary.
This is important for comparing with published results.
Args:
tmp_dir: directory containing dataset.
Returns:
a set of strings
### Response:
def _original_vocab(tmp_dir):
"""Returns a set containing the original vocabulary.
This is important for comparing with published results.
Args:
tmp_dir: directory containing dataset.
Returns:
a set of strings
"""
vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/"
"vocab-2016-09-10.txt")
vocab_filename = os.path.basename(vocab_url + ".en")
vocab_filepath = os.path.join(tmp_dir, vocab_filename)
if not os.path.exists(vocab_filepath):
generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url)
return set([
text_encoder.native_to_unicode(l.strip())
for l in tf.gfile.Open(vocab_filepath)
]) |
def get_security_attributes_for_user(user=None):
"""
Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified).
"""
if user is None:
user = get_current_user()
assert isinstance(user, security.TOKEN_USER), (
"user must be TOKEN_USER instance")
SD = security.SECURITY_DESCRIPTOR()
SA = security.SECURITY_ATTRIBUTES()
# by attaching the actual security descriptor, it will be garbage-
# collected with the security attributes
SA.descriptor = SD
SA.bInheritHandle = 1
ctypes.windll.advapi32.InitializeSecurityDescriptor(
ctypes.byref(SD),
security.SECURITY_DESCRIPTOR.REVISION)
ctypes.windll.advapi32.SetSecurityDescriptorOwner(
ctypes.byref(SD),
user.SID, 0)
return SA | Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified). | Below is the instruction that describes the task:
### Input:
Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified).
### Response:
def get_security_attributes_for_user(user=None):
"""
Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified).
"""
if user is None:
user = get_current_user()
assert isinstance(user, security.TOKEN_USER), (
"user must be TOKEN_USER instance")
SD = security.SECURITY_DESCRIPTOR()
SA = security.SECURITY_ATTRIBUTES()
# by attaching the actual security descriptor, it will be garbage-
# collected with the security attributes
SA.descriptor = SD
SA.bInheritHandle = 1
ctypes.windll.advapi32.InitializeSecurityDescriptor(
ctypes.byref(SD),
security.SECURITY_DESCRIPTOR.REVISION)
ctypes.windll.advapi32.SetSecurityDescriptorOwner(
ctypes.byref(SD),
user.SID, 0)
return SA |
def _get_transformers(self):
"""Load the contents of meta_file and extract information about the transformers.
Returns:
dict: tuple(str, str) -> Transformer.
"""
transformer_dict = {}
for table in self.metadata['tables']:
table_name = table['name']
for field in table['fields']:
transformer_type = field.get('type')
if transformer_type:
col_name = field['name']
transformer_dict[(table_name, col_name)] = transformer_type
return transformer_dict | Load the contents of meta_file and extract information about the transformers.
Returns:
dict: tuple(str, str) -> Transformer. | Below is the instruction that describes the task:
### Input:
Load the contents of meta_file and extract information about the transformers.
Returns:
dict: tuple(str, str) -> Transformer.
### Response:
def _get_transformers(self):
"""Load the contents of meta_file and extract information about the transformers.
Returns:
dict: tuple(str, str) -> Transformer.
"""
transformer_dict = {}
for table in self.metadata['tables']:
table_name = table['name']
for field in table['fields']:
transformer_type = field.get('type')
if transformer_type:
col_name = field['name']
transformer_dict[(table_name, col_name)] = transformer_type
return transformer_dict |
def handler(self, scheme_name=None):
""" Return handler which scheme name matches the specified one
:param scheme_name: scheme name to search for
:return: WSchemeHandler class or None (if matching handler was not found)
"""
if scheme_name is None:
return self.__default_handler_cls
for handler in self.__handlers_cls:
if handler.scheme_specification().scheme_name() == scheme_name:
return handler | Return handler which scheme name matches the specified one
:param scheme_name: scheme name to search for
:return: WSchemeHandler class or None (if matching handler was not found) | Below is the instruction that describes the task:
### Input:
Return handler which scheme name matches the specified one
:param scheme_name: scheme name to search for
:return: WSchemeHandler class or None (if matching handler was not found)
### Response:
def handler(self, scheme_name=None):
""" Return handler which scheme name matches the specified one
:param scheme_name: scheme name to search for
:return: WSchemeHandler class or None (if matching handler was not found)
"""
if scheme_name is None:
return self.__default_handler_cls
for handler in self.__handlers_cls:
if handler.scheme_specification().scheme_name() == scheme_name:
return handler |
def validate_account_status(self, data):
"""Performs field validation for account_status. If any
region is not deleted, account_status cannot be deleted
"""
deleted_status = 'deleted'
region_status = data.get('status')
account_status = data.get('account_status')
for region in region_status:
if region['status'] != deleted_status and account_status == deleted_status:
raise ValidationError('Account Status cannot be "deleted" if a region is not "deleted"') | Performs field validation for account_status. If any
region is not deleted, account_status cannot be deleted | Below is the instruction that describes the task:
### Input:
Performs field validation for account_status. If any
region is not deleted, account_status cannot be deleted
### Response:
def validate_account_status(self, data):
"""Performs field validation for account_status. If any
region is not deleted, account_status cannot be deleted
"""
deleted_status = 'deleted'
region_status = data.get('status')
account_status = data.get('account_status')
for region in region_status:
if region['status'] != deleted_status and account_status == deleted_status:
raise ValidationError('Account Status cannot be "deleted" if a region is not "deleted"') |
def has_parser(self, url_info: URLInfo):
'''Return whether a parser has been created for the URL.'''
key = self.url_info_key(url_info)
return key in self._parsers | Return whether a parser has been created for the URL. | Below is the instruction that describes the task:
### Input:
Return whether a parser has been created for the URL.
### Response:
def has_parser(self, url_info: URLInfo):
'''Return whether a parser has been created for the URL.'''
key = self.url_info_key(url_info)
return key in self._parsers |
def _send_message_to_topic(self, topic, message):
"""
Send a message to a Kafka topic.
Parameters
----------
topic : str
The kafka topic where the message should be sent to.
message : FranzEvent
The message to be sent.
Raises
------
franz.InvalidMessage
"""
message_result = self.producer.send(topic, message)
self.check_for_message_exception(message_result)
return message_result | Send a message to a Kafka topic.
Parameters
----------
topic : str
The kafka topic where the message should be sent to.
message : FranzEvent
The message to be sent.
Raises
------
franz.InvalidMessage | Below is the instruction that describes the task:
### Input:
Send a message to a Kafka topic.
Parameters
----------
topic : str
The kafka topic where the message should be sent to.
message : FranzEvent
The message to be sent.
Raises
------
franz.InvalidMessage
### Response:
def _send_message_to_topic(self, topic, message):
"""
Send a message to a Kafka topic.
Parameters
----------
topic : str
The kafka topic where the message should be sent to.
message : FranzEvent
The message to be sent.
Raises
------
franz.InvalidMessage
"""
message_result = self.producer.send(topic, message)
self.check_for_message_exception(message_result)
return message_result |
def walk_git_files(self, repo_path=''):
"""
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@type repo_path: str
@return: Iterator to traverse files under git control relative to main_repo_abspath.
@rtype: Iterable
"""
repo_abspath = path.join(self.main_repo_abspath, repo_path)
assert repo_abspath not in self._check_attr_gens
self._check_attr_gens[repo_abspath] = self.check_attr(repo_abspath, ['export-ignore'])
try:
repo_file_paths = self.run_git_shell(
'git ls-files -z --cached --full-name --no-empty-directory',
repo_abspath
).split('\0')[:-1]
for repo_file_path in repo_file_paths:
repo_file_abspath = path.join(repo_abspath, repo_file_path) # absolute file path
main_repo_file_path = path.join(repo_path, repo_file_path) # relative to main_repo_abspath
# Only list symlinks and files.
if not path.islink(repo_file_abspath) and path.isdir(repo_file_abspath):
continue
if self.is_file_excluded(repo_abspath, repo_file_path):
continue
yield main_repo_file_path
if self.force_sub:
self.run_git_shell('git submodule init', repo_abspath)
self.run_git_shell('git submodule update', repo_abspath)
try:
repo_gitmodules_abspath = path.join(repo_abspath, ".gitmodules")
with open(repo_gitmodules_abspath) as f:
lines = f.readlines()
for l in lines:
m = re.match("^\\s*path\\s*=\\s*(.*)\\s*$", l)
if m:
repo_submodule_path = m.group(1) # relative to repo_path
main_repo_submodule_path = path.join(repo_path, repo_submodule_path) # relative to main_repo_abspath
if self.is_file_excluded(repo_abspath, repo_submodule_path):
continue
for main_repo_submodule_file_path in self.walk_git_files(main_repo_submodule_path):
repo_submodule_file_path = path.relpath(main_repo_submodule_file_path, repo_path) # relative to repo_path
if self.is_file_excluded(repo_abspath, repo_submodule_file_path):
continue
yield main_repo_submodule_file_path
except IOError:
pass
finally:
self._check_attr_gens[repo_abspath].close()
del self._check_attr_gens[repo_abspath] | An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@type repo_path: str
@return: Iterator to traverse files under git control relative to main_repo_abspath.
@rtype: Iterable | Below is the instruction that describes the task:
### Input:
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@type repo_path: str
@return: Iterator to traverse files under git control relative to main_repo_abspath.
@rtype: Iterable
### Response:
def walk_git_files(self, repo_path=''):
"""
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@type repo_path: str
@return: Iterator to traverse files under git control relative to main_repo_abspath.
@rtype: Iterable
"""
repo_abspath = path.join(self.main_repo_abspath, repo_path)
assert repo_abspath not in self._check_attr_gens
self._check_attr_gens[repo_abspath] = self.check_attr(repo_abspath, ['export-ignore'])
try:
repo_file_paths = self.run_git_shell(
'git ls-files -z --cached --full-name --no-empty-directory',
repo_abspath
).split('\0')[:-1]
for repo_file_path in repo_file_paths:
repo_file_abspath = path.join(repo_abspath, repo_file_path) # absolute file path
main_repo_file_path = path.join(repo_path, repo_file_path) # relative to main_repo_abspath
# Only list symlinks and files.
if not path.islink(repo_file_abspath) and path.isdir(repo_file_abspath):
continue
if self.is_file_excluded(repo_abspath, repo_file_path):
continue
yield main_repo_file_path
if self.force_sub:
self.run_git_shell('git submodule init', repo_abspath)
self.run_git_shell('git submodule update', repo_abspath)
try:
repo_gitmodules_abspath = path.join(repo_abspath, ".gitmodules")
with open(repo_gitmodules_abspath) as f:
lines = f.readlines()
for l in lines:
m = re.match("^\\s*path\\s*=\\s*(.*)\\s*$", l)
if m:
repo_submodule_path = m.group(1) # relative to repo_path
main_repo_submodule_path = path.join(repo_path, repo_submodule_path) # relative to main_repo_abspath
if self.is_file_excluded(repo_abspath, repo_submodule_path):
continue
for main_repo_submodule_file_path in self.walk_git_files(main_repo_submodule_path):
repo_submodule_file_path = path.relpath(main_repo_submodule_file_path, repo_path) # relative to repo_path
if self.is_file_excluded(repo_abspath, repo_submodule_file_path):
continue
yield main_repo_submodule_file_path
except IOError:
pass
finally:
self._check_attr_gens[repo_abspath].close()
del self._check_attr_gens[repo_abspath] |
def _get_demand_graph(self):
"""create demand graph"""
# The number of clusters
K = self.origins.shape[0]
# Set the number of accounts in each cluster to be the same
# as for the nearest neighbor solution
demand = self.nearest_targets.groupby('origin_id')['geometry'].count().to_dict()
# Set up the graph so we can extract and initialize the node labels.
# For each iteration, we're going to sort all our data by their origin
# label assignments in order to properly index our nodes.
self.targets = self.targets.sort_values('labels').reset_index(drop=True)
# Add target nodes
g = nx.DiGraph()
g.add_nodes_from(self.targets['target_id'], demand=-1)
# Add origin nodes
for idx in demand:
g.add_node(int(idx), demand=demand[idx])
# Dictionary of labels (corresponding to the sales rep) for
# each med center node.
dict_M = {
i: (
self.targets[self.targets['target_id'] == i]['labels'].values
if i in self.targets.target_id.values
else np.array([demand[i]])
)
for i in g.nodes
}
logging.info('Graph and demand dictionary created')
return dict_M, demand | create demand graph | Below is the instruction that describes the task:
### Input:
create demand graph
### Response:
def _get_demand_graph(self):
"""create demand graph"""
# The number of clusters
K = self.origins.shape[0]
# Set the number of accounts in each cluster to be the same
# as for the nearest neighbor solution
demand = self.nearest_targets.groupby('origin_id')['geometry'].count().to_dict()
# Set up the graph so we can extract and initialize the node labels.
# For each iteration, we're going to sort all our data by their origin
# label assignments in order to properly index our nodes.
self.targets = self.targets.sort_values('labels').reset_index(drop=True)
# Add target nodes
g = nx.DiGraph()
g.add_nodes_from(self.targets['target_id'], demand=-1)
# Add origin nodes
for idx in demand:
g.add_node(int(idx), demand=demand[idx])
# Dictionary of labels (corresponding to the sales rep) for
# each med center node.
dict_M = {
i: (
self.targets[self.targets['target_id'] == i]['labels'].values
if i in self.targets.target_id.values
else np.array([demand[i]])
)
for i in g.nodes
}
logging.info('Graph and demand dictionary created')
return dict_M, demand |
def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() | Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame | Below is the instruction that describes the task:
### Input:
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
### Response:
def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() |
def delete_checkpoint(self, checkpoint_id, path):
"""delete a checkpoint for a file"""
with self.engine.begin() as db:
return delete_single_remote_checkpoint(
db, self.user_id, path, checkpoint_id,
) | delete a checkpoint for a file | Below is the instruction that describes the task:
### Input:
delete a checkpoint for a file
### Response:
def delete_checkpoint(self, checkpoint_id, path):
"""delete a checkpoint for a file"""
with self.engine.begin() as db:
return delete_single_remote_checkpoint(
db, self.user_id, path, checkpoint_id,
) |
def to_comment(comment):
    """
    Convert a string to a ``.properties`` file comment. All non-Latin-1
    characters in the string are escaped using ``\\uXXXX`` escapes (after
    converting non-BMP characters to surrogate pairs), a ``#`` is prepended to
    the string, any CR LF or CR line breaks in the string are converted to LF,
    and a ``#`` is inserted after any line break not already followed by a
    ``#`` or ``!``. No trailing newline is added.
    >>> to_comment('They say foo=bar,\\r\\nbut does bar=foo?')
    '#They say foo=bar,\\n#but does bar=foo?'
    :param comment: the string to convert to a comment
    :type comment: text string
    :rtype: text string
    """
    # Applied innermost-first: (1) normalize CR LF / lone CR to LF,
    # (2) insert '#' after any newline not already starting a comment line,
    # (3) \uXXXX-escape every character outside Latin-1 via _esc.
    return '#' + re.sub(r'[^\x00-\xFF]', _esc,
                        re.sub(r'\n(?![#!])', '\n#',
                               re.sub(r'\r\n?', '\n', comment))) | Convert a string to ``.properties`` file comment. All non-Latin-1
characters in the string are escaped using ``\\uXXXX`` escapes (after
converting non-BMP characters to surrogate pairs), a ``#`` is prepended to
the string, any CR LF or CR line breaks in the string are converted to LF,
and a ``#`` is inserted after any line break not already followed by a
``#`` or ``!``. No trailing newline is added.
>>> to_comment('They say foo=bar,\\r\\nbut does bar=foo?')
'#They say foo=bar,\\n#but does bar=foo?'
:param comment: the string to convert to a comment
:type comment: text string
:rtype: text string | Below is the instruction that describes the task:
### Input:
Convert a string to a ``.properties`` file comment. All non-Latin-1
characters in the string are escaped using ``\\uXXXX`` escapes (after
converting non-BMP characters to surrogate pairs), a ``#`` is prepended to
the string, any CR LF or CR line breaks in the string are converted to LF,
and a ``#`` is inserted after any line break not already followed by a
``#`` or ``!``. No trailing newline is added.
>>> to_comment('They say foo=bar,\\r\\nbut does bar=foo?')
'#They say foo=bar,\\n#but does bar=foo?'
:param comment: the string to convert to a comment
:type comment: text string
:rtype: text string
### Response:
def to_comment(comment):
"""
Convert a string to a ``.properties`` file comment. All non-Latin-1
characters in the string are escaped using ``\\uXXXX`` escapes (after
converting non-BMP characters to surrogate pairs), a ``#`` is prepended to
the string, any CR LF or CR line breaks in the string are converted to LF,
and a ``#`` is inserted after any line break not already followed by a
``#`` or ``!``. No trailing newline is added.
>>> to_comment('They say foo=bar,\\r\\nbut does bar=foo?')
'#They say foo=bar,\\n#but does bar=foo?'
:param comment: the string to convert to a comment
:type comment: text string
:rtype: text string
"""
return '#' + re.sub(r'[^\x00-\xFF]', _esc,
re.sub(r'\n(?![#!])', '\n#',
re.sub(r'\r\n?', '\n', comment))) |
def parse_user(raw):
""" Parse nick(!user(@host)?)? structure. """
nick = raw
user = None
host = None
# Attempt to extract host.
if protocol.HOST_SEPARATOR in raw:
raw, host = raw.split(protocol.HOST_SEPARATOR)
# Attempt to extract user.
if protocol.USER_SEPARATOR in raw:
nick, user = raw.split(protocol.USER_SEPARATOR)
return nick, user, host | Parse nick(!user(@host)?)? structure. | Below is the the instruction that describes the task:
### Input:
Parse nick(!user(@host)?)? structure.
### Response:
def parse_user(raw):
""" Parse nick(!user(@host)?)? structure. """
nick = raw
user = None
host = None
# Attempt to extract host.
if protocol.HOST_SEPARATOR in raw:
raw, host = raw.split(protocol.HOST_SEPARATOR)
# Attempt to extract user.
if protocol.USER_SEPARATOR in raw:
nick, user = raw.split(protocol.USER_SEPARATOR)
return nick, user, host |
def get_paged_request(url, headers=None, **params):
    """get a full list, handling APIv3's paging"""
    results = []
    params.setdefault("per_page", 100)
    while True:
        if '?' in url:
            # A 'next' pagination link already embeds the full query string,
            # so sending params again would duplicate/conflict with it.
            params = None
            print("fetching %s" % url, file=sys.stderr)
        else:
            print("fetching %s with %s" % (url, params), file=sys.stderr)
        response = requests.get(url, headers=headers, params=params)
        response.raise_for_status()
        results.extend(response.json())
        # Follow RFC 5988 Link headers exposed via response.links until the
        # server stops advertising a next page.
        if 'next' in response.links:
            url = response.links['next']['url']
        else:
            break
    return results | get a full list, handling APIv3's paging | Below is the the instruction that describes the task:
### Input:
get a full list, handling APIv3's paging
### Response:
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results |
def tail(
filepath = "log.txt",
lines = 50
):
"""
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
"""
try:
filepath = os.path.expanduser(os.path.expandvars(filepath))
if os.path.isfile(filepath):
text = subprocess.check_output(["tail", "-" + str(lines), filepath])
if text:
return text
else:
return False
else:
return False
except:
return False | Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False. | Below is the instruction that describes the task:
### Input:
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
### Response:
def tail(
filepath = "log.txt",
lines = 50
):
"""
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
"""
try:
filepath = os.path.expanduser(os.path.expandvars(filepath))
if os.path.isfile(filepath):
text = subprocess.check_output(["tail", "-" + str(lines), filepath])
if text:
return text
else:
return False
else:
return False
except:
return False |
def loadTableData(self, df, df_key='index',table="node", table_key_column = "name", \
    network="current",namespace="default",verbose=False):
    """
    Loads tables into cytoscape.
    :param df: a pandas dataframe to load
    :param df_key: key column in df, default="index"
    :param table: target table, default="node"
    :param table_key_column: table key column, default="name"
    :param network (string, optional): Specifies a network by name, or by
    SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
    value can also be used to specify the current network.
    :param namespace (string, optional): Node, Edge, and Network objects support
    the default, local, and hidden namespaces. Root networks also support the
    shared namespace. Custom namespaces may be specified by Apps.
    :param verbose: print more information
    :returns: output of put request
    """
    # Split the stored REST endpoint URL into host, port and API version.
    u=self.__url
    host=u.split("//")[1].split(":")[0]
    port=u.split(":")[2].split("/")[0]
    version=u.split(":")[2].split("/")[1]
    # Anything that is not already a SUID (int) is resolved by name first.
    if type(network) != int:
        network=check_network(self,network,verbose=verbose)
    # Look up both the SUID and the display name of the target network.
    PARAMS=set_param(["columnList","namespace","network"],["SUID",namespace,network])
    networkID=api(namespace="network", command="get attribute",PARAMS=PARAMS, host=host,port=str(port),version=version)
    PARAMS=set_param(["columnList","namespace","network"],["name",namespace,network])
    networkname=api(namespace="network", command="get attribute",PARAMS=PARAMS, host=host,port=str(port),version=version)
    network=networkID[0]["SUID"]
    networkname=networkname[0]["name"]
    # Work on a copy so the caller's DataFrame is never mutated.
    tmp=df.copy()
    if df_key!="index":
        tmp.index=tmp[df_key].tolist()
        tmp=tmp.drop([df_key],axis=1)
    tablen=networkname+" default node"
    data=[]
    # Build one record per non-NaN cell; non-string values are coerced to
    # float because the JSON payload only carries strings and numbers.
    for c in tmp.columns.tolist():
        tmpcol=tmp[[c]].dropna()
        for r in tmpcol.index.tolist():
            cell={}
            cell[str(table_key_column)]=str(r) # {"name":"p53"}
            val=tmpcol.loc[r,c]
            if type(val) != str:
                val=float(val)
            cell[str(c)]=val
            data.append(cell)
    upload={"key":table_key_column,"dataKey":table_key_column,\
        "data":data}
    # PUT the records to the table endpoint of the resolved network.
    URL="http://"+str(host)+":"+str(port)+"/v1/networks/"+str(network)+"/tables/"+namespace+table
    if verbose:
        print("'"+URL+"'", upload)
    sys.stdout.flush()
    r = requests.put(url = URL, json = upload)
    if verbose:
        print(r)
    checkresponse(r)
    res=r.content
    return res | Loads tables into cytoscape.
:param df: a pandas dataframe to load
:param df_key: key column in df, default="index"
:param table: target table, default="node"
:param table_key_column: table key column, default="name"
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param namespace (string, optional): Node, Edge, and Network objects support
the default, local, and hidden namespaces. Root networks also support the
shared namespace. Custom namespaces may be specified by Apps.
:param verbose: print more information
:returns: output of put request | Below is the instruction that describes the task:
### Input:
Loads tables into cytoscape.
:param df: a pandas dataframe to load
:param df_key: key column in df, default="index"
:param table: target table, default="node"
:param table_key_column: table key column, default="name"
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param namespace (string, optional): Node, Edge, and Network objects support
the default, local, and hidden namespaces. Root networks also support the
shared namespace. Custom namespaces may be specified by Apps.
:param verbose: print more information
:returns: output of put request
### Response:
def loadTableData(self, df, df_key='index',table="node", table_key_column = "name", \
network="current",namespace="default",verbose=False):
"""
Loads tables into cytoscape.
:param df: a pandas dataframe to load
:param df_key: key column in df, default="index"
:param table: target table, default="node"
:param table_key_column: table key column, default="name"
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param namespace (string, optional): Node, Edge, and Network objects support
the default, local, and hidden namespaces. Root networks also support the
shared namespace. Custom namespaces may be specified by Apps.
:param verbose: print more information
:returns: output of put request
"""
u=self.__url
host=u.split("//")[1].split(":")[0]
port=u.split(":")[2].split("/")[0]
version=u.split(":")[2].split("/")[1]
if type(network) != int:
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["columnList","namespace","network"],["SUID",namespace,network])
networkID=api(namespace="network", command="get attribute",PARAMS=PARAMS, host=host,port=str(port),version=version)
PARAMS=set_param(["columnList","namespace","network"],["name",namespace,network])
networkname=api(namespace="network", command="get attribute",PARAMS=PARAMS, host=host,port=str(port),version=version)
network=networkID[0]["SUID"]
networkname=networkname[0]["name"]
tmp=df.copy()
if df_key!="index":
tmp.index=tmp[df_key].tolist()
tmp=tmp.drop([df_key],axis=1)
tablen=networkname+" default node"
data=[]
for c in tmp.columns.tolist():
tmpcol=tmp[[c]].dropna()
for r in tmpcol.index.tolist():
cell={}
cell[str(table_key_column)]=str(r) # {"name":"p53"}
val=tmpcol.loc[r,c]
if type(val) != str:
val=float(val)
cell[str(c)]=val
data.append(cell)
upload={"key":table_key_column,"dataKey":table_key_column,\
"data":data}
URL="http://"+str(host)+":"+str(port)+"/v1/networks/"+str(network)+"/tables/"+namespace+table
if verbose:
print("'"+URL+"'", upload)
sys.stdout.flush()
r = requests.put(url = URL, json = upload)
if verbose:
print(r)
checkresponse(r)
res=r.content
return res |
def add_analysis_attributes(self, group_name, attrs, clear=False):
    """ Add attributes on the group or dataset specified.
    :param group_name: The name of the group (or dataset).
    :param attrs: A dictionary representing the attributes to add.
    :param clear: If set, any existing attributes will be cleared.
    The specified group name can be any existing path (relative to the
    "Analyses" group. It can be a group or a dataset.
    """
    # Writing attributes requires the file to be open in a writable mode.
    self.assert_writeable()
    # Qualify the caller-relative name under the "Analyses" group.
    group = 'Analyses/{}'.format(group_name)
    self._add_attributes(group, attrs, clear) | Add attributes on the group or dataset specified.
:param group_name: The name of the group (or dataset).
:param attrs: A dictionary representing the attributes to add.
:param clear: If set, any existing attributes will be cleared.
The specified group name can be any existing path (relative to the
"Analyses" group. It can be a group or a dataset. | Below is the instruction that describes the task:
### Input:
Add attributes on the group or dataset specified.
:param group_name: The name of the group (or dataset).
:param attrs: A dictionary representing the attributes to add.
:param clear: If set, any existing attributes will be cleared.
The specified group name can be any existing path (relative to the
"Analyses" group. It can be a group or a dataset.
### Response:
def add_analysis_attributes(self, group_name, attrs, clear=False):
""" Add attributes on the group or dataset specified.
:param group_name: The name of the group (or dataset).
:param attrs: A dictionary representing the attributes to add.
:param clear: If set, any existing attributes will be cleared.
The specified group name can be any existing path (relative to the
"Analyses" group. It can be a group or a dataset.
"""
self.assert_writeable()
group = 'Analyses/{}'.format(group_name)
self._add_attributes(group, attrs, clear) |
def remove_memberships(self, team, users):
    '''
    **Description**
    Remove user memberships from specified team.
    **Arguments**
    - **team**: the name of the team from which user memberships are removed
    - **users**: list of usernames which should be removed from team
    **Example**
    `examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
    '''
    res = self.list_memberships(team)
    # Propagate [False, error] results from the lookup unchanged.
    if res[0] is False:
        return res
    old_memberships = res[1]
    # Keep only the memberships whose username is not being removed.
    new_memberships = {k: v for k, v in old_memberships.items() if k not in users}
    res = self.edit_team(team, new_memberships)
    if res[0] is False:
        return res
    else:
        return [True, None] | **Description**
Remove user memberships from specified team.
**Arguments**
- **team**: the name of the team from which user memberships are removed
- **users**: list of usernames which should be removed from team
**Example**
`examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_ | Below is the instruction that describes the task:
### Input:
**Description**
Remove user memberships from specified team.
**Arguments**
- **team**: the name of the team from which user memberships are removed
- **users**: list of usernames which should be removed from team
**Example**
`examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
### Response:
def remove_memberships(self, team, users):
'''
**Description**
Remove user memberships from specified team.
**Arguments**
- **team**: the name of the team from which user memberships are removed
- **users**: list of usernames which should be removed from team
**Example**
`examples/user_team_mgmt_extended.py <https://github.com/draios/python-sdc-client/blob/master/examples/user_team_mgmt_extended.py>`_
'''
res = self.list_memberships(team)
if res[0] is False:
return res
old_memberships = res[1]
new_memberships = {k: v for k, v in old_memberships.items() if k not in users}
res = self.edit_team(team, new_memberships)
if res[0] is False:
return res
else:
return [True, None] |
def deserialize_current_record_to_current_model(record, current_model):
    """
    Utility function that will take a Dynamo event record and turn it into the proper Current Dynamo object.
    This will remove the "current table" specific fields, and properly deserialize the ugly Dynamo datatypes away.
    :param record: a Dynamo stream event record (dict)
    :param current_model: the Current Dynamo model class to instantiate
    :return: an instance of current_model, built from the record's NewImage
    """
    # Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
    if record.get(EVENT_TOO_BIG_FLAG):
        return get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)
    new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
    data = {}
    # Deserialize each Dynamo-typed value ({"S": ...}, {"N": ...}, ...) into
    # a plain Python value before handing it to the model constructor.
    for item, value in new_image.items():
        # This could end up as loss of precision
        data[item] = DESER.deserialize(value)
    return current_model(**data) | Utility function that will take a Dynamo event record and turn it into the proper Current Dynamo object.
This will remove the "current table" specific fields, and properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:return: | Below is the instruction that describes the task:
### Input:
Utility function that will take a Dynamo event record and turn it into the proper Current Dynamo object.
This will remove the "current table" specific fields, and properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:return:
### Response:
def deserialize_current_record_to_current_model(record, current_model):
"""
Utility function that will take a Dynamo event record and turn it into the proper Current Dynamo object.
This will remove the "current table" specific fields, and properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:return:
"""
# Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
if record.get(EVENT_TOO_BIG_FLAG):
return get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)
new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
data = {}
for item, value in new_image.items():
# This could end up as loss of precision
data[item] = DESER.deserialize(value)
return current_model(**data) |
def circular(args):
    """
    %prog circular fastafile startpos
    Make circular genome, startpos is the place to start the sequence. This can
    be determined by mapping to a reference. Self overlaps are then resolved.
    Startpos is 1-based.
    """
    from jcvi.assembly.goldenpath import overlap
    p = OptionParser(circular.__doc__)
    p.add_option("--flip", default=False, action="store_true",
                 help="Reverse complement the sequence")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    fastafile, startpos = args
    startpos = int(startpos)
    # Only the first record of the FASTA file is used.
    key, seq = next(parse_fasta(fastafile))
    # Split at startpos: aseq becomes the new origin, bseq the wrapped tail.
    # NOTE(review): startpos is documented as 1-based but is used directly as
    # a 0-based slice index here - confirm the intended off-by-one behavior.
    aseq = seq[startpos:]
    bseq = seq[:startpos]
    aseqfile, bseqfile = "a.seq", "b.seq"
    # Write both halves to temporary FASTA files for the overlap tool.
    for f, s in zip((aseqfile, bseqfile), (aseq, bseq)):
        fw = must_open(f, "w")
        print(">{0}\n{1}".format(f, s), file=fw)
        fw.close()
    # Resolve the self-overlap between the two halves, then join them with
    # the overlapping bases counted once.
    o = overlap([aseqfile, bseqfile])
    seq = aseq[:o.qstop] + bseq[o.sstop:]
    seq = Seq(seq)
    if opts.flip:
        seq = seq.reverse_complement()
    for f in (aseqfile, bseqfile):
        os.remove(f)
    fw = must_open(opts.outfile, "w")
    rec = SeqRecord(seq, id=key, description="")
    SeqIO.write([rec], fw, "fasta")
    fw.close() | %prog circular fastafile startpos
Make circular genome, startpos is the place to start the sequence. This can
be determined by mapping to a reference. Self overlaps are then resolved.
Startpos is 1-based. | Below is the instruction that describes the task:
### Input:
%prog circular fastafile startpos
Make circular genome, startpos is the place to start the sequence. This can
be determined by mapping to a reference. Self overlaps are then resolved.
Startpos is 1-based.
### Response:
def circular(args):
"""
%prog circular fastafile startpos
Make circular genome, startpos is the place to start the sequence. This can
be determined by mapping to a reference. Self overlaps are then resolved.
Startpos is 1-based.
"""
from jcvi.assembly.goldenpath import overlap
p = OptionParser(circular.__doc__)
p.add_option("--flip", default=False, action="store_true",
help="Reverse complement the sequence")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, startpos = args
startpos = int(startpos)
key, seq = next(parse_fasta(fastafile))
aseq = seq[startpos:]
bseq = seq[:startpos]
aseqfile, bseqfile = "a.seq", "b.seq"
for f, s in zip((aseqfile, bseqfile), (aseq, bseq)):
fw = must_open(f, "w")
print(">{0}\n{1}".format(f, s), file=fw)
fw.close()
o = overlap([aseqfile, bseqfile])
seq = aseq[:o.qstop] + bseq[o.sstop:]
seq = Seq(seq)
if opts.flip:
seq = seq.reverse_complement()
for f in (aseqfile, bseqfile):
os.remove(f)
fw = must_open(opts.outfile, "w")
rec = SeqRecord(seq, id=key, description="")
SeqIO.write([rec], fw, "fasta")
fw.close() |
def box2poly(line):
"""
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left.
"""
words = re.split('[(\s,)]', line)
ra = words[1]
dec = words[2]
width = words[3]
height = words[4]
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the "
height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the "
center = SkyCoord(ra, dec)
tl = center.ra.degree+width.degree, center.dec.degree+height.degree
tr = center.ra.degree-width.degree, center.dec.degree+height.degree
bl = center.ra.degree+width.degree, center.dec.degree-height.degree
br = center.ra.degree-width.degree, center.dec.degree-height.degree
return np.ravel([tl, tr, br, bl]).tolist() | Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left. | Below is the instruction that describes the task:
### Input:
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left.
### Response:
def box2poly(line):
"""
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left.
"""
words = re.split('[(\s,)]', line)
ra = words[1]
dec = words[2]
width = words[3]
height = words[4]
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the "
height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the "
center = SkyCoord(ra, dec)
tl = center.ra.degree+width.degree, center.dec.degree+height.degree
tr = center.ra.degree-width.degree, center.dec.degree+height.degree
bl = center.ra.degree+width.degree, center.dec.degree-height.degree
br = center.ra.degree-width.degree, center.dec.degree-height.degree
return np.ravel([tl, tr, br, bl]).tolist() |
def _add_strings_to_commastring(self, field, strings):
    # type: (str, List[str]) -> bool
    """Add a list of strings to a comma separated list of strings
    Args:
        field (str): Field containing comma separated list
        strings (List[str]): list of strings to add
    Returns:
        bool: True if all strings added or False if any already present.
    """
    allstringsadded = True
    for string in strings:
        # Deliberately no early break: every string is still attempted even
        # after a duplicate is found, so all new values do get added.
        if not self._add_string_to_commastring(field, string):
            allstringsadded = False
    return allstringsadded | Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present. | Below is the instruction that describes the task:
### Input:
Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
### Response:
def _add_strings_to_commastring(self, field, strings):
# type: (str, List[str]) -> bool
"""Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
"""
allstringsadded = True
for string in strings:
if not self._add_string_to_commastring(field, string):
allstringsadded = False
return allstringsadded |
def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.
    This function also updates the internal state to be ready to check
    the next include.
    Args:
      header_type: One of the _XXX_HEADER constants defined above.
    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    # Prepared up front; only returned when the header is out of order.
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))
    # Remember the section so a section change can be detected at the end.
    last_section = self._section
    if header_type == _C_SYS_HEADER:
        if self._section <= self._C_SECTION:
            self._section = self._C_SECTION
        else:
            self._last_header = ''
            return error_message
    elif header_type == _CPP_SYS_HEADER:
        if self._section <= self._CPP_SECTION:
            self._section = self._CPP_SECTION
        else:
            self._last_header = ''
            return error_message
    elif header_type == _LIKELY_MY_HEADER:
        if self._section <= self._MY_H_SECTION:
            self._section = self._MY_H_SECTION
        else:
            self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
        if self._section <= self._MY_H_SECTION:
            self._section = self._MY_H_SECTION
        else:
            # This will always be the fallback because we're not sure
            # enough that the header is associated with this file.
            self._section = self._OTHER_H_SECTION
    else:
        assert header_type == _OTHER_HEADER
        self._section = self._OTHER_H_SECTION
    # Entering a new section resets the alphabetical-order tracking.
    if last_section != self._section:
        self._last_header = ''
    return '' | Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong. | Below is the instruction that describes the task:
### Input:
Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
### Response:
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return '' |
def child_of(self, tax_id):
    """Return None or a tax id of a child of *tax_id*.
    If *tax_id* is None, then always returns None. Otherwise
    returns a child if one exists, else None. The child must have
    a proper rank below that of tax_id (i.e., genus, species, but
    not no_rank or below_below_kingdom).
    """
    if tax_id is None:
        return None
    parent_id, rank = self._node(tax_id)  # parent_id is unused; only rank matters here
    # Children must both point at tax_id as parent AND carry one of the
    # proper ranks strictly below this node's rank.
    s = select([self.nodes.c.tax_id],
               and_(self.nodes.c.parent_id == tax_id,
                    or_(*[self.nodes.c.rank == r
                          for r in self.ranks_below(rank)])))
    res = s.execute()
    output = res.fetchone()
    if not output:
        msg = ('No children of tax_id {} with '
               'rank below {} found in database')
        msg = msg.format(tax_id, rank)
        log.warning(msg)
        return None
    else:
        r = output[0]
        # Sanity check: the returned child must descend from tax_id.
        assert self.is_ancestor_of(r, tax_id)
        return r | Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom). | Below is the instruction that describes the task:
### Input:
Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom).
### Response:
def child_of(self, tax_id):
"""Return None or a tax id of a child of *tax_id*.
If *tax_id* is None, then always returns None. Otherwise
returns a child if one exists, else None. The child must have
a proper rank below that of tax_id (i.e., genus, species, but
not no_rank or below_below_kingdom).
"""
if tax_id is None:
return None
parent_id, rank = self._node(tax_id)
s = select([self.nodes.c.tax_id],
and_(self.nodes.c.parent_id == tax_id,
or_(*[self.nodes.c.rank == r
for r in self.ranks_below(rank)])))
res = s.execute()
output = res.fetchone()
if not output:
msg = ('No children of tax_id {} with '
'rank below {} found in database')
msg = msg.format(tax_id, rank)
log.warning(msg)
return None
else:
r = output[0]
assert self.is_ancestor_of(r, tax_id)
return r |
def create_image_uri(region, framework, instance_type, framework_version, py_version=None,
account='520713654638', accelerator_type=None, optimized_families=None):
"""Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).
framework_version (str): The version of the framework.
py_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'.
If not specified, image uri will not include a python component.
account (str): AWS account that contains the image. (default: '520713654638')
accelerator_type (str): SageMaker Elastic Inference accelerator type.
optimized_families (str): Instance families for which there exist specific optimized images.
Returns:
str: The appropriate image URI based on the given parameters.
"""
optimized_families = optimized_families or []
if py_version and py_version not in VALID_PY_VERSIONS:
raise ValueError('invalid py_version argument: {}'.format(py_version))
# Handle Account Number for Gov Cloud
account = VALID_ACCOUNTS_BY_REGION.get(region, account)
# Handle Local Mode
if instance_type.startswith('local'):
device_type = 'cpu' if instance_type == 'local' else 'gpu'
elif not instance_type.startswith('ml.'):
raise ValueError('{} is not a valid SageMaker instance type. See: '
'https://aws.amazon.com/sagemaker/pricing/instance-types/'.format(instance_type))
else:
family = instance_type.split('.')[1]
# For some frameworks, we have optimized images for specific families, e.g c5 or p3. In those cases,
# we use the family name in the image tag. In other cases, we use 'cpu' or 'gpu'.
if family in optimized_families:
device_type = family
elif family[0] in ['g', 'p']:
device_type = 'gpu'
else:
device_type = 'cpu'
if py_version:
tag = "{}-{}-{}".format(framework_version, device_type, py_version)
else:
tag = "{}-{}".format(framework_version, device_type)
if _accelerator_type_valid_for_framework(framework=framework, accelerator_type=accelerator_type,
optimized_families=optimized_families):
framework += '-eia'
return "{}/sagemaker-{}:{}" \
.format(get_ecr_image_uri_prefix(account, region), framework, tag) | Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).
framework_version (str): The version of the framework.
py_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'.
If not specified, image uri will not include a python component.
account (str): AWS account that contains the image. (default: '520713654638')
accelerator_type (str): SageMaker Elastic Inference accelerator type.
optimized_families (str): Instance families for which there exist specific optimized images.
Returns:
str: The appropriate image URI based on the given parameters. | Below is the instruction that describes the task:
### Input:
Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).
framework_version (str): The version of the framework.
py_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'.
If not specified, image uri will not include a python component.
account (str): AWS account that contains the image. (default: '520713654638')
accelerator_type (str): SageMaker Elastic Inference accelerator type.
optimized_families (str): Instance families for which there exist specific optimized images.
Returns:
str: The appropriate image URI based on the given parameters.
### Response:
def create_image_uri(region, framework, instance_type, framework_version, py_version=None,
                     account='520713654638', accelerator_type=None, optimized_families=None):
    """Return the ECR URI of an image.
    Args:
        region (str): AWS region where the image is uploaded.
        framework (str): framework used by the image.
        instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).
        framework_version (str): The version of the framework.
        py_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'.
            If not specified, image uri will not include a python component.
        account (str): AWS account that contains the image. (default: '520713654638')
        accelerator_type (str): SageMaker Elastic Inference accelerator type. When valid for
            the framework, '-eia' is appended to the framework name in the URI.
        optimized_families (list of str): Instance families for which there exist specific
            optimized images. (NOTE(review): previously documented as ``str``, but the code
            treats it as a list/container via ``in`` membership tests.)
    Returns:
        str: The appropriate image URI based on the given parameters.
    Raises:
        ValueError: If ``py_version`` is invalid, or ``instance_type`` is neither a
            local-mode type nor an 'ml.*' SageMaker instance type.
    """
    optimized_families = optimized_families or []
    if py_version and py_version not in VALID_PY_VERSIONS:
        raise ValueError('invalid py_version argument: {}'.format(py_version))
    # Handle Account Number for Gov Cloud
    account = VALID_ACCOUNTS_BY_REGION.get(region, account)
    # Handle Local Mode
    if instance_type.startswith('local'):
        # plain 'local' runs on CPU; any other local variant (e.g. 'local_gpu') is GPU
        device_type = 'cpu' if instance_type == 'local' else 'gpu'
    elif not instance_type.startswith('ml.'):
        raise ValueError('{} is not a valid SageMaker instance type. See: '
                         'https://aws.amazon.com/sagemaker/pricing/instance-types/'.format(instance_type))
    else:
        # 'ml.<family><size>.<variant>' -> take the '<family><size>' token
        family = instance_type.split('.')[1]
        # For some frameworks, we have optimized images for specific families, e.g c5 or p3. In those cases,
        # we use the family name in the image tag. In other cases, we use 'cpu' or 'gpu'.
        if family in optimized_families:
            device_type = family
        elif family[0] in ['g', 'p']:
            device_type = 'gpu'
        else:
            device_type = 'cpu'
    if py_version:
        tag = "{}-{}-{}".format(framework_version, device_type, py_version)
    else:
        tag = "{}-{}".format(framework_version, device_type)
    if _accelerator_type_valid_for_framework(framework=framework, accelerator_type=accelerator_type,
                                             optimized_families=optimized_families):
        framework += '-eia'
    return "{}/sagemaker-{}:{}" \
        .format(get_ecr_image_uri_prefix(account, region), framework, tag)
def be_array_from_bytes(fmt, data):
"""
Reads an array from bytestring with big-endian data.
"""
arr = array.array(str(fmt), data)
    return fix_byteorder(arr) | Reads an array from bytestring with big-endian data. | Below is the instruction that describes the task:
### Input:
Reads an array from bytestring with big-endian data.
### Response:
def be_array_from_bytes(fmt, data):
    """
    Deserialize *data*, a bytestring holding big-endian values, into an
    ``array.array`` of typecode *fmt*, normalising byte order for the host.
    """
    return fix_byteorder(array.array(str(fmt), data))
def rewrite(self, source_bucket, source_object, destination_bucket,
destination_object=None):
"""
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if (source_bucket == destination_bucket and
source_object == destination_object):
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(bucket_name=source_bucket)
source_object = source_bucket.blob(blob_name=source_object)
destination_bucket = client.get_bucket(bucket_name=destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object, token=token
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object, destination_bucket.name) | Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str | Below is the instruction that describes the task:
### Input:
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
### Response:
def rewrite(self, source_bucket, source_object, destination_bucket,
            destination_object=None):
    """
    Copy an object via the GCS rewrite API, which -- unlike copy -- also
    works on files over 5 TB and when copying between locations and/or
    storage classes.
    destination_object can be omitted, in which case source_object is used.
    :param source_bucket: The bucket of the object to copy from.
    :type source_bucket: str
    :param source_object: The object to copy.
    :type source_object: str
    :param destination_bucket: The bucket the object is copied to.
    :type destination_bucket: str
    :param destination_object: The (renamed) path of the object if given.
        Can be omitted; then the same name is used.
    :type destination_object: str
    """
    destination_object = destination_object or source_object
    same_location = (source_bucket == destination_bucket and
                     source_object == destination_object)
    if same_location:
        raise ValueError(
            'Either source/destination bucket or source/destination object '
            'must be different, not both the same: bucket=%s, object=%s' %
            (source_bucket, source_object))
    if not source_bucket or not source_object:
        raise ValueError('source_bucket and source_object cannot be empty.')
    client = self.get_conn()
    src_bucket = client.get_bucket(bucket_name=source_bucket)
    src_blob = src_bucket.blob(blob_name=source_object)
    dst_bucket = client.get_bucket(bucket_name=destination_bucket)
    # First rewrite call; for large objects the service hands back a token
    # and we must keep calling until the token comes back as None.
    token, bytes_rewritten, total_bytes = dst_bucket.blob(
        blob_name=destination_object).rewrite(source=src_blob)
    self.log.info('Total Bytes: %s | Bytes Written: %s',
                  total_bytes, bytes_rewritten)
    while token is not None:
        token, bytes_rewritten, total_bytes = dst_bucket.blob(
            blob_name=destination_object).rewrite(
                source=src_blob, token=token)
        self.log.info('Total Bytes: %s | Bytes Written: %s',
                      total_bytes, bytes_rewritten)
    self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
                  src_blob.name, src_bucket.name,
                  destination_object, dst_bucket.name)
def wordnet_annotations(self):
"""The list of wordnet annotations of ``words`` layer."""
if not self.is_tagged(WORDNET):
self.tag_wordnet()
    return [[a[WORDNET] for a in analysis] for analysis in self.analysis] | The list of wordnet annotations of ``words`` layer. | Below is the instruction that describes the task:
### Input:
The list of wordnet annotations of ``words`` layer.
### Response:
def wordnet_annotations(self):
    """The list of wordnet annotations of ``words`` layer."""
    # Lazily run the wordnet tagger the first time annotations are requested.
    if not self.is_tagged(WORDNET):
        self.tag_wordnet()
    annotations = []
    for word_analysis in self.analysis:
        annotations.append([entry[WORDNET] for entry in word_analysis])
    return annotations
def _parse_flowcontrol_receive(self, config):
"""Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
"""
value = 'off'
match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_receive=value) | Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict | Below is the instruction that describes the task:
### Input:
Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
### Response:
def _parse_flowcontrol_receive(self, config):
"""Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
"""
value = 'off'
match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_receive=value) |
def spikesplot(ts_z, outer_gs=None, tr=None, zscored=True, spike_thresh=6., title='Spike plot',
ax=None, cmap='viridis', hide_x=True, nskip=0):
"""
A spikes plot. Thanks to Bob Dogherty (this docstring needs be improved with proper ack)
"""
if ax is None:
ax = plt.gca()
if outer_gs is not None:
gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer_gs,
width_ratios=[1, 100], wspace=0.0)
ax = plt.subplot(gs[1])
# Define TR and number of frames
if tr is None:
tr = 1.
# Load timeseries, zscored slice-wise
nslices = ts_z.shape[0]
ntsteps = ts_z.shape[1]
# Load a colormap
my_cmap = cm.get_cmap(cmap)
norm = Normalize(vmin=0, vmax=float(nslices - 1))
colors = [my_cmap(norm(sl)) for sl in range(nslices)]
stem = len(np.unique(ts_z).tolist()) == 2
# Plot one line per axial slice timeseries
for sl in range(nslices):
if not stem:
ax.plot(ts_z[sl, :], color=colors[sl], lw=0.5)
else:
markerline, stemlines, baseline = ax.stem(ts_z[sl, :])
plt.setp(markerline, 'markerfacecolor', colors[sl])
plt.setp(baseline, 'color', colors[sl], 'linewidth', 1)
plt.setp(stemlines, 'color', colors[sl], 'linewidth', 1)
# Handle X, Y axes
ax.grid(False)
# Handle X axis
last = ntsteps - 1
ax.set_xlim(0, last)
xticks = list(range(0, last)[::20]) + [last] if not hide_x else []
ax.set_xticks(xticks)
if not hide_x:
if tr is None:
ax.set_xlabel('time (frame #)')
else:
ax.set_xlabel('time (s)')
ax.set_xticklabels(
['%.02f' % t for t in (tr * np.array(xticks)).tolist()])
# Handle Y axis
ylabel = 'slice-wise noise average on background'
if zscored:
ylabel += ' (z-scored)'
zs_max = np.abs(ts_z).max()
ax.set_ylim((-(np.abs(ts_z[:, nskip:]).max()) * 1.05,
(np.abs(ts_z[:, nskip:]).max()) * 1.05))
ytick_vals = np.arange(0.0, zs_max, float(np.floor(zs_max / 2.)))
yticks = list(
reversed((-1.0 * ytick_vals[ytick_vals > 0]).tolist())) + ytick_vals.tolist()
# TODO plot min/max or mark spikes
# yticks.insert(0, ts_z.min())
# yticks += [ts_z.max()]
for val in ytick_vals:
ax.plot((0, ntsteps - 1), (-val, -val), 'k:', alpha=.2)
ax.plot((0, ntsteps - 1), (val, val), 'k:', alpha=.2)
# Plot spike threshold
if zs_max < spike_thresh:
ax.plot((0, ntsteps - 1), (-spike_thresh, -spike_thresh), 'k:')
ax.plot((0, ntsteps - 1), (spike_thresh, spike_thresh), 'k:')
else:
yticks = [ts_z[:, nskip:].min(),
np.median(ts_z[:, nskip:]),
ts_z[:, nskip:].max()]
ax.set_ylim(0, max(yticks[-1] * 1.05, (yticks[-1] - yticks[0]) * 2.0 + yticks[-1]))
# ax.set_ylim(ts_z[:, nskip:].min() * 0.95,
# ts_z[:, nskip:].max() * 1.05)
ax.annotate(
ylabel, xy=(0.0, 0.7), xycoords='axes fraction',
xytext=(0, 0), textcoords='offset points',
va='center', ha='left', color='gray', size=4,
bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
'lw': 0, 'alpha': 0.8})
ax.set_yticks([])
ax.set_yticklabels([])
# if yticks:
# # ax.set_yticks(yticks)
# # ax.set_yticklabels(['%.02f' % y for y in yticks])
# # Plot maximum and minimum horizontal lines
# ax.plot((0, ntsteps - 1), (yticks[0], yticks[0]), 'k:')
# ax.plot((0, ntsteps - 1), (yticks[-1], yticks[-1]), 'k:')
for side in ["top", "right"]:
ax.spines[side].set_color('none')
ax.spines[side].set_visible(False)
if not hide_x:
ax.spines["bottom"].set_position(('outward', 10))
ax.xaxis.set_ticks_position('bottom')
else:
ax.spines["bottom"].set_color('none')
ax.spines["bottom"].set_visible(False)
# ax.spines["left"].set_position(('outward', 30))
# ax.yaxis.set_ticks_position('left')
ax.spines["left"].set_visible(False)
ax.spines["left"].set_color(None)
# labels = [label for label in ax.yaxis.get_ticklabels()]
# labels[0].set_weight('bold')
# labels[-1].set_weight('bold')
if title:
ax.set_title(title)
    return ax | A spikes plot. Thanks to Bob Dogherty (this docstring needs to be improved with proper ack) | Below is the instruction that describes the task:
### Input:
A spikes plot. Thanks to Bob Dogherty (this docstring needs to be improved with proper ack)
### Response:
def spikesplot(ts_z, outer_gs=None, tr=None, zscored=True, spike_thresh=6., title='Spike plot',
               ax=None, cmap='viridis', hide_x=True, nskip=0):
    """
    A spikes plot: draws one trace per slice of ``ts_z`` (nslices x ntsteps)
    on a shared time axis, colored by slice index, with dotted guide lines
    and -- for z-scored data -- a spike threshold at ``+/- spike_thresh``.
    Thanks to Bob Dogherty.

    :param ts_z: 2D array of slice-wise timeseries (nslices x ntsteps).
    :param outer_gs: [optional] outer GridSpec cell to nest the plot into.
    :param tr: repetition time in seconds; defaults to 1.0 when None.
    :param zscored: whether ``ts_z`` holds z-scores (enables threshold lines).
    :param spike_thresh: threshold (z-score units), drawn while data stay below it.
    :param title: title of the axes; suppressed when falsy.
    :param ax: axes to draw into; defaults to the current axes.
    :param cmap: name of the matplotlib colormap, one color per slice.
    :param hide_x: hide time-axis ticks, label and bottom spine.
    :param nskip: number of initial frames ignored when scaling the y axis.
    :return: the matplotlib axes the plot was drawn on.
    """
    if ax is None:
        ax = plt.gca()
    if outer_gs is not None:
        gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer_gs,
                                         width_ratios=[1, 100], wspace=0.0)
        ax = plt.subplot(gs[1])
    # Define TR and number of frames
    if tr is None:
        tr = 1.
    # Load timeseries, zscored slice-wise
    nslices = ts_z.shape[0]
    ntsteps = ts_z.shape[1]
    # Load a colormap
    my_cmap = cm.get_cmap(cmap)
    norm = Normalize(vmin=0, vmax=float(nslices - 1))
    colors = [my_cmap(norm(sl)) for sl in range(nslices)]
    # An array with exactly two distinct values is drawn as stem markers.
    stem = len(np.unique(ts_z).tolist()) == 2
    # Plot one line per axial slice timeseries
    for sl in range(nslices):
        if not stem:
            ax.plot(ts_z[sl, :], color=colors[sl], lw=0.5)
        else:
            markerline, stemlines, baseline = ax.stem(ts_z[sl, :])
            plt.setp(markerline, 'markerfacecolor', colors[sl])
            plt.setp(baseline, 'color', colors[sl], 'linewidth', 1)
            plt.setp(stemlines, 'color', colors[sl], 'linewidth', 1)
    # Handle X, Y axes
    ax.grid(False)
    # Handle X axis
    last = ntsteps - 1
    ax.set_xlim(0, last)
    xticks = list(range(0, last)[::20]) + [last] if not hide_x else []
    ax.set_xticks(xticks)
    if not hide_x:
        if tr is None:
            ax.set_xlabel('time (frame #)')
        else:
            ax.set_xlabel('time (s)')
            ax.set_xticklabels(
                ['%.02f' % t for t in (tr * np.array(xticks)).tolist()])
    # Handle Y axis
    ylabel = 'slice-wise noise average on background'
    if zscored:
        ylabel += ' (z-scored)'
        zs_max = np.abs(ts_z).max()
        # Symmetric limits around zero, padded 5%, ignoring the first
        # ``nskip`` frames.
        ax.set_ylim((-(np.abs(ts_z[:, nskip:]).max()) * 1.05,
                     (np.abs(ts_z[:, nskip:]).max()) * 1.05))
        ytick_vals = np.arange(0.0, zs_max, float(np.floor(zs_max / 2.)))
        yticks = list(
            reversed((-1.0 * ytick_vals[ytick_vals > 0]).tolist())) + ytick_vals.tolist()
        # TODO plot min/max or mark spikes
        # yticks.insert(0, ts_z.min())
        # yticks += [ts_z.max()]
        for val in ytick_vals:
            ax.plot((0, ntsteps - 1), (-val, -val), 'k:', alpha=.2)
            ax.plot((0, ntsteps - 1), (val, val), 'k:', alpha=.2)
        # Plot spike threshold
        if zs_max < spike_thresh:
            ax.plot((0, ntsteps - 1), (-spike_thresh, -spike_thresh), 'k:')
            ax.plot((0, ntsteps - 1), (spike_thresh, spike_thresh), 'k:')
    else:
        yticks = [ts_z[:, nskip:].min(),
                  np.median(ts_z[:, nskip:]),
                  ts_z[:, nskip:].max()]
        ax.set_ylim(0, max(yticks[-1] * 1.05, (yticks[-1] - yticks[0]) * 2.0 + yticks[-1]))
        # ax.set_ylim(ts_z[:, nskip:].min() * 0.95,
        #             ts_z[:, nskip:].max() * 1.05)
    ax.annotate(
        ylabel, xy=(0.0, 0.7), xycoords='axes fraction',
        xytext=(0, 0), textcoords='offset points',
        va='center', ha='left', color='gray', size=4,
        bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
              'lw': 0, 'alpha': 0.8})
    ax.set_yticks([])
    ax.set_yticklabels([])
    # if yticks:
    #     # ax.set_yticks(yticks)
    #     # ax.set_yticklabels(['%.02f' % y for y in yticks])
    #     # Plot maximum and minimum horizontal lines
    #     ax.plot((0, ntsteps - 1), (yticks[0], yticks[0]), 'k:')
    #     ax.plot((0, ntsteps - 1), (yticks[-1], yticks[-1]), 'k:')
    for side in ["top", "right"]:
        ax.spines[side].set_color('none')
        ax.spines[side].set_visible(False)
    if not hide_x:
        ax.spines["bottom"].set_position(('outward', 10))
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.spines["bottom"].set_color('none')
        ax.spines["bottom"].set_visible(False)
    # ax.spines["left"].set_position(('outward', 30))
    # ax.yaxis.set_ticks_position('left')
    ax.spines["left"].set_visible(False)
    ax.spines["left"].set_color(None)
    # labels = [label for label in ax.yaxis.get_ticklabels()]
    # labels[0].set_weight('bold')
    # labels[-1].set_weight('bold')
    if title:
        ax.set_title(title)
    return ax
def get_connection(self, host, port):
"""
Returns a ``StrictRedis`` connection instance.
"""
return beanstalkc.Connection(
host=host,
port=port
    ) | Returns a ``StrictRedis`` connection instance. | Below is the instruction that describes the task:
### Input:
Returns a ``StrictRedis`` connection instance.
### Response:
def get_connection(self, host, port):
    """
    Returns a ``beanstalkc.Connection`` instance.

    NOTE(review): the docstring previously claimed a ``StrictRedis``
    connection, but the code constructs a beanstalkd connection via
    ``beanstalkc.Connection``.

    :param host: beanstalkd server host to connect to.
    :param port: beanstalkd server port.
    """
    return beanstalkc.Connection(
        host=host,
        port=port
    )
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
''' a helper method for recursively validating keys in dictionaries
:return input_dict
'''
# reconstruct key path to current dictionary in model
rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
map_rules = self.keyMap[rules_top_level_key]
# construct list error report template
map_error = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': map_rules,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': 0,
'error_code': 4001
}
# validate map size
if 'min_size' in map_rules.keys():
input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
if input_size < map_rules['min_size']:
map_error['failed_test'] = 'min_size'
map_error['error_value'] = input_size
map_error['error_code'] = 4031
raise InputValidationError(map_error)
if 'max_size' in map_rules.keys():
input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
if input_size > map_rules['max_size']:
map_error['failed_test'] = 'max_size'
map_error['error_value'] = input_size
map_error['error_code'] = 4032
raise InputValidationError(map_error)
# construct lists of keys in input dictionary
input_keys = []
input_key_list = []
for key in input_dict.keys():
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'key_datatype',
'input_path': path_to_root,
'error_value': key,
'error_code': 4004
}
error_dict['input_criteria']['key_datatype'] = 'string'
if path_to_root == '.':
if not isinstance(key, str):
input_key_name = path_to_root + str(key)
error_dict['input_path'] = input_key_name
raise InputValidationError(error_dict)
input_key_name = path_to_root + key
else:
if not isinstance(key, str):
input_key_name = path_to_root + '.' + str(key)
error_dict['input_path'] = input_key_name
raise InputValidationError(error_dict)
input_key_name = path_to_root + '.' + key
input_keys.append(input_key_name)
input_key_list.append(key)
# TODO: validate top-level key and values against identical to reference
# TODO: run lambda function and call validation
# construct lists of keys in schema dictionary
max_keys = []
max_key_list = []
req_keys = []
req_key_list = []
for key in schema_dict.keys():
if path_to_root == '.':
schema_key_name = path_to_root + key
else:
schema_key_name = path_to_root + '.' + key
max_keys.append(schema_key_name)
max_key_list.append(key)
rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
if self.keyMap[rules_schema_key_name]['required_field']:
req_keys.append(schema_key_name)
req_key_list.append(key)
# validate existence of required fields
missing_keys = set(req_keys) - set(input_keys)
if missing_keys:
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'required_field',
'input_path': path_to_root,
'error_value': list(missing_keys),
'error_code': 4002
}
error_dict['input_criteria']['required_keys'] = req_keys
raise InputValidationError(error_dict)
# validate existence of extra fields
extra_keys = set(input_keys) - set(max_keys)
if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
extra_key_list = []
for key in extra_keys:
pathless_key = re.sub(rules_top_level_key, '', key, count=1)
extra_key_list.append(pathless_key)
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'extra_fields',
'input_path': path_to_root,
'error_value': extra_key_list,
'error_code': 4003
}
error_dict['input_criteria']['maximum_scope'] = max_key_list
raise InputValidationError(error_dict)
# validate datatype of value
for key, value in input_dict.items():
if path_to_root == '.':
input_key_name = path_to_root + key
else:
input_key_name = path_to_root + '.' + key
rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
if input_key_name in max_keys:
input_criteria = self.keyMap[rules_input_key_name]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': input_key_name,
'error_value': value,
'error_code': 4001
}
try:
value_index = self._datatype_classes.index(value.__class__)
except:
error_dict['error_value'] = value.__class__.__name__
raise InputValidationError(error_dict)
value_type = self._datatype_names[value_index]
if input_criteria['value_datatype'] == 'null':
pass
else:
if value_type != input_criteria['value_datatype']:
raise InputValidationError(error_dict)
# call appropriate validation sub-routine for datatype of value
if value_type == 'boolean':
input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
elif value_type == 'number':
input_dict[key] = self._validate_number(value, input_key_name, object_title)
elif value_type == 'string':
input_dict[key] = self._validate_string(value, input_key_name, object_title)
elif value_type == 'map':
input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
elif value_type == 'list':
input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
# set default values for empty optional fields
for key in max_key_list:
if key not in input_key_list:
indexed_key = max_keys[max_key_list.index(key)]
if indexed_key in self.components.keys():
if 'default_value' in self.components[indexed_key]:
input_dict[key] = self.components[indexed_key]['default_value']
return input_dict | a helper method for recursively validating keys in dictionaries
:return input_dict | Below is the instruction that describes the task:
### Input:
a helper method for recursively validating keys in dictionaries
:return input_dict
### Response:
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    Walks ``input_dict`` against the rules in ``self.keyMap``: checks map
    size, key datatypes, required/extra fields and value datatypes, then
    recurses into nested maps and lists. Coerced values and default values
    are written back into ``input_dict`` in place.

    :param input_dict: dictionary input to validate (mutated in place)
    :param schema_dict: schema declaration for this branch of the model
    :param path_to_root: dot-separated path from the model root to this dict
    :param object_title: [optional] string with a title used in error reports
    :return input_dict
    '''
    # reconstruct key path to current dictionary in model
    # (list indexes are normalized to [0] to match the rules in self.keyMap)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # NOTE(review): size is approximated from the whitespace-stripped JSON
    # text of the dict minus a constant 51 bytes -- confirm this heuristic.
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): this assignment mutates the shared rules entry in
        # self.keyMap ('input_criteria' is a reference, not a copy).
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            # strip the path prefix so the report shows key names only
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            # NOTE(review): bare except -- apparently intended to catch a
            # value class that is not registered in self._datatype_classes.
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def todict(cls, parseresult, mode='parens', ns={}):
"""
Helper function to return dictionary given the parse results
from a pyparsing.nestedExpr object (containing keywords).
The ns is a dynamic namespace (typically the IPython Notebook
namespace) used to update the class-level namespace.
"""
grouped, kwargs = [], {}
tokens = cls.collect_tokens(parseresult, mode)
# Group tokens without '=' and append to last token containing '='
for group in groupby(tokens, lambda el: '=' in el):
(val, items) = group
if val is True:
grouped += list(items)
if val is False:
elements =list(items)
# Assume anything before ) or } can be joined with commas
# (e.g tuples with spaces in them)
joiner=',' if any(((')' in el) or ('}' in el))
for el in elements) else ''
grouped[-1] += joiner + joiner.join(elements)
for keyword in grouped:
# Tuple ('a', 3) becomes (,'a',3) and '(,' is never valid
# Same for some of the other joining errors corrected here
for (fst,snd) in [('(,', '('), ('{,', '{'), ('=,','='),
(',:',':'), (':,', ':'), (',,', ','),
(',.', '.')]:
keyword = keyword.replace(fst, snd)
try:
kwargs.update(eval('dict(%s)' % keyword,
dict(cls.namespace, **ns)))
except:
if cls.abort_on_eval_failure:
raise SyntaxError("Could not evaluate keyword: %r"
% keyword)
msg = "Ignoring keyword pair that fails to evaluate: '%s'"
parsewarning.warning(msg % keyword)
return kwargs | Helper function to return dictionary given the parse results
from a pyparsing.nestedExpr object (containing keywords).
The ns is a dynamic namespace (typically the IPython Notebook
namespace) used to update the class-level namespace. | Below is the the instruction that describes the task:
### Input:
Helper function to return dictionary given the parse results
from a pyparsing.nestedExpr object (containing keywords).
The ns is a dynamic namespace (typically the IPython Notebook
namespace) used to update the class-level namespace.
### Response:
def todict(cls, parseresult, mode='parens', ns=None):
    """
    Helper function to return a dictionary given the parse results
    from a pyparsing.nestedExpr object (containing keywords).

    The ns is a dynamic namespace (typically the IPython Notebook
    namespace) used to update the class-level namespace.
    """
    # Avoid the mutable-default-argument pitfall; None means "no extra names".
    ns = {} if ns is None else ns
    grouped, kwargs = [], {}
    tokens = cls.collect_tokens(parseresult, mode)
    # Group tokens without '=' and append to last token containing '='
    for (has_eq, items) in groupby(tokens, lambda el: '=' in el):
        if has_eq:
            grouped += list(items)
        else:
            elements = list(items)
            # Assume anything before ) or } can be joined with commas
            # (e.g tuples with spaces in them)
            joiner = ',' if any(((')' in el) or ('}' in el))
                               for el in elements) else ''
            grouped[-1] += joiner + joiner.join(elements)
    for keyword in grouped:
        # Tuple ('a', 3) becomes (,'a',3) and '(,' is never valid
        # Same for some of the other joining errors corrected here
        for (fst, snd) in [('(,', '('), ('{,', '{'), ('=,', '='),
                           (',:', ':'), (':,', ':'), (',,', ','),
                           (',.', '.')]:
            keyword = keyword.replace(fst, snd)
        try:
            # NOTE(review): eval of a keyword string -- acceptable only
            # because input comes from the trusted option parser; do not
            # feed untrusted text through this path.
            kwargs.update(eval('dict(%s)' % keyword,
                               dict(cls.namespace, **ns)))
        except Exception:  # was a bare except; don't swallow SystemExit etc.
            if cls.abort_on_eval_failure:
                raise SyntaxError("Could not evaluate keyword: %r"
                                  % keyword)
            msg = "Ignoring keyword pair that fails to evaluate: '%s'"
            parsewarning.warning(msg % keyword)
    return kwargs
def body(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3, as_list=False):
"""Return a random email text."""
return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,
wrap_start=wrap_start, wrap_end=wrap_end,
html=html,
sentences_quantity=sentences_quantity,
as_list=as_list) | Return a random email text. | Below is the the instruction that describes the task:
### Input:
Return a random email text.
### Response:
def body(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
         html=False, sentences_quantity=3, as_list=False):
    """Return a random email text.

    Thin wrapper that delegates every argument unchanged to
    ``lorem_ipsum.paragraphs`` -- see that function for the exact
    semantics of each parameter.
    """
    return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,
                                  wrap_start=wrap_start, wrap_end=wrap_end,
                                  html=html,
                                  sentences_quantity=sentences_quantity,
                                  as_list=as_list)
def prepare_plot_data(data_file):
"""
Return a list of Plotly elements representing the network graph
"""
G = ig.Graph.Read_GML(data_file)
layout = G.layout('graphopt')
labels = list(G.vs['label'])
N = len(labels)
E = [e.tuple for e in G.es]
community = G.community_multilevel().membership
communities = len(set(community))
color_list = community_colors(communities)
Xn = [layout[k][0] for k in range(N)]
Yn = [layout[k][1] for k in range(N)]
Xe = []
Ye = []
for e in E:
Xe += [layout[e[0]][0], layout[e[1]][0], None]
Ye += [layout[e[0]][1], layout[e[1]][1], None]
lines = Scatter(x=Xe,
y=Ye,
mode='lines',
line=Line(color='rgb(210,210,210)', width=1),
hoverinfo='none'
)
plot_data = [lines]
node_x = [[] for i in range(communities)]
node_y = [[] for i in range(communities)]
node_labels = [[] for i in range(communities)]
for j in range(len(community)):
index = community[j]
node_x[index].append(layout[j][0])
node_y[index].append(layout[j][1])
node_labels[index].append(labels[j])
for i in range(communities):
trace = Scatter(x=node_x[i],
y=node_y[i],
mode='markers',
name='ntw',
marker=Marker(symbol='dot',
size=5,
color=color_list[i],
line=Line(
color='rgb(50,50,50)', width=0.5)
),
text=node_labels[i],
hoverinfo='text'
)
plot_data.append(trace)
return plot_data | Return a list of Plotly elements representing the network graph | Below is the the instruction that describes the task:
### Input:
Return a list of Plotly elements representing the network graph
### Response:
def prepare_plot_data(data_file):
    """
    Return a list of Plotly elements representing the network graph.

    Loads a GML graph from ``data_file``, lays it out with igraph's
    'graphopt' algorithm, detects communities via multilevel modularity
    optimisation, and returns one edge trace followed by one node trace
    per community.
    """
    G = ig.Graph.Read_GML(data_file)
    layout = G.layout('graphopt')
    labels = list(G.vs['label'])
    E = [e.tuple for e in G.es]
    community = G.community_multilevel().membership
    communities = len(set(community))
    color_list = community_colors(communities)
    # (Removed unused N/Xn/Yn: node coordinates are collected per
    # community below, so the global lists were dead code.)
    # Each edge contributes its two endpoints plus a None separator so
    # Plotly renders disconnected segments within a single trace.
    Xe = []
    Ye = []
    for e in E:
        Xe += [layout[e[0]][0], layout[e[1]][0], None]
        Ye += [layout[e[0]][1], layout[e[1]][1], None]
    lines = Scatter(x=Xe,
                    y=Ye,
                    mode='lines',
                    line=Line(color='rgb(210,210,210)', width=1),
                    hoverinfo='none'
                    )
    plot_data = [lines]
    # Bucket node coordinates and labels by community index.
    node_x = [[] for i in range(communities)]
    node_y = [[] for i in range(communities)]
    node_labels = [[] for i in range(communities)]
    for j in range(len(community)):
        index = community[j]
        node_x[index].append(layout[j][0])
        node_y[index].append(layout[j][1])
        node_labels[index].append(labels[j])
    # One marker trace per community so each gets its own colour.
    for i in range(communities):
        trace = Scatter(x=node_x[i],
                        y=node_y[i],
                        mode='markers',
                        name='ntw',
                        marker=Marker(symbol='dot',
                                      size=5,
                                      color=color_list[i],
                                      line=Line(
                                          color='rgb(50,50,50)', width=0.5)
                                      ),
                        text=node_labels[i],
                        hoverinfo='text'
                        )
        plot_data.append(trace)
    return plot_data
def rename(self, container, name):
"""
Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res) | Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the the instruction that describes the task:
### Input:
Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def rename(self, container, name):
    """
    Rename a container. Similar to the ``docker rename`` command.
    Args:
        container (str): ID of the container to rename
        name (str): New name for the container
    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    endpoint = self._url("/containers/{0}/rename", container)
    response = self._post(endpoint, params={'name': name})
    self._raise_for_status(response)
def get_tag_value(string, pre, post, tagtype=float, greedy=True):
"""
Extracts the value of a tag from a string.
Parameters
-----------------
pre : str
regular expression to match before the the tag value
post : str | list | tuple
regular expression to match after the the tag value
if list than the regular expressions will be combined into the regular expression (?=post[0]|post[1]|..)
tagtype : str | float | int
the type to which the tag value should be converted to
greedy : bool
Whether the regular expression is gredy or not.
Returns
---------------
Tag value if found, None otherwise
Example
------------
get_tag_value('PID_23.5.txt', pre=r'PID_' , post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23.5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23
get_tag_value('PID_23.txt', pre=r'PID_', post='.txt') should return 23
get_tag_value('PID.txt', pre=r'PID_', post='.txt') should return None
TODO Make list/tuple input for pre
"""
greedy = '?' if greedy else '' # For greedy search
if isinstance(post, (list, tuple)):
post = '(?=' + '|'.join(post) + ')'
tag_list = re.findall(r'{pre}(.+{greedy}){post}'.format(pre=pre, post=post, greedy=greedy),
string)
if len(tag_list) > 1:
raise ValueError('More than one matching pattern found... check filename')
elif len(tag_list) == 0:
return None
else:
return tagtype(tag_list[0]) | Extracts the value of a tag from a string.
Parameters
-----------------
pre : str
regular expression to match before the the tag value
post : str | list | tuple
regular expression to match after the the tag value
if list than the regular expressions will be combined into the regular expression (?=post[0]|post[1]|..)
tagtype : str | float | int
the type to which the tag value should be converted to
greedy : bool
Whether the regular expression is gredy or not.
Returns
---------------
Tag value if found, None otherwise
Example
------------
get_tag_value('PID_23.5.txt', pre=r'PID_' , post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23.5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23
get_tag_value('PID_23.txt', pre=r'PID_', post='.txt') should return 23
get_tag_value('PID.txt', pre=r'PID_', post='.txt') should return None
TODO Make list/tuple input for pre | Below is the the instruction that describes the task:
### Input:
Extracts the value of a tag from a string.
Parameters
-----------------
pre : str
regular expression to match before the the tag value
post : str | list | tuple
regular expression to match after the the tag value
if list than the regular expressions will be combined into the regular expression (?=post[0]|post[1]|..)
tagtype : str | float | int
the type to which the tag value should be converted to
greedy : bool
Whether the regular expression is gredy or not.
Returns
---------------
Tag value if found, None otherwise
Example
------------
get_tag_value('PID_23.5.txt', pre=r'PID_' , post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23.5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23.5
get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\.txt)') should return 23
get_tag_value('PID_23.txt', pre=r'PID_', post='.txt') should return 23
get_tag_value('PID.txt', pre=r'PID_', post='.txt') should return None
TODO Make list/tuple input for pre
### Response:
def get_tag_value(string, pre, post, tagtype=float, greedy=True):
    """
    Extract the value of a tag from a string.

    Parameters
    ----------
    pre : str
        Regular expression to match before the tag value.
    post : str | list | tuple
        Regular expression to match after the tag value. A list/tuple is
        combined into the lookahead ``(?=post[0]|post[1]|...)``.
    tagtype : type
        Type the extracted value is converted to (e.g. ``float``, ``int``).
    greedy : bool
        NOTE: when True the inner match is made *lazy* (``.+?``), i.e. as
        short as possible -- the flag name is historical.

    Returns
    -------
    Tag value converted with ``tagtype`` if found, None otherwise.

    Raises
    ------
    ValueError
        If more than one match is found.

    Examples
    --------
    get_tag_value('PID_23.5.txt', pre=r'PID_', post='(?=_|\\.txt)') -> 23.5
    get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\\.txt)') -> 23
    get_tag_value('PID.txt', pre=r'PID_', post='.txt') -> None
    """
    lazy = '?' if greedy else ''
    if isinstance(post, (list, tuple)):
        post = '(?={})'.format('|'.join(post))
    pattern = r'{pre}(.+{lazy}){post}'.format(pre=pre, lazy=lazy, post=post)
    matches = re.findall(pattern, string)
    if len(matches) > 1:
        raise ValueError('More than one matching pattern found... check filename')
    if not matches:
        return None
    return tagtype(matches[0])
def gauss_distribution(function_variable, standard_deviation, mean=0):
r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37
"""
return (1 / (standard_deviation * np.sqrt(2 * np.pi)) *
np.exp(-(function_variable - mean)**2 /
(2 * standard_deviation**2))) | r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37 | Below is the the instruction that describes the task:
### Input:
r"""
Gauss distribution.
The Gauss distribution is used in the function
:py:func:`~.power_curves.smooth_power_curve` for power curve smoothing.
Parameters
----------
function_variable : float
Variable of the gaussian distribution.
standard_deviation : float
Standard deviation of the Gauss distribution.
mean : Float
Defines the offset of the Gauss distribution. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on the type of
`wind_speed`.
Notes
-----
The following equation is used [1]_:
.. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
\left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]
with:
:math:`\sigma`: standard deviation, :math:`\mu`: mean
References
----------
.. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
New York, Cambridge University Press, 2011, p. 37
### Response:
def gauss_distribution(function_variable, standard_deviation, mean=0):
    r"""
    Evaluate the Gauss (normal) probability density.

    Used by :py:func:`~.power_curves.smooth_power_curve` for power curve
    smoothing.

    Parameters
    ----------
    function_variable : float
        Point at which the density is evaluated.
    standard_deviation : float
        Standard deviation :math:`\sigma` of the distribution.
    mean : float
        Mean :math:`\mu` (offset) of the distribution. Default: 0.

    Returns
    -------
    float or numpy array
        Density value(s); type follows `function_variable`.

    Notes
    -----
    Implements [1]_:

    .. math:: f(x) = \frac{1}{\sigma \sqrt{2 \pi}} \exp
              \left[-\frac{(x-\mu)^2}{2 \sigma^2}\right]

    References
    ----------
    .. [1] Berendsen, H.: "A Student's Guide to Data and Error Analysis".
             New York, Cambridge University Press, 2011, p. 37
    """
    normalization = 1 / (standard_deviation * np.sqrt(2 * np.pi))
    exponent = -(function_variable - mean) ** 2 / (2 * standard_deviation ** 2)
    return normalization * np.exp(exponent)
def _processChanges(self, unused_output):
"""Send info about pulled changes to the master and record current.
HgPoller does the recording by moving the working dir to the head
of the branch.
We don't update the tree (unnecessary treatment and waste of space)
instead, we simply store the current rev number in a file.
Recall that hg rev numbers are local and incremental.
"""
for branch in self.branches + self.bookmarks:
rev = yield self._getHead(branch)
if rev is None:
# Nothing pulled?
continue
yield self._processBranchChanges(rev, branch) | Send info about pulled changes to the master and record current.
HgPoller does the recording by moving the working dir to the head
of the branch.
We don't update the tree (unnecessary treatment and waste of space)
instead, we simply store the current rev number in a file.
Recall that hg rev numbers are local and incremental. | Below is the instruction that describes the task:
### Input:
Send info about pulled changes to the master and record current.
HgPoller does the recording by moving the working dir to the head
of the branch.
We don't update the tree (unnecessary treatment and waste of space)
instead, we simply store the current rev number in a file.
Recall that hg rev numbers are local and incremental.
### Response:
def _processChanges(self, unused_output):
    """Send info about pulled changes to the master and record current.
    HgPoller does the recording by moving the working dir to the head
    of the branch.
    We don't update the tree (unnecessary treatment and waste of space)
    instead, we simply store the current rev number in a file.
    Recall that hg rev numbers are local and incremental.
    """
    # NOTE(review): this is a generator that yields awaitable results --
    # presumably decorated with Twisted's @defer.inlineCallbacks in the
    # full source; confirm the decorator before refactoring.
    for branch in self.branches + self.bookmarks:
        rev = yield self._getHead(branch)
        if rev is None:
            # Nothing pulled?
            continue
        # Emit change objects for everything up to (and recording) rev.
        yield self._processBranchChanges(rev, branch)
def write_bag_file(self, path, encoding=ENCODING):
# type: (Text, Optional[str]) -> IO
"""Write the bag file into our research object."""
self.self_check()
# For some reason below throws BlockingIOError
#fp = BufferedWriter(WritableBagFile(self, path))
bag_file = cast(IO, WritableBagFile(self, path))
if encoding is not None:
# encoding: match Tag-File-Character-Encoding: UTF-8
# newline: ensure LF also on Windows
return cast(IO,
TextIOWrapper(bag_file, encoding=encoding, newline="\n"))
return bag_file | Write the bag file into our research object. | Below is the the instruction that describes the task:
### Input:
Write the bag file into our research object.
### Response:
def write_bag_file(self, path, encoding=ENCODING):
    # type: (Text, Optional[str]) -> IO
    """Write the bag file into our research object.

    Returns a writable file-like object for ``path``. With the default
    encoding a text wrapper is returned; pass ``encoding=None`` to get
    the raw (binary) writer instead.
    """
    self.self_check()
    # For some reason below throws BlockingIOError
    #fp = BufferedWriter(WritableBagFile(self, path))
    bag_file = cast(IO, WritableBagFile(self, path))
    if encoding is not None:
        # encoding: match Tag-File-Character-Encoding: UTF-8
        # newline: ensure LF also on Windows
        return cast(IO,
                    TextIOWrapper(bag_file, encoding=encoding, newline="\n"))
    return bag_file
def model_from_path(model_path, fuzziness=False):
"""Find the model class for a given model path like 'project.app.model'
Args:
path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
"""
app_name = '.'.join(model_path.split('.')[:-1])
model_name = model_path.split('.')[-1]
if not app_name:
return None
module = importlib.import_module(app_name)
try:
model = getattr(module, model_name)
except AttributeError:
try:
model = getattr(getattr(module, 'models'), model_name)
except AttributeError:
model = get_model(model_name, app_name, fuzziness=fuzziness)
return model | Find the model class for a given model path like 'project.app.model'
Args:
path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class | Below is the instruction that describes the task:
### Input:
Find the model class for a given model path like 'project.app.model'
Args:
path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
### Response:
def model_from_path(model_path, fuzziness=False):
    """Find the model class for a given model path like 'project.app.model'.

    Args:
        model_path (str): dot-delimited model path, like 'project.app.model'
        fuzziness: passed through to ``get_model`` on the fallback path

    Returns:
        Django Model-based class, or None when no app portion is present
    """
    *app_parts, model_name = model_path.split('.')
    app_name = '.'.join(app_parts)
    if not app_name:
        return None
    module = importlib.import_module(app_name)
    # Try the attribute on the module itself first.
    try:
        return getattr(module, model_name)
    except AttributeError:
        pass
    # Next, look under the module's ``models`` submodule; finally fall
    # back to a registry lookup.
    try:
        return getattr(getattr(module, 'models'), model_name)
    except AttributeError:
        return get_model(model_name, app_name, fuzziness=fuzziness)
def GetResults(self):
"""Retrieves the hashing results.
Returns:
list[AnalyzerResult]: results.
"""
results = []
for hasher in self._hashers:
logger.debug('Processing results for hasher {0:s}'.format(hasher.NAME))
result = analyzer_result.AnalyzerResult()
result.analyzer_name = self.NAME
result.attribute_name = '{0:s}_hash'.format(hasher.NAME)
result.attribute_value = hasher.GetStringDigest()
results.append(result)
return results | Retrieves the hashing results.
Returns:
list[AnalyzerResult]: results. | Below is the instruction that describes the task:
### Input:
Retrieves the hashing results.
Returns:
list[AnalyzerResult]: results.
### Response:
def GetResults(self):
    """Retrieves the hashing results.

    Builds one AnalyzerResult per registered hasher, storing the digest
    under the attribute name '<hasher>_hash'.

    Returns:
      list[AnalyzerResult]: results.
    """
    results = []
    for hasher in self._hashers:
        logger.debug('Processing results for hasher {0:s}'.format(hasher.NAME))
        entry = analyzer_result.AnalyzerResult()
        entry.analyzer_name = self.NAME
        entry.attribute_name = '{0:s}_hash'.format(hasher.NAME)
        entry.attribute_value = hasher.GetStringDigest()
        results.append(entry)
    return results
def num_channels(self):
"""
Get the number of channels in the input generators.
"""
if(self.inspect_value('index') is None):
if(len(self.generators)>0):
return self.generators[0].num_channels()
return 0
return self.get_current_generator().num_channels() | Get the number of channels in the input generators. | Below is the the instruction that describes the task:
### Input:
Get the number of channels in the input generators.
### Response:
def num_channels(self):
    """
    Get the number of channels in the input generators.
    """
    # With a selected index, defer to the currently active generator.
    if self.inspect_value('index') is not None:
        return self.get_current_generator().num_channels()
    # No selection yet: fall back to the first generator, or 0 if empty.
    if self.generators:
        return self.generators[0].num_channels()
    return 0
def resolve_mode(self, name):
"""
From given mode name, return mode file path from
``settings.CODEMIRROR_MODES`` map.
Arguments:
name (string): Mode name.
Raises:
KeyError: When given name does not exist in
``settings.CODEMIRROR_MODES``.
Returns:
string: Mode file path.
"""
if name not in settings.CODEMIRROR_MODES:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_MODES'.")
raise UnknowModeError(msg.format(name))
return settings.CODEMIRROR_MODES.get(name) | From given mode name, return mode file path from
``settings.CODEMIRROR_MODES`` map.
Arguments:
name (string): Mode name.
Raises:
KeyError: When given name does not exist in
``settings.CODEMIRROR_MODES``.
Returns:
string: Mode file path. | Below is the instruction that describes the task:
### Input:
From given mode name, return mode file path from
``settings.CODEMIRROR_MODES`` map.
Arguments:
name (string): Mode name.
Raises:
KeyError: When given name does not exist in
``settings.CODEMIRROR_MODES``.
Returns:
string: Mode file path.
### Response:
def resolve_mode(self, name):
    """
    Look up the mode file path registered under ``name`` in
    ``settings.CODEMIRROR_MODES``.

    Arguments:
        name (string): Mode name.

    Raises:
        UnknowModeError: When ``name`` is not registered in
            ``settings.CODEMIRROR_MODES``.

    Returns:
        string: Mode file path.
    """
    if name in settings.CODEMIRROR_MODES:
        return settings.CODEMIRROR_MODES.get(name)
    msg = ("Given config name '{}' does not exists in "
           "'settings.CODEMIRROR_MODES'.")
    raise UnknowModeError(msg.format(name))
def init_sts_session(profile_name, credentials, duration = 28800, session_name = None, save_creds = True):
"""
Fetch STS credentials
:param profile_name:
:param credentials:
:param duration:
:param session_name:
:param save_creds:
:return:
"""
# Set STS arguments
sts_args = {
'DurationSeconds': duration
}
# Prompt for MFA code if MFA serial present
if 'SerialNumber' in credentials and credentials['SerialNumber']:
if not credentials['TokenCode']:
credentials['TokenCode'] = prompt_4_mfa_code()
if credentials['TokenCode'] == 'q':
credentials['SerialNumber'] = None
sts_args['TokenCode'] = credentials['TokenCode']
sts_args['SerialNumber'] = credentials['SerialNumber']
# Init session
sts_client = boto3.session.Session(credentials['AccessKeyId'], credentials['SecretAccessKey']).client('sts')
sts_response = sts_client.get_session_token(**sts_args)
if save_creds:
# Move long-lived credentials if needed
if not profile_name.endswith('-nomfa') and credentials['AccessKeyId'].startswith('AKIA'):
write_creds_to_aws_credentials_file(profile_name + '-nomfa', credentials)
# Save STS values in the .aws/credentials file
write_creds_to_aws_credentials_file(profile_name, sts_response['Credentials'])
return sts_response['Credentials'] | Fetch STS credentials
:param profile_name:
:param credentials:
:param duration:
:param session_name:
:param save_creds:
:return: | Below is the instruction that describes the task:
### Input:
Fetch STS credentials
:param profile_name:
:param credentials:
:param duration:
:param session_name:
:param save_creds:
:return:
### Response:
def init_sts_session(profile_name, credentials, duration = 28800, session_name = None, save_creds = True):
    """
    Fetch STS credentials
    :param profile_name: AWS CLI profile the credentials belong to
    :param credentials: dict with 'AccessKeyId', 'SecretAccessKey' and
                        (optionally) 'SerialNumber'/'TokenCode' for MFA
    :param duration: lifetime of the STS token, in seconds
    :param session_name: unused in this implementation -- TODO confirm
                         whether callers still rely on it
    :param save_creds: when True, persist the STS credentials (and the
                       long-lived ones under '<profile>-nomfa') to the
                       ~/.aws/credentials file
    :return: the temporary credentials dict returned by STS
    """
    # Set STS arguments
    sts_args = {
        'DurationSeconds': duration
    }
    # Prompt for MFA code if MFA serial present
    if 'SerialNumber' in credentials and credentials['SerialNumber']:
        if not credentials['TokenCode']:
            credentials['TokenCode'] = prompt_4_mfa_code()
            # 'q' aborts MFA: drop the serial number
            if credentials['TokenCode'] == 'q':
                credentials['SerialNumber'] = None
        sts_args['TokenCode'] = credentials['TokenCode']
        sts_args['SerialNumber'] = credentials['SerialNumber']
    # Init session
    sts_client = boto3.session.Session(credentials['AccessKeyId'], credentials['SecretAccessKey']).client('sts')
    sts_response = sts_client.get_session_token(**sts_args)
    if save_creds:
        # Move long-lived credentials if needed
        if not profile_name.endswith('-nomfa') and credentials['AccessKeyId'].startswith('AKIA'):
            write_creds_to_aws_credentials_file(profile_name + '-nomfa', credentials)
        # Save STS values in the .aws/credentials file
        write_creds_to_aws_credentials_file(profile_name, sts_response['Credentials'])
    return sts_response['Credentials']
def centroid(data, method=median):
"returns the central vector of a list of vectors"
out = []
for i in range(len(data[0])):
out.append(method([x[i] for x in data]))
return tuple(out) | returns the central vector of a list of vectors | Below is the the instruction that describes the task:
### Input:
returns the central vector of a list of vectors
### Response:
def centroid(data, method=median):
    """Return the central vector of a list of vectors.

    The i-th component is ``method`` applied to the i-th components of
    all vectors in ``data``.
    """
    dims = len(data[0])
    return tuple(method([vec[i] for vec in data]) for i in range(dims))
def generate_take(out_f, steps, line_prefix):
"""Generate the take function"""
out_f.write(
'{0}constexpr inline int take(int n_)\n'
'{0}{{\n'
'{0} return {1} 0 {2};\n'
'{0}}}\n'
'\n'.format(
line_prefix,
''.join('n_ >= {0} ? {0} : ('.format(s) for s in steps),
')' * len(steps)
)
) | Generate the take function | Below is the instruction that describes the task:
### Input:
Generate the take function
### Response:
def generate_take(out_f, steps, line_prefix):
    """Generate the take function.

    Writes a C++ ``constexpr inline int take(int n_)`` definition to
    ``out_f``, prefixing every line with ``line_prefix``. The body is a
    chain of nested conditionals returning the first element of
    ``steps`` (in iteration order) for which ``n_ >= step`` holds, and
    0 when none does.
    """
    nested_opens = ''.join('n_ >= {0} ? {0} : ('.format(step) for step in steps)
    closing = ')' * len(steps)
    text = (
        '{0}constexpr inline int take(int n_)\n'
        '{0}{{\n'
        '{0} return {1} 0 {2};\n'
        '{0}}}\n'
        '\n'
    ).format(line_prefix, nested_opens, closing)
    out_f.write(text)
def findBestMatch(self, needle, similarity):
""" Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``.
Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.
*Developer's Note - Despite the name, this method actually returns the **first** result
with enough similarity, not the **best** result.*
"""
method = cv2.TM_CCOEFF_NORMED
position = None
match = cv2.matchTemplate(self.haystack, needle, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
confidence = min_val
if min_val <= 1-similarity:
# Confidence checks out
position = min_loc
else:
confidence = max_val
if max_val >= similarity:
# Confidence checks out
position = max_loc
if not position:
return None
return (position, confidence) | Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``.
Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.
*Developer's Note - Despite the name, this method actually returns the **first** result
with enough similarity, not the **best** result.* | Below is the the instruction that describes the task:
### Input:
Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``.
Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.
*Developer's Note - Despite the name, this method actually returns the **first** result
with enough similarity, not the **best** result.*
### Response:
def findBestMatch(self, needle, similarity):
    """ Find the best match for ``needle`` that has a similarity better than or equal to ``similarity``.

    Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.

    *Developer's Note - Despite the name, this method actually returns the **first** result
    with enough similarity, not the **best** result.*
    """
    # Normalized cross-correlation: a higher score means a better match.
    method = cv2.TM_CCOEFF_NORMED
    match = cv2.matchTemplate(self.haystack, needle, method)
    _min_val, max_val, _min_loc, max_loc = cv2.minMaxLoc(match)
    # ``method`` is fixed to a correlation measure, so the best score is
    # always the maximum; the original SQDIFF branch was unreachable
    # dead code and has been removed.
    confidence = max_val
    if max_val < similarity:
        return None
    return (max_loc, confidence)
def insert(self, resource, value):
"""insert(resource, value)
Insert a resource entry into the database. RESOURCE is a
string and VALUE can be any Python value.
"""
# Split res into components and bindings
parts = resource_parts_re.split(resource)
# If the last part is empty, this is an invalid resource
# which we simply ignore
if parts[-1] == '':
return
self.lock.acquire()
db = self.db
for i in range(1, len(parts), 2):
# Create a new mapping/value group
if parts[i - 1] not in db:
db[parts[i - 1]] = ({}, {})
# Use second mapping if a loose binding, first otherwise
if '*' in parts[i]:
db = db[parts[i - 1]][1]
else:
db = db[parts[i - 1]][0]
# Insert value into the derived db
if parts[-1] in db:
db[parts[-1]] = db[parts[-1]][:2] + (value, )
else:
db[parts[-1]] = ({}, {}, value)
self.lock.release() | insert(resource, value)
Insert a resource entry into the database. RESOURCE is a
string and VALUE can be any Python value. | Below is the the instruction that describes the task:
### Input:
insert(resource, value)
Insert a resource entry into the database. RESOURCE is a
string and VALUE can be any Python value.
### Response:
def insert(self, resource, value):
"""insert(resource, value)
Insert a resource entry into the database. RESOURCE is a
string and VALUE can be any Python value.
"""
# Split res into components and bindings
parts = resource_parts_re.split(resource)
# If the last part is empty, this is an invalid resource
# which we simply ignore
if parts[-1] == '':
return
self.lock.acquire()
db = self.db
for i in range(1, len(parts), 2):
# Create a new mapping/value group
if parts[i - 1] not in db:
db[parts[i - 1]] = ({}, {})
# Use second mapping if a loose binding, first otherwise
if '*' in parts[i]:
db = db[parts[i - 1]][1]
else:
db = db[parts[i - 1]][0]
# Insert value into the derived db
if parts[-1] in db:
db[parts[-1]] = db[parts[-1]][:2] + (value, )
else:
db[parts[-1]] = ({}, {}, value)
self.lock.release() |
def current_item(self):
"""Return the current element."""
if self._history and self._index >= 0:
self._check_index()
return self._history[self._index] | Return the current element. | Below is the instruction that describes the task:
### Input:
Return the current element.
### Response:
def current_item(self):
"""Return the current element."""
if self._history and self._index >= 0:
self._check_index()
return self._history[self._index] |
def enable_backups(self, table_name, model):
"""Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
"""
self._tables.pop(table_name, None)
request = {
"TableName": table_name,
"PointInTimeRecoverySpecification": {"PointInTimeRecoveryEnabled": True}
}
try:
self.dynamodb_client.update_continuous_backups(**request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while setting Continuous Backups.") from error | Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from | Below is the instruction that describes the task:
### Input:
Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
### Response:
def enable_backups(self, table_name, model):
"""Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
"""
self._tables.pop(table_name, None)
request = {
"TableName": table_name,
"PointInTimeRecoverySpecification": {"PointInTimeRecoveryEnabled": True}
}
try:
self.dynamodb_client.update_continuous_backups(**request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while setting Continuous Backups.") from error |
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper | Decorator that injects parameters from the configuration. | Below is the instruction that describes the task:
### Input:
Decorator that injects parameters from the configuration.
### Response:
def args_from_config(func):
"""Decorator that injects parameters from the configuration.
"""
func_args = signature(func).parameters
@wraps(func)
def wrapper(*args, **kwargs):
config = get_config()
for i, argname in enumerate(func_args):
if len(args) > i or argname in kwargs:
continue
elif argname in config:
kwargs[argname] = config[argname]
try:
getcallargs(func, *args, **kwargs)
except TypeError as exc:
msg = "{}\n{}".format(exc.args[0], PALLADIUM_CONFIG_ERROR)
exc.args = (msg,)
raise exc
return func(*args, **kwargs)
wrapper.__wrapped__ = func
return wrapper |
def p_preprocessor_line_line_file(p):
""" preproc_line : _LINE INTEGER STRING
"""
p.lexer.lineno = int(p[2]) + p.lexer.lineno - p.lineno(3) - 1
gl.FILENAME = p[3] | preproc_line : _LINE INTEGER STRING | Below is the instruction that describes the task:
### Input:
preproc_line : _LINE INTEGER STRING
### Response:
def p_preprocessor_line_line_file(p):
""" preproc_line : _LINE INTEGER STRING
"""
p.lexer.lineno = int(p[2]) + p.lexer.lineno - p.lineno(3) - 1
gl.FILENAME = p[3] |
def sorted_analyses(self, analyses):
"""Sort the analyses by AR ID ascending and subsorted by priority
sortkey within the AR they belong to
"""
analyses = sorted(analyses, key=lambda an: an.getRequestID())
def sorted_by_sortkey(objs):
return sorted(objs, key=lambda an: an.getPrioritySortkey())
# Now, we need the analyses within a request ID to be sorted by
# sortkey (sortable_title index), so it will appear in the same
# order as they appear in Analyses list from AR view
current_sample_id = None
current_analyses = []
sorted_analyses = []
for analysis in analyses:
sample_id = analysis.getRequestID()
if sample_id and current_sample_id != sample_id:
# Sort the brains we've collected until now, that
# belong to the same Analysis Request
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
current_sample_id = sample_id
current_analyses = []
# Now we are inside the same AR
current_analyses.append(analysis)
continue
# Sort the last set of brains we've collected
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
return sorted_analyses | Sort the analyses by AR ID ascending and subsorted by priority
sortkey within the AR they belong to | Below is the instruction that describes the task:
### Input:
Sort the analyses by AR ID ascending and subsorted by priority
sortkey within the AR they belong to
### Response:
def sorted_analyses(self, analyses):
"""Sort the analyses by AR ID ascending and subsorted by priority
sortkey within the AR they belong to
"""
analyses = sorted(analyses, key=lambda an: an.getRequestID())
def sorted_by_sortkey(objs):
return sorted(objs, key=lambda an: an.getPrioritySortkey())
# Now, we need the analyses within a request ID to be sorted by
# sortkey (sortable_title index), so it will appear in the same
# order as they appear in Analyses list from AR view
current_sample_id = None
current_analyses = []
sorted_analyses = []
for analysis in analyses:
sample_id = analysis.getRequestID()
if sample_id and current_sample_id != sample_id:
# Sort the brains we've collected until now, that
# belong to the same Analysis Request
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
current_sample_id = sample_id
current_analyses = []
# Now we are inside the same AR
current_analyses.append(analysis)
continue
# Sort the last set of brains we've collected
current_analyses = sorted_by_sortkey(current_analyses)
sorted_analyses.extend(current_analyses)
return sorted_analyses |
def get_resource_mdata():
"""Return default mdata map for Resource"""
return {
'group': {
'element_label': {
'text': 'group',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter either true or false.',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_boolean_values': [None],
'syntax': 'BOOLEAN',
},
'avatar': {
'element_label': {
'text': 'avatar',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
} | Return default mdata map for Resource | Below is the instruction that describes the task:
### Input:
Return default mdata map for Resource
### Response:
def get_resource_mdata():
"""Return default mdata map for Resource"""
return {
'group': {
'element_label': {
'text': 'group',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter either true or false.',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_boolean_values': [None],
'syntax': 'BOOLEAN',
},
'avatar': {
'element_label': {
'text': 'avatar',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
} |
def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
algo = certificate_or_public_key.algorithm
if algo == 'rsa' and rsa_pss_padding:
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}.get(hash_algorithm, 0)
decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)
key_size = certificate_or_public_key.bit_size
if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):
raise SignatureError('Signature is invalid')
return
if algo == 'rsa' and hash_algorithm == 'raw':
padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)
try:
plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)
if not constant_compare(plaintext, data):
raise ValueError()
except (ValueError):
raise SignatureError('Signature is invalid')
return
hash_handle = None
try:
alg_id = {
'md5': Advapi32Const.CALG_MD5,
'sha1': Advapi32Const.CALG_SHA1,
'sha256': Advapi32Const.CALG_SHA_256,
'sha384': Advapi32Const.CALG_SHA_384,
'sha512': Advapi32Const.CALG_SHA_512,
}[hash_algorithm]
hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
res = advapi32.CryptCreateHash(
certificate_or_public_key.context_handle,
alg_id,
null(),
0,
hash_handle_pointer
)
handle_error(res)
hash_handle = unwrap(hash_handle_pointer)
res = advapi32.CryptHashData(hash_handle, data, len(data), 0)
handle_error(res)
if algo == 'dsa':
# Windows doesn't use the ASN.1 Sequence for DSA signatures,
# so we have to convert it here for the verification to work
try:
signature = algos.DSASignature.load(signature).to_p1363()
# Switch the two integers so that the reversal later will
# result in the correct order
half_len = len(signature) // 2
signature = signature[half_len:] + signature[:half_len]
except (ValueError, OverflowError, TypeError):
raise SignatureError('Signature is invalid')
# The CryptoAPI expects signatures to be in little endian byte order,
# which is the opposite of other systems, so we must reverse it
reversed_signature = signature[::-1]
res = advapi32.CryptVerifySignatureW(
hash_handle,
reversed_signature,
len(signature),
certificate_or_public_key.key_handle,
null(),
0
)
handle_error(res)
finally:
if hash_handle:
advapi32.CryptDestroyHash(hash_handle) | Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library | Below is the instruction that describes the task:
### Input:
Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
### Response:
def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
algo = certificate_or_public_key.algorithm
if algo == 'rsa' and rsa_pss_padding:
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}.get(hash_algorithm, 0)
decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)
key_size = certificate_or_public_key.bit_size
if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):
raise SignatureError('Signature is invalid')
return
if algo == 'rsa' and hash_algorithm == 'raw':
padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)
try:
plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)
if not constant_compare(plaintext, data):
raise ValueError()
except (ValueError):
raise SignatureError('Signature is invalid')
return
hash_handle = None
try:
alg_id = {
'md5': Advapi32Const.CALG_MD5,
'sha1': Advapi32Const.CALG_SHA1,
'sha256': Advapi32Const.CALG_SHA_256,
'sha384': Advapi32Const.CALG_SHA_384,
'sha512': Advapi32Const.CALG_SHA_512,
}[hash_algorithm]
hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
res = advapi32.CryptCreateHash(
certificate_or_public_key.context_handle,
alg_id,
null(),
0,
hash_handle_pointer
)
handle_error(res)
hash_handle = unwrap(hash_handle_pointer)
res = advapi32.CryptHashData(hash_handle, data, len(data), 0)
handle_error(res)
if algo == 'dsa':
# Windows doesn't use the ASN.1 Sequence for DSA signatures,
# so we have to convert it here for the verification to work
try:
signature = algos.DSASignature.load(signature).to_p1363()
# Switch the two integers so that the reversal later will
# result in the correct order
half_len = len(signature) // 2
signature = signature[half_len:] + signature[:half_len]
except (ValueError, OverflowError, TypeError):
raise SignatureError('Signature is invalid')
# The CryptoAPI expects signatures to be in little endian byte order,
# which is the opposite of other systems, so we must reverse it
reversed_signature = signature[::-1]
res = advapi32.CryptVerifySignatureW(
hash_handle,
reversed_signature,
len(signature),
certificate_or_public_key.key_handle,
null(),
0
)
handle_error(res)
finally:
if hash_handle:
advapi32.CryptDestroyHash(hash_handle) |
def validate_full_path(cls, full_path, **kwargs):
"""Helper method to parse a full or partial path and
return a full path as well as a dict containing path parts.
Uses the following rules when processing the path:
* If no domain, uses the current user's account domain
* If no vault, uses the current user's personal vault.
* If no path, uses '/' (vault root)
Returns a tuple containing:
* The validated full_path
* A dictionary with the components:
* domain: the domain of the vault
* vault: the name of the vault, without domain
* vault_full_path: domain:vault
* path: the object path within the vault
* parent_path: the parent path to the object
* filename: the object's filename (if any)
* full_path: the validated full path
The following components may be overridden using kwargs:
* vault
* path
Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:
domain:vault:/path -> object "path" in the root of "domain:vault"
domain:vault/path -> object "path" in the root of "domain:vault"
vault:/path -> object "path" in the root of "vault"
vault/path -> object "path" in the root of "vault"
~/path -> object "path" in the root of personal vault
vault/ -> root of "vault"
~/ -> root of your personal vault
The following two formats are not supported:
path -> invalid/ambiguous path (exception)
vault:path -> invalid/ambiguous path (exception)
vault:path/path -> unsupported, interpreted as domain:vault/path
"""
from solvebio.resource.vault import Vault
_client = kwargs.pop('client', None) or cls._client or client
if not full_path:
raise Exception(
'Invalid path: ',
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"')
# Parse the vault's full_path, using overrides if any
input_vault = kwargs.get('vault') or full_path
try:
vault_full_path, path_dict = \
Vault.validate_full_path(input_vault, client=_client)
except Exception as err:
raise Exception('Could not determine vault from "{0}": {1}'
.format(input_vault, err))
if kwargs.get('path'):
# Allow override of the object_path.
full_path = '{0}:/{1}'.format(vault_full_path, kwargs['path'])
match = cls.PATH_RE.match(full_path)
if match:
object_path = match.groupdict()['path']
else:
raise Exception(
'Cannot find a valid object path in "{0}". '
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"'
.format(full_path))
# Remove double slashes
object_path = re.sub('//+', '/', object_path)
if object_path != '/':
# Remove trailing slash
object_path = object_path.rstrip('/')
path_dict['path'] = object_path
# TODO: parent_path and filename
full_path = '{domain}:{vault}:{path}'.format(**path_dict)
path_dict['full_path'] = full_path
return full_path, path_dict | Helper method to parse a full or partial path and
return a full path as well as a dict containing path parts.
Uses the following rules when processing the path:
* If no domain, uses the current user's account domain
* If no vault, uses the current user's personal vault.
* If no path, uses '/' (vault root)
Returns a tuple containing:
* The validated full_path
* A dictionary with the components:
* domain: the domain of the vault
* vault: the name of the vault, without domain
* vault_full_path: domain:vault
* path: the object path within the vault
* parent_path: the parent path to the object
* filename: the object's filename (if any)
* full_path: the validated full path
The following components may be overridden using kwargs:
* vault
* path
Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:
domain:vault:/path -> object "path" in the root of "domain:vault"
domain:vault/path -> object "path" in the root of "domain:vault"
vault:/path -> object "path" in the root of "vault"
vault/path -> object "path" in the root of "vault"
~/path -> object "path" in the root of personal vault
vault/ -> root of "vault"
~/ -> root of your personal vault
The following two formats are not supported:
path -> invalid/ambiguous path (exception)
vault:path -> invalid/ambiguous path (exception)
vault:path/path -> unsupported, interpreted as domain:vault/path | Below is the instruction that describes the task:
### Input:
Helper method to parse a full or partial path and
return a full path as well as a dict containing path parts.
Uses the following rules when processing the path:
* If no domain, uses the current user's account domain
* If no vault, uses the current user's personal vault.
* If no path, uses '/' (vault root)
Returns a tuple containing:
* The validated full_path
* A dictionary with the components:
* domain: the domain of the vault
* vault: the name of the vault, without domain
* vault_full_path: domain:vault
* path: the object path within the vault
* parent_path: the parent path to the object
* filename: the object's filename (if any)
* full_path: the validated full path
The following components may be overridden using kwargs:
* vault
* path
Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:
domain:vault:/path -> object "path" in the root of "domain:vault"
domain:vault/path -> object "path" in the root of "domain:vault"
vault:/path -> object "path" in the root of "vault"
vault/path -> object "path" in the root of "vault"
~/path -> object "path" in the root of personal vault
vault/ -> root of "vault"
~/ -> root of your personal vault
The following two formats are not supported:
path -> invalid/ambiguous path (exception)
vault:path -> invalid/ambiguous path (exception)
vault:path/path -> unsupported, interpreted as domain:vault/path
### Response:
def validate_full_path(cls, full_path, **kwargs):
"""Helper method to parse a full or partial path and
return a full path as well as a dict containing path parts.
Uses the following rules when processing the path:
* If no domain, uses the current user's account domain
* If no vault, uses the current user's personal vault.
* If no path, uses '/' (vault root)
Returns a tuple containing:
* The validated full_path
* A dictionary with the components:
* domain: the domain of the vault
* vault: the name of the vault, without domain
* vault_full_path: domain:vault
* path: the object path within the vault
* parent_path: the parent path to the object
* filename: the object's filename (if any)
* full_path: the validated full path
The following components may be overridden using kwargs:
* vault
* path
Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:
domain:vault:/path -> object "path" in the root of "domain:vault"
domain:vault/path -> object "path" in the root of "domain:vault"
vault:/path -> object "path" in the root of "vault"
vault/path -> object "path" in the root of "vault"
~/path -> object "path" in the root of personal vault
vault/ -> root of "vault"
~/ -> root of your personal vault
The following two formats are not supported:
path -> invalid/ambiguous path (exception)
vault:path -> invalid/ambiguous path (exception)
vault:path/path -> unsupported, interpreted as domain:vault/path
"""
from solvebio.resource.vault import Vault
_client = kwargs.pop('client', None) or cls._client or client
if not full_path:
raise Exception(
'Invalid path: ',
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"')
# Parse the vault's full_path, using overrides if any
input_vault = kwargs.get('vault') or full_path
try:
vault_full_path, path_dict = \
Vault.validate_full_path(input_vault, client=_client)
except Exception as err:
raise Exception('Could not determine vault from "{0}": {1}'
.format(input_vault, err))
if kwargs.get('path'):
# Allow override of the object_path.
full_path = '{0}:/{1}'.format(vault_full_path, kwargs['path'])
match = cls.PATH_RE.match(full_path)
if match:
object_path = match.groupdict()['path']
else:
raise Exception(
'Cannot find a valid object path in "{0}". '
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"'
.format(full_path))
# Remove double slashes
object_path = re.sub('//+', '/', object_path)
if object_path != '/':
# Remove trailing slash
object_path = object_path.rstrip('/')
path_dict['path'] = object_path
# TODO: parent_path and filename
full_path = '{domain}:{vault}:{path}'.format(**path_dict)
path_dict['full_path'] = full_path
return full_path, path_dict |
def compile_config(path,
source=None,
config_name=None,
config_data=None,
config_data_source=None,
script_parameters=None,
salt_env='base'):
r'''
Compile a config from a PowerShell script (``.ps1``)
Args:
path (str): Path (local) to the script that will create the ``.mof``
configuration file. If no source is passed, the file must exist
locally. Required.
source (str): Path to the script on ``file_roots`` to cache at the
location specified by ``path``. The source file will be cached
locally and then executed. If source is not passed, the config
script located at ``path`` will be compiled. Optional.
config_name (str): The name of the Configuration within the script to
apply. If the script contains multiple configurations within the
file a ``config_name`` must be specified. If the ``config_name`` is
not specified, the name of the file will be used as the
``config_name`` to run. Optional.
config_data (str): Configuration data in the form of a hash table that
will be passed to the ``ConfigurationData`` parameter when the
``config_name`` is compiled. This can be the path to a ``.psd1``
file containing the proper hash table or the PowerShell code to
create the hash table.
.. versionadded:: 2017.7.0
config_data_source (str): The path to the ``.psd1`` file on
``file_roots`` to cache at the location specified by
``config_data``. If this is specified, ``config_data`` must be a
local path instead of a hash table.
.. versionadded:: 2017.7.0
script_parameters (str): Any additional parameters expected by the
configuration script. These must be defined in the script itself.
.. versionadded:: 2017.7.0
salt_env (str): The salt environment to use when copying the source.
Default is 'base'
Returns:
dict: A dictionary containing the results of the compilation
CLI Example:
To compile a config from a script that already exists on the system:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1
To cache a config script to the system from the master and compile it:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
'''
if source:
log.info('DSC: Caching %s', source)
cached_files = __salt__['cp.get_file'](path=source,
dest=path,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(source)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
if config_data_source:
log.info('DSC: Caching %s', config_data_source)
cached_files = __salt__['cp.get_file'](path=config_data_source,
dest=config_data,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(config_data_source)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
# Make sure the path exists
if not os.path.exists(path):
error = '"{0}" not found'.format(path)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
if config_name is None:
# If the name of the config isn't passed, make it the name of the .ps1
config_name = os.path.splitext(os.path.basename(path))[0]
cwd = os.path.dirname(path)
# Run the script and see if the compile command is in the script
cmd = [path]
# Add any script parameters
if script_parameters:
cmd.append(script_parameters)
# Select fields to return
cmd.append('| Select-Object -Property FullName, Extension, Exists, '
'@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) '
'-Format g}}')
cmd = ' '.join(cmd)
ret = _pshell(cmd, cwd)
if ret:
# Script compiled, return results
if ret.get('Exists'):
log.info('DSC: Compile Config: %s', ret)
return ret
# If you get to this point, the script did not contain a compile command
# dot source the script to compile the state and generate the mof file
cmd = ['.', path]
if script_parameters:
cmd.append(script_parameters)
cmd.extend([';', config_name])
if config_data:
cmd.append(config_data)
cmd.append('| Select-Object -Property FullName, Extension, Exists, '
'@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) '
'-Format g}}')
cmd = ' '.join(cmd)
ret = _pshell(cmd, cwd)
if ret:
# Script compiled, return results
if ret.get('Exists'):
log.info('DSC: Compile Config: %s', ret)
return ret
error = 'Failed to compile config: {0}'.format(path)
error += '\nReturned: {0}'.format(ret)
log.error('DSC: %s', error)
raise CommandExecutionError(error) | r'''
Compile a config from a PowerShell script (``.ps1``)
Args:
path (str): Path (local) to the script that will create the ``.mof``
configuration file. If no source is passed, the file must exist
locally. Required.
source (str): Path to the script on ``file_roots`` to cache at the
location specified by ``path``. The source file will be cached
locally and then executed. If source is not passed, the config
script located at ``path`` will be compiled. Optional.
config_name (str): The name of the Configuration within the script to
apply. If the script contains multiple configurations within the
file a ``config_name`` must be specified. If the ``config_name`` is
not specified, the name of the file will be used as the
``config_name`` to run. Optional.
config_data (str): Configuration data in the form of a hash table that
will be passed to the ``ConfigurationData`` parameter when the
``config_name`` is compiled. This can be the path to a ``.psd1``
file containing the proper hash table or the PowerShell code to
create the hash table.
.. versionadded:: 2017.7.0
config_data_source (str): The path to the ``.psd1`` file on
``file_roots`` to cache at the location specified by
``config_data``. If this is specified, ``config_data`` must be a
local path instead of a hash table.
.. versionadded:: 2017.7.0
script_parameters (str): Any additional parameters expected by the
configuration script. These must be defined in the script itself.
.. versionadded:: 2017.7.0
salt_env (str): The salt environment to use when copying the source.
Default is 'base'
Returns:
dict: A dictionary containing the results of the compilation
CLI Example:
To compile a config from a script that already exists on the system:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1
To cache a config script to the system from the master and compile it:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 | Below is the instruction that describes the task:
### Input:
r'''
Compile a config from a PowerShell script (``.ps1``)
Args:
path (str): Path (local) to the script that will create the ``.mof``
configuration file. If no source is passed, the file must exist
locally. Required.
source (str): Path to the script on ``file_roots`` to cache at the
location specified by ``path``. The source file will be cached
locally and then executed. If source is not passed, the config
script located at ``path`` will be compiled. Optional.
config_name (str): The name of the Configuration within the script to
apply. If the script contains multiple configurations within the
file a ``config_name`` must be specified. If the ``config_name`` is
not specified, the name of the file will be used as the
``config_name`` to run. Optional.
config_data (str): Configuration data in the form of a hash table that
will be passed to the ``ConfigurationData`` parameter when the
``config_name`` is compiled. This can be the path to a ``.psd1``
file containing the proper hash table or the PowerShell code to
create the hash table.
.. versionadded:: 2017.7.0
config_data_source (str): The path to the ``.psd1`` file on
``file_roots`` to cache at the location specified by
``config_data``. If this is specified, ``config_data`` must be a
local path instead of a hash table.
.. versionadded:: 2017.7.0
script_parameters (str): Any additional parameters expected by the
configuration script. These must be defined in the script itself.
.. versionadded:: 2017.7.0
salt_env (str): The salt environment to use when copying the source.
Default is 'base'
Returns:
dict: A dictionary containing the results of the compilation
CLI Example:
To compile a config from a script that already exists on the system:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1
To cache a config script to the system from the master and compile it:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
### Response:
def compile_config(path,
source=None,
config_name=None,
config_data=None,
config_data_source=None,
script_parameters=None,
salt_env='base'):
r'''
Compile a config from a PowerShell script (``.ps1``)
Args:
path (str): Path (local) to the script that will create the ``.mof``
configuration file. If no source is passed, the file must exist
locally. Required.
source (str): Path to the script on ``file_roots`` to cache at the
location specified by ``path``. The source file will be cached
locally and then executed. If source is not passed, the config
script located at ``path`` will be compiled. Optional.
config_name (str): The name of the Configuration within the script to
apply. If the script contains multiple configurations within the
file a ``config_name`` must be specified. If the ``config_name`` is
not specified, the name of the file will be used as the
``config_name`` to run. Optional.
config_data (str): Configuration data in the form of a hash table that
will be passed to the ``ConfigurationData`` parameter when the
``config_name`` is compiled. This can be the path to a ``.psd1``
file containing the proper hash table or the PowerShell code to
create the hash table.
.. versionadded:: 2017.7.0
config_data_source (str): The path to the ``.psd1`` file on
``file_roots`` to cache at the location specified by
``config_data``. If this is specified, ``config_data`` must be a
local path instead of a hash table.
.. versionadded:: 2017.7.0
script_parameters (str): Any additional parameters expected by the
configuration script. These must be defined in the script itself.
.. versionadded:: 2017.7.0
salt_env (str): The salt environment to use when copying the source.
Default is 'base'
Returns:
dict: A dictionary containing the results of the compilation
CLI Example:
To compile a config from a script that already exists on the system:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1
To cache a config script to the system from the master and compile it:
.. code-block:: bash
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
'''
if source:
log.info('DSC: Caching %s', source)
cached_files = __salt__['cp.get_file'](path=source,
dest=path,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(source)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
if config_data_source:
log.info('DSC: Caching %s', config_data_source)
cached_files = __salt__['cp.get_file'](path=config_data_source,
dest=config_data,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(config_data_source)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
# Make sure the path exists
if not os.path.exists(path):
error = '"{0}" not found'.format(path)
log.error('DSC: %s', error)
raise CommandExecutionError(error)
if config_name is None:
# If the name of the config isn't passed, make it the name of the .ps1
config_name = os.path.splitext(os.path.basename(path))[0]
cwd = os.path.dirname(path)
# Run the script and see if the compile command is in the script
cmd = [path]
# Add any script parameters
if script_parameters:
cmd.append(script_parameters)
# Select fields to return
cmd.append('| Select-Object -Property FullName, Extension, Exists, '
'@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) '
'-Format g}}')
cmd = ' '.join(cmd)
ret = _pshell(cmd, cwd)
if ret:
# Script compiled, return results
if ret.get('Exists'):
log.info('DSC: Compile Config: %s', ret)
return ret
# If you get to this point, the script did not contain a compile command
# dot source the script to compile the state and generate the mof file
cmd = ['.', path]
if script_parameters:
cmd.append(script_parameters)
cmd.extend([';', config_name])
if config_data:
cmd.append(config_data)
cmd.append('| Select-Object -Property FullName, Extension, Exists, '
'@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) '
'-Format g}}')
cmd = ' '.join(cmd)
ret = _pshell(cmd, cwd)
if ret:
# Script compiled, return results
if ret.get('Exists'):
log.info('DSC: Compile Config: %s', ret)
return ret
error = 'Failed to compile config: {0}'.format(path)
error += '\nReturned: {0}'.format(ret)
log.error('DSC: %s', error)
raise CommandExecutionError(error) |
def get_shape(bin_edges, sid):
"""
:returns:
the shape of the disaggregation matrix for the given site, of form
(#mags-1, #dists-1, #lons-1, #lats-1, #eps-1)
"""
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
return (len(mag_bins) - 1, len(dist_bins) - 1,
len(lon_bins[sid]) - 1, len(lat_bins[sid]) - 1, len(eps_bins) - 1) | :returns:
the shape of the disaggregation matrix for the given site, of form
(#mags-1, #dists-1, #lons-1, #lats-1, #eps-1) | Below is the the instruction that describes the task:
### Input:
:returns:
the shape of the disaggregation matrix for the given site, of form
(#mags-1, #dists-1, #lons-1, #lats-1, #eps-1)
### Response:
def get_shape(bin_edges, sid):
"""
:returns:
the shape of the disaggregation matrix for the given site, of form
(#mags-1, #dists-1, #lons-1, #lats-1, #eps-1)
"""
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
return (len(mag_bins) - 1, len(dist_bins) - 1,
len(lon_bins[sid]) - 1, len(lat_bins[sid]) - 1, len(eps_bins) - 1) |
def _execute(self, api_command, *, timeout=None):
"""Execute the command."""
if api_command.observe:
self._observe(api_command)
return
method = api_command.method
path = api_command.path
data = api_command.data
parse_json = api_command.parse_json
url = api_command.url(self._host)
proc_timeout = self._timeout
if timeout is not None:
proc_timeout = timeout
command = self._base_command(method)
kwargs = {
'stderr': subprocess.DEVNULL,
'timeout': proc_timeout,
'universal_newlines': True,
}
if data is not None:
kwargs['input'] = json.dumps(data)
command.append('-f')
command.append('-')
_LOGGER.debug('Executing %s %s %s: %s', self._host, method, path,
data)
else:
_LOGGER.debug('Executing %s %s %s', self._host, method, path)
command.append(url)
try:
return_value = subprocess.check_output(command, **kwargs)
except subprocess.TimeoutExpired:
raise RequestTimeout() from None
except subprocess.CalledProcessError as err:
raise RequestError(
'Error executing request: {}'.format(err)) from None
api_command.result = _process_output(return_value, parse_json)
return api_command.result | Execute the command. | Below is the the instruction that describes the task:
### Input:
Execute the command.
### Response:
def _execute(self, api_command, *, timeout=None):
"""Execute the command."""
if api_command.observe:
self._observe(api_command)
return
method = api_command.method
path = api_command.path
data = api_command.data
parse_json = api_command.parse_json
url = api_command.url(self._host)
proc_timeout = self._timeout
if timeout is not None:
proc_timeout = timeout
command = self._base_command(method)
kwargs = {
'stderr': subprocess.DEVNULL,
'timeout': proc_timeout,
'universal_newlines': True,
}
if data is not None:
kwargs['input'] = json.dumps(data)
command.append('-f')
command.append('-')
_LOGGER.debug('Executing %s %s %s: %s', self._host, method, path,
data)
else:
_LOGGER.debug('Executing %s %s %s', self._host, method, path)
command.append(url)
try:
return_value = subprocess.check_output(command, **kwargs)
except subprocess.TimeoutExpired:
raise RequestTimeout() from None
except subprocess.CalledProcessError as err:
raise RequestError(
'Error executing request: {}'.format(err)) from None
api_command.result = _process_output(return_value, parse_json)
return api_command.result |
def _check_dimension(typeattr, unit_id=None):
"""
Check that the unit and dimension on a type attribute match.
Alternatively, pass in a unit manually to check against the dimension
of the type attribute
"""
if unit_id is None:
unit_id = typeattr.unit_id
dimension_id = _get_attr(typeattr.attr_id).dimension_id
if unit_id is not None and dimension_id is None:
# First error case
unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
raise HydraError("Unit %s (abbreviation=%s) has dimension_id %s(name=%s), but attribute has no dimension"%
(unit_id, units.get_unit(unit_id).abbreviation,
unit_dimension_id, units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True).name))
elif unit_id is not None and dimension_id is not None:
unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
if unit_dimension_id != dimension_id:
# Only error case
raise HydraError("Unit %s (abbreviation=%s) has dimension_id %s(name=%s), but attribute has dimension_id %s(name=%s)"%
(unit_id, units.get_unit(unit_id).abbreviation,
unit_dimension_id, units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True).name,
dimension_id, units.get_dimension(dimension_id, do_accept_dimension_id_none=True).name)) | Check that the unit and dimension on a type attribute match.
Alternatively, pass in a unit manually to check against the dimension
of the type attribute | Below is the the instruction that describes the task:
### Input:
Check that the unit and dimension on a type attribute match.
Alternatively, pass in a unit manually to check against the dimension
of the type attribute
### Response:
def _check_dimension(typeattr, unit_id=None):
"""
Check that the unit and dimension on a type attribute match.
Alternatively, pass in a unit manually to check against the dimension
of the type attribute
"""
if unit_id is None:
unit_id = typeattr.unit_id
dimension_id = _get_attr(typeattr.attr_id).dimension_id
if unit_id is not None and dimension_id is None:
# First error case
unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
raise HydraError("Unit %s (abbreviation=%s) has dimension_id %s(name=%s), but attribute has no dimension"%
(unit_id, units.get_unit(unit_id).abbreviation,
unit_dimension_id, units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True).name))
elif unit_id is not None and dimension_id is not None:
unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
if unit_dimension_id != dimension_id:
# Only error case
raise HydraError("Unit %s (abbreviation=%s) has dimension_id %s(name=%s), but attribute has dimension_id %s(name=%s)"%
(unit_id, units.get_unit(unit_id).abbreviation,
unit_dimension_id, units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True).name,
dimension_id, units.get_dimension(dimension_id, do_accept_dimension_id_none=True).name)) |
def _is_link(fs, path):
"""
Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISLNK(fs.lstat(path).st_mode)
except exceptions.FileNotFound:
return False | Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up. | Below is the the instruction that describes the task:
### Input:
Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
### Response:
def _is_link(fs, path):
"""
Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISLNK(fs.lstat(path).st_mode)
except exceptions.FileNotFound:
return False |
def get_val(self):
"""
Gets attribute's value.
@return: stored value.
@rtype: int
@raise IOError: if corresponding file in /proc/sys cannot be read.
"""
with open(os.path.join(self._base, self._attr), 'r') as file_obj:
return int(file_obj.readline()) | Gets attribute's value.
@return: stored value.
@rtype: int
@raise IOError: if corresponding file in /proc/sys cannot be read. | Below is the the instruction that describes the task:
### Input:
Gets attribute's value.
@return: stored value.
@rtype: int
@raise IOError: if corresponding file in /proc/sys cannot be read.
### Response:
def get_val(self):
"""
Gets attribute's value.
@return: stored value.
@rtype: int
@raise IOError: if corresponding file in /proc/sys cannot be read.
"""
with open(os.path.join(self._base, self._attr), 'r') as file_obj:
return int(file_obj.readline()) |
def _generate_non_lastnames_variations(non_lastnames):
"""Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
"""
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
] | Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
] | Below is the the instruction that describes the task:
### Input:
Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
### Response:
def _generate_non_lastnames_variations(non_lastnames):
"""Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
"""
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
] |
def handle_message(self, ch, method, properties, body):
"""
this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views
Args:
ch: amqp channel
method: amqp method
properties:
body: message body
"""
input = {}
headers = {}
try:
self.sessid = method.routing_key
input = json_decode(body)
data = input['data']
# since this comes as "path" we dont know if it's view or workflow yet
# TODO: just a workaround till we modify ui to
if 'path' in data:
if data['path'] in VIEW_METHODS:
data['view'] = data['path']
else:
data['wf'] = data['path']
session = Session(self.sessid)
headers = {'remote_ip': input['_zops_remote_ip'],
'source': input['_zops_source']}
if 'wf' in data:
output = self._handle_workflow(session, data, headers)
elif 'job' in data:
self._handle_job(session, data, headers)
return
else:
output = self._handle_view(session, data, headers)
except HTTPError as e:
import sys
if hasattr(sys, '_called_from_test'):
raise
output = {"cmd": "error", "error": self._prepare_error_msg(e.message), "code": e.code}
log.exception("Http error occurred")
except:
self.current = Current(session=session, input=data)
self.current.headers = headers
import sys
if hasattr(sys, '_called_from_test'):
raise
err = traceback.format_exc()
output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500}
log.exception("Worker error occurred with messsage body:\n%s" % body)
if 'callbackID' in input:
output['callbackID'] = input['callbackID']
log.info("OUTPUT for %s: %s" % (self.sessid, output))
output['reply_timestamp'] = time()
self.send_output(output) | this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views
Args:
ch: amqp channel
method: amqp method
properties:
body: message body | Below is the the instruction that describes the task:
### Input:
this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views
Args:
ch: amqp channel
method: amqp method
properties:
body: message body
### Response:
def handle_message(self, ch, method, properties, body):
"""
this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views
Args:
ch: amqp channel
method: amqp method
properties:
body: message body
"""
input = {}
headers = {}
try:
self.sessid = method.routing_key
input = json_decode(body)
data = input['data']
# since this comes as "path" we dont know if it's view or workflow yet
# TODO: just a workaround till we modify ui to
if 'path' in data:
if data['path'] in VIEW_METHODS:
data['view'] = data['path']
else:
data['wf'] = data['path']
session = Session(self.sessid)
headers = {'remote_ip': input['_zops_remote_ip'],
'source': input['_zops_source']}
if 'wf' in data:
output = self._handle_workflow(session, data, headers)
elif 'job' in data:
self._handle_job(session, data, headers)
return
else:
output = self._handle_view(session, data, headers)
except HTTPError as e:
import sys
if hasattr(sys, '_called_from_test'):
raise
output = {"cmd": "error", "error": self._prepare_error_msg(e.message), "code": e.code}
log.exception("Http error occurred")
except:
self.current = Current(session=session, input=data)
self.current.headers = headers
import sys
if hasattr(sys, '_called_from_test'):
raise
err = traceback.format_exc()
output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500}
log.exception("Worker error occurred with messsage body:\n%s" % body)
if 'callbackID' in input:
output['callbackID'] = input['callbackID']
log.info("OUTPUT for %s: %s" % (self.sessid, output))
output['reply_timestamp'] = time()
self.send_output(output) |
def delete_lbaas_l7rule(self, l7rule, l7policy):
"""Deletes the specified L7 rule."""
return self.delete(self.lbaas_l7rule_path % (l7policy, l7rule)) | Deletes the specified L7 rule. | Below is the the instruction that describes the task:
### Input:
Deletes the specified L7 rule.
### Response:
def delete_lbaas_l7rule(self, l7rule, l7policy):
"""Deletes the specified L7 rule."""
return self.delete(self.lbaas_l7rule_path % (l7policy, l7rule)) |
def convert_decimal_to_hundreds(self, amount):
"""
Convert Decimal(10.10) to string "1010"
:param amount:
:return:
"""
return str((amount.quantize(Decimal('.01'), rounding=ROUND_FLOOR) * 100).quantize(Decimal('0'))) | Convert Decimal(10.10) to string "1010"
:param amount:
:return: | Below is the the instruction that describes the task:
### Input:
Convert Decimal(10.10) to string "1010"
:param amount:
:return:
### Response:
def convert_decimal_to_hundreds(self, amount):
"""
Convert Decimal(10.10) to string "1010"
:param amount:
:return:
"""
return str((amount.quantize(Decimal('.01'), rounding=ROUND_FLOOR) * 100).quantize(Decimal('0'))) |
def update_firmware(self, file_url, component_type):
"""Updates the given firmware on the server for the given component.
:param file_url: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server
"""
fw_update_uri = self._get_firmware_update_service_resource()
action_data = {
'Action': 'InstallFromURI',
'FirmwareURI': file_url,
}
# perform the POST
LOG.debug(self._('Flashing firmware file: %s ...'), file_url)
status, headers, response = self._rest_post(
fw_update_uri, None, action_data)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# wait till the firmware update completes.
common.wait_for_ris_firmware_update_to_complete(self)
try:
state, percent = self.get_firmware_update_progress()
except exception.IloError:
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
return
if state == "ERROR":
msg = 'Unable to update firmware'
LOG.debug(self._(msg)) # noqa
raise exception.IloError(msg)
elif state == "UNKNOWN":
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
else: # "COMPLETED" | "IDLE"
LOG.info(self._('Flashing firmware file: %s ... done'), file_url) | Updates the given firmware on the server for the given component.
:param file_url: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server | Below is the the instruction that describes the task:
### Input:
Updates the given firmware on the server for the given component.
:param file_url: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server
### Response:
def update_firmware(self, file_url, component_type):
"""Updates the given firmware on the server for the given component.
:param file_url: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server
"""
fw_update_uri = self._get_firmware_update_service_resource()
action_data = {
'Action': 'InstallFromURI',
'FirmwareURI': file_url,
}
# perform the POST
LOG.debug(self._('Flashing firmware file: %s ...'), file_url)
status, headers, response = self._rest_post(
fw_update_uri, None, action_data)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# wait till the firmware update completes.
common.wait_for_ris_firmware_update_to_complete(self)
try:
state, percent = self.get_firmware_update_progress()
except exception.IloError:
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
return
if state == "ERROR":
msg = 'Unable to update firmware'
LOG.debug(self._(msg)) # noqa
raise exception.IloError(msg)
elif state == "UNKNOWN":
msg = 'Status of firmware update not known'
LOG.debug(self._(msg)) # noqa
else: # "COMPLETED" | "IDLE"
LOG.info(self._('Flashing firmware file: %s ... done'), file_url) |
def encode(self, data, flatten=False):
"""
Encodes the provided input data, returning a sparse vector of activations.
It solves a dynamic system to find optimal activations, as proposed by
Rozell et al. (2008).
:param data: (array) Data to be encoded (single point or multiple)
:param flatten (bool) Whether or not the data needs to be flattened,
in the case of images for example. Does not
need to be enabled during training.
:return: (array) Array of sparse activations (dimOutput,
numPoints)
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
# flatten if necessary
if flatten:
try:
data = np.reshape(data, (self.filterDim, data.shape[-1]))
except ValueError:
# only one data point
data = np.reshape(data, (self.filterDim, 1))
if data.shape[0] != self.filterDim:
raise ValueError("Data does not have the correct dimension!")
# if single data point, convert to 2-dimensional array for consistency
if len(data.shape) == 1:
data = data[:, np.newaxis]
projection = self.basis.T.dot(data)
representation = self.basis.T.dot(self.basis) - np.eye(self.outputDim)
states = np.zeros((self.outputDim, data.shape[1]))
threshold = 0.5 * np.max(np.abs(projection), axis=0)
activations = self._thresholdNonLinearity(states, threshold)
for _ in xrange(self.numLcaIterations):
# update dynamic system
states *= (1 - self.lcaLearningRate)
states += self.lcaLearningRate * (projection - representation.dot(activations))
activations = self._thresholdNonLinearity(states, threshold)
# decay threshold
threshold *= self.thresholdDecay
threshold[threshold < self.minThreshold] = self.minThreshold
return activations | Encodes the provided input data, returning a sparse vector of activations.
It solves a dynamic system to find optimal activations, as proposed by
Rozell et al. (2008).
:param data: (array) Data to be encoded (single point or multiple)
:param flatten (bool) Whether or not the data needs to be flattened,
in the case of images for example. Does not
need to be enabled during training.
:return: (array) Array of sparse activations (dimOutput,
numPoints) | Below is the the instruction that describes the task:
### Input:
Encodes the provided input data, returning a sparse vector of activations.
It solves a dynamic system to find optimal activations, as proposed by
Rozell et al. (2008).
:param data: (array) Data to be encoded (single point or multiple)
:param flatten (bool) Whether or not the data needs to be flattened,
in the case of images for example. Does not
need to be enabled during training.
:return: (array) Array of sparse activations (dimOutput,
numPoints)
### Response:
def encode(self, data, flatten=False):
"""
Encodes the provided input data, returning a sparse vector of activations.
It solves a dynamic system to find optimal activations, as proposed by
Rozell et al. (2008).
:param data: (array) Data to be encoded (single point or multiple)
:param flatten (bool) Whether or not the data needs to be flattened,
in the case of images for example. Does not
need to be enabled during training.
:return: (array) Array of sparse activations (dimOutput,
numPoints)
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
# flatten if necessary
if flatten:
try:
data = np.reshape(data, (self.filterDim, data.shape[-1]))
except ValueError:
# only one data point
data = np.reshape(data, (self.filterDim, 1))
if data.shape[0] != self.filterDim:
raise ValueError("Data does not have the correct dimension!")
# if single data point, convert to 2-dimensional array for consistency
if len(data.shape) == 1:
data = data[:, np.newaxis]
projection = self.basis.T.dot(data)
representation = self.basis.T.dot(self.basis) - np.eye(self.outputDim)
states = np.zeros((self.outputDim, data.shape[1]))
threshold = 0.5 * np.max(np.abs(projection), axis=0)
activations = self._thresholdNonLinearity(states, threshold)
for _ in xrange(self.numLcaIterations):
# update dynamic system
states *= (1 - self.lcaLearningRate)
states += self.lcaLearningRate * (projection - representation.dot(activations))
activations = self._thresholdNonLinearity(states, threshold)
# decay threshold
threshold *= self.thresholdDecay
threshold[threshold < self.minThreshold] = self.minThreshold
return activations |
def complete_io(self, iocb, msg):
"""Called by a handler to return data to the client."""
if _debug: IOController._debug("complete_io %r %r", iocb, msg)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
else:
# change the state
iocb.ioState = COMPLETED
iocb.ioResponse = msg
# notify the client
iocb.trigger() | Called by a handler to return data to the client. | Below is the the instruction that describes the task:
### Input:
Called by a handler to return data to the client.
### Response:
def complete_io(self, iocb, msg):
"""Called by a handler to return data to the client."""
if _debug: IOController._debug("complete_io %r %r", iocb, msg)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
else:
# change the state
iocb.ioState = COMPLETED
iocb.ioResponse = msg
# notify the client
iocb.trigger() |
def add_package(self, login, package_name,
summary=None,
license=None,
public=True,
license_url=None,
license_family=None,
attrs=None,
package_type=None):
'''
Add a new package to a users account
:param login: the login of the package owner
:param package_name: the name of the package to be created
:param package_type: A type identifier for the package (eg. 'pypi' or 'conda', etc.)
:param summary: A short summary about the package
:param license: the name of the package license
:param license_url: the url of the package license
:param public: if true then the package will be hosted publicly
:param attrs: A dictionary of extra attributes for this package
'''
url = '%s/package/%s/%s' % (self.domain, login, package_name)
attrs = attrs or {}
attrs['summary'] = summary
attrs['package_types'] = [package_type]
attrs['license'] = {
'name': license,
'url': license_url,
'family': license_family,
}
payload = dict(public=bool(public),
publish=False,
public_attrs=dict(attrs or {})
)
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
return res.json() | Add a new package to a users account
:param login: the login of the package owner
:param package_name: the name of the package to be created
:param package_type: A type identifier for the package (eg. 'pypi' or 'conda', etc.)
:param summary: A short summary about the package
:param license: the name of the package license
:param license_url: the url of the package license
:param public: if true then the package will be hosted publicly
:param attrs: A dictionary of extra attributes for this package | Below is the the instruction that describes the task:
### Input:
Add a new package to a users account
:param login: the login of the package owner
:param package_name: the name of the package to be created
:param package_type: A type identifier for the package (eg. 'pypi' or 'conda', etc.)
:param summary: A short summary about the package
:param license: the name of the package license
:param license_url: the url of the package license
:param public: if true then the package will be hosted publicly
:param attrs: A dictionary of extra attributes for this package
### Response:
def add_package(self, login, package_name,
summary=None,
license=None,
public=True,
license_url=None,
license_family=None,
attrs=None,
package_type=None):
'''
Add a new package to a users account
:param login: the login of the package owner
:param package_name: the name of the package to be created
:param package_type: A type identifier for the package (eg. 'pypi' or 'conda', etc.)
:param summary: A short summary about the package
:param license: the name of the package license
:param license_url: the url of the package license
:param public: if true then the package will be hosted publicly
:param attrs: A dictionary of extra attributes for this package
'''
url = '%s/package/%s/%s' % (self.domain, login, package_name)
attrs = attrs or {}
attrs['summary'] = summary
attrs['package_types'] = [package_type]
attrs['license'] = {
'name': license,
'url': license_url,
'family': license_family,
}
payload = dict(public=bool(public),
publish=False,
public_attrs=dict(attrs or {})
)
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
return res.json() |
def create(zpool, *vdevs, **kwargs):
'''
.. versionadded:: 2015.5.0
Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a hybrid zpool with cache, spare and log drives or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3
zpool : string
Name of storage pool
vdevs : string
One or move devices
force : boolean
Forces use of vdevs, even if they appear in use or specify a
conflicting replication level.
mountpoint : string
Sets the mount point for the root dataset
altroot : string
Equivalent to "-o cachefile=none,altroot=root"
properties : dict
Additional pool properties
filesystem_properties : dict
Additional filesystem properties
createboot : boolean
create a boot partition
.. versionadded:: 2018.3.0
.. warning:
This is only available on illumos and Solaris
CLI Examples:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
.. note::
Zpool properties can be specified at the time of creation of the pool
by passing an additional argument called "properties" and specifying
the properties with their respective values in the form of a python
dictionary:
.. code-block:: text
properties="{'property1': 'value1', 'property2': 'value2'}"
Filesystem properties can be specified at the time of creation of the
pool by passing an additional argument called "filesystem_properties"
and specifying the properties with their respective values in the form
of a python dictionary:
.. code-block:: text
filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"
Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
'''
## Configure pool
# NOTE: initialize the defaults
flags = []
opts = {}
target = []
# NOTE: push pool and filesystem properties
pool_properties = kwargs.get('properties', {})
filesystem_properties = kwargs.get('filesystem_properties', {})
# NOTE: set extra config based on kwargs
if kwargs.get('force', False):
flags.append('-f')
if kwargs.get('createboot', False) or 'bootsize' in pool_properties:
flags.append('-B')
if kwargs.get('altroot', False):
opts['-R'] = kwargs.get('altroot')
if kwargs.get('mountpoint', False):
opts['-m'] = kwargs.get('mountpoint')
# NOTE: append the pool name and specifications
target.append(zpool)
target.extend(vdevs)
## Create storage pool
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='create',
flags=flags,
opts=opts,
pool_properties=pool_properties,
filesystem_properties=filesystem_properties,
target=target,
),
python_shell=False,
)
ret = __utils__['zfs.parse_command_result'](res, 'created')
if ret['created']:
## NOTE: lookup zpool status for vdev config
ret['vdevs'] = _clean_vdev_config(
__salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool],
)
return ret | .. versionadded:: 2015.5.0
Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a hybrid zpool with cache, spare and log drives or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3
zpool : string
Name of storage pool
vdevs : string
One or move devices
force : boolean
Forces use of vdevs, even if they appear in use or specify a
conflicting replication level.
mountpoint : string
Sets the mount point for the root dataset
altroot : string
Equivalent to "-o cachefile=none,altroot=root"
properties : dict
Additional pool properties
filesystem_properties : dict
Additional filesystem properties
createboot : boolean
create a boot partition
.. versionadded:: 2018.3.0
.. warning:
This is only available on illumos and Solaris
CLI Examples:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
.. note::
Zpool properties can be specified at the time of creation of the pool
by passing an additional argument called "properties" and specifying
the properties with their respective values in the form of a python
dictionary:
.. code-block:: text
properties="{'property1': 'value1', 'property2': 'value2'}"
Filesystem properties can be specified at the time of creation of the
pool by passing an additional argument called "filesystem_properties"
and specifying the properties with their respective values in the form
of a python dictionary:
.. code-block:: text
filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"
Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False] | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.5.0
Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a hybrid zpool with cache, spare and log drives or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3
zpool : string
Name of storage pool
vdevs : string
One or move devices
force : boolean
Forces use of vdevs, even if they appear in use or specify a
conflicting replication level.
mountpoint : string
Sets the mount point for the root dataset
altroot : string
Equivalent to "-o cachefile=none,altroot=root"
properties : dict
Additional pool properties
filesystem_properties : dict
Additional filesystem properties
createboot : boolean
create a boot partition
.. versionadded:: 2018.3.0
.. warning:
This is only available on illumos and Solaris
CLI Examples:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
.. note::
Zpool properties can be specified at the time of creation of the pool
by passing an additional argument called "properties" and specifying
the properties with their respective values in the form of a python
dictionary:
.. code-block:: text
properties="{'property1': 'value1', 'property2': 'value2'}"
Filesystem properties can be specified at the time of creation of the
pool by passing an additional argument called "filesystem_properties"
and specifying the properties with their respective values in the form
of a python dictionary:
.. code-block:: text
filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"
Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
### Response:
def create(zpool, *vdevs, **kwargs):
'''
.. versionadded:: 2015.5.0
Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a hybrid zpool with cache, spare and log drives or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3
zpool : string
Name of storage pool
vdevs : string
One or move devices
force : boolean
Forces use of vdevs, even if they appear in use or specify a
conflicting replication level.
mountpoint : string
Sets the mount point for the root dataset
altroot : string
Equivalent to "-o cachefile=none,altroot=root"
properties : dict
Additional pool properties
filesystem_properties : dict
Additional filesystem properties
createboot : boolean
create a boot partition
.. versionadded:: 2018.3.0
.. warning:
This is only available on illumos and Solaris
CLI Examples:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
.. note::
Zpool properties can be specified at the time of creation of the pool
by passing an additional argument called "properties" and specifying
the properties with their respective values in the form of a python
dictionary:
.. code-block:: text
properties="{'property1': 'value1', 'property2': 'value2'}"
Filesystem properties can be specified at the time of creation of the
pool by passing an additional argument called "filesystem_properties"
and specifying the properties with their respective values in the form
of a python dictionary:
.. code-block:: text
filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"
Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
CLI Example:
.. code-block:: bash
salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]
'''
## Configure pool
# NOTE: initialize the defaults
flags = []
opts = {}
target = []
# NOTE: push pool and filesystem properties
pool_properties = kwargs.get('properties', {})
filesystem_properties = kwargs.get('filesystem_properties', {})
# NOTE: set extra config based on kwargs
if kwargs.get('force', False):
flags.append('-f')
if kwargs.get('createboot', False) or 'bootsize' in pool_properties:
flags.append('-B')
if kwargs.get('altroot', False):
opts['-R'] = kwargs.get('altroot')
if kwargs.get('mountpoint', False):
opts['-m'] = kwargs.get('mountpoint')
# NOTE: append the pool name and specifications
target.append(zpool)
target.extend(vdevs)
## Create storage pool
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='create',
flags=flags,
opts=opts,
pool_properties=pool_properties,
filesystem_properties=filesystem_properties,
target=target,
),
python_shell=False,
)
ret = __utils__['zfs.parse_command_result'](res, 'created')
if ret['created']:
## NOTE: lookup zpool status for vdev config
ret['vdevs'] = _clean_vdev_config(
__salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool],
)
return ret |
def AuthorizingClient(
domain,
auth,
request_encoder,
response_decoder,
user_agent=None
):
"""Creates a Freshbooks client for a freshbooks domain, using
an auth object.
"""
http_transport = transport.HttpTransport(
api_url(domain),
build_headers(auth, user_agent)
)
return client.Client(
request_encoder,
http_transport,
response_decoder
) | Creates a Freshbooks client for a freshbooks domain, using
an auth object. | Below is the the instruction that describes the task:
### Input:
Creates a Freshbooks client for a freshbooks domain, using
an auth object.
### Response:
def AuthorizingClient(
domain,
auth,
request_encoder,
response_decoder,
user_agent=None
):
"""Creates a Freshbooks client for a freshbooks domain, using
an auth object.
"""
http_transport = transport.HttpTransport(
api_url(domain),
build_headers(auth, user_agent)
)
return client.Client(
request_encoder,
http_transport,
response_decoder
) |
def delete_keyvault_secret(access_token, vault_uri, secret_name):
'''Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
secret_name (str): Name of the secret to add.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
return do_delete(endpoint, access_token) | Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
secret_name (str): Name of the secret to add.
Returns:
HTTP response. 200 OK. | Below is the the instruction that describes the task:
### Input:
Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
secret_name (str): Name of the secret to add.
Returns:
HTTP response. 200 OK.
### Response:
def delete_keyvault_secret(access_token, vault_uri, secret_name):
'''Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
secret_name (str): Name of the secret to add.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
return do_delete(endpoint, access_token) |
def localize(self, mode="r", perm=None, parent_perm=None, **kwargs):
""" localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs)
"""
if mode not in ("r", "w"):
raise Exception("unknown mode '{}', use r or w".format(mode))
# get additional arguments
skip_copy = kwargs.pop("skip_copy", False)
is_tmp = kwargs.pop("is_tmp", mode == "w")
if mode == "r":
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# always copy
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
finally:
tmp.remove()
else:
# simply yield
yield self
else: # write mode
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# copy when existing
if not skip_copy and self.exists():
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
# move back again
if tmp.exists():
tmp.move_to_local(self, dir_perm=parent_perm)
self.chmod(perm)
else:
logger.warning("cannot move non-existing localized file target {!r}".format(
self))
finally:
tmp.remove()
else:
# create the parent dir
self.parent.touch(perm=parent_perm)
# simply yield
yield self
if self.exists():
self.chmod(perm) | localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs) | Below is the the instruction that describes the task:
### Input:
localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs)
### Response:
def localize(self, mode="r", perm=None, parent_perm=None, **kwargs):
""" localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs)
"""
if mode not in ("r", "w"):
raise Exception("unknown mode '{}', use r or w".format(mode))
# get additional arguments
skip_copy = kwargs.pop("skip_copy", False)
is_tmp = kwargs.pop("is_tmp", mode == "w")
if mode == "r":
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# always copy
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
finally:
tmp.remove()
else:
# simply yield
yield self
else: # write mode
if is_tmp:
# create a temporary target
tmp = self.__class__(is_tmp=self.ext(n=1) or True)
# copy when existing
if not skip_copy and self.exists():
self.copy_to_local(tmp)
# yield the copy
try:
yield tmp
# move back again
if tmp.exists():
tmp.move_to_local(self, dir_perm=parent_perm)
self.chmod(perm)
else:
logger.warning("cannot move non-existing localized file target {!r}".format(
self))
finally:
tmp.remove()
else:
# create the parent dir
self.parent.touch(perm=parent_perm)
# simply yield
yield self
if self.exists():
self.chmod(perm) |
def compress_for_output_listing(paths):
"""Returns a tuple of 2 sets of which paths to display to user
The first set contains paths that would be deleted. Files of a package
are not added and the top-level directory of the package has a '*' added
at the end - to signify that all it's contents are removed.
The second set contains files that would have been skipped in the above
folders.
"""
will_remove = list(paths)
will_skip = set()
# Determine folders and files
folders = set()
files = set()
for path in will_remove:
if path.endswith(".pyc"):
continue
if path.endswith("__init__.py") or ".dist-info" in path:
folders.add(os.path.dirname(path))
files.add(path)
_normcased_files = set(map(os.path.normcase, files))
folders = compact(folders)
# This walks the tree using os.walk to not miss extra folders
# that might get added.
for folder in folders:
for dirpath, _, dirfiles in os.walk(folder):
for fname in dirfiles:
if fname.endswith(".pyc"):
continue
file_ = os.path.join(dirpath, fname)
if (os.path.isfile(file_) and
os.path.normcase(file_) not in _normcased_files):
# We are skipping this file. Add it to the set.
will_skip.add(file_)
will_remove = files | {
os.path.join(folder, "*") for folder in folders
}
return will_remove, will_skip | Returns a tuple of 2 sets of which paths to display to user
The first set contains paths that would be deleted. Files of a package
are not added and the top-level directory of the package has a '*' added
at the end - to signify that all it's contents are removed.
The second set contains files that would have been skipped in the above
folders. | Below is the the instruction that describes the task:
### Input:
Returns a tuple of 2 sets of which paths to display to user
The first set contains paths that would be deleted. Files of a package
are not added and the top-level directory of the package has a '*' added
at the end - to signify that all it's contents are removed.
The second set contains files that would have been skipped in the above
folders.
### Response:
def compress_for_output_listing(paths):
"""Returns a tuple of 2 sets of which paths to display to user
The first set contains paths that would be deleted. Files of a package
are not added and the top-level directory of the package has a '*' added
at the end - to signify that all it's contents are removed.
The second set contains files that would have been skipped in the above
folders.
"""
will_remove = list(paths)
will_skip = set()
# Determine folders and files
folders = set()
files = set()
for path in will_remove:
if path.endswith(".pyc"):
continue
if path.endswith("__init__.py") or ".dist-info" in path:
folders.add(os.path.dirname(path))
files.add(path)
_normcased_files = set(map(os.path.normcase, files))
folders = compact(folders)
# This walks the tree using os.walk to not miss extra folders
# that might get added.
for folder in folders:
for dirpath, _, dirfiles in os.walk(folder):
for fname in dirfiles:
if fname.endswith(".pyc"):
continue
file_ = os.path.join(dirpath, fname)
if (os.path.isfile(file_) and
os.path.normcase(file_) not in _normcased_files):
# We are skipping this file. Add it to the set.
will_skip.add(file_)
will_remove = files | {
os.path.join(folder, "*") for folder in folders
}
return will_remove, will_skip |
def update_location_centroid(point, cluster, max_distance, min_samples):
""" Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point)
"""
cluster.append(point)
points = [p.gen2arr() for p in cluster]
# Estimates the epsilon
eps = estimate_meters_to_deg(max_distance, precision=6)
p_cluster = DBSCAN(eps=eps, min_samples=min_samples)
p_cluster.fit(points)
clusters = {}
for i, label in enumerate(p_cluster.labels_):
if label in clusters.keys():
clusters[label].append(points[i])
else:
clusters[label] = [points[i]]
centroids = []
biggest_centroid_l = -float("inf")
biggest_centroid = None
for label, n_cluster in clusters.items():
centroid = compute_centroid(n_cluster)
centroids.append(centroid)
if label >= 0 and len(n_cluster) >= biggest_centroid_l:
biggest_centroid_l = len(n_cluster)
biggest_centroid = centroid
if biggest_centroid is None:
biggest_centroid = compute_centroid(points)
return biggest_centroid, cluster | Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point) | Below is the the instruction that describes the task:
### Input:
Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point)
### Response:
def update_location_centroid(point, cluster, max_distance, min_samples):
""" Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point)
"""
cluster.append(point)
points = [p.gen2arr() for p in cluster]
# Estimates the epsilon
eps = estimate_meters_to_deg(max_distance, precision=6)
p_cluster = DBSCAN(eps=eps, min_samples=min_samples)
p_cluster.fit(points)
clusters = {}
for i, label in enumerate(p_cluster.labels_):
if label in clusters.keys():
clusters[label].append(points[i])
else:
clusters[label] = [points[i]]
centroids = []
biggest_centroid_l = -float("inf")
biggest_centroid = None
for label, n_cluster in clusters.items():
centroid = compute_centroid(n_cluster)
centroids.append(centroid)
if label >= 0 and len(n_cluster) >= biggest_centroid_l:
biggest_centroid_l = len(n_cluster)
biggest_centroid = centroid
if biggest_centroid is None:
biggest_centroid = compute_centroid(points)
return biggest_centroid, cluster |
def choose_trial_to_run(self, trial_runner):
"""Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
"""
for hyperband in self._hyperbands:
# band will have None entries if no resources
# are to be allocated to that bracket.
scrubbed = [b for b in hyperband if b is not None]
for bracket in sorted(
scrubbed, key=lambda b: b.completion_percentage()):
for trial in bracket.current_trials():
if (trial.status == Trial.PENDING
and trial_runner.has_resources(trial.resources)):
return trial
return None | Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration. | Below is the the instruction that describes the task:
### Input:
Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
### Response:
def choose_trial_to_run(self, trial_runner):
"""Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
"""
for hyperband in self._hyperbands:
# band will have None entries if no resources
# are to be allocated to that bracket.
scrubbed = [b for b in hyperband if b is not None]
for bracket in sorted(
scrubbed, key=lambda b: b.completion_percentage()):
for trial in bracket.current_trials():
if (trial.status == Trial.PENDING
and trial_runner.has_resources(trial.resources)):
return trial
return None |
def PozyxLS(anchors, W, r2, print_out=False):
''' Algorithm used by pozyx (https://www.pozyx.io/Documentation/how_does_positioning_work)
:param anchors: anchor points
:param r2: squared distances from anchors to point x.
:returns: estimated position of point x.
'''
N = anchors.shape[0]
anchors_term = np.sum(np.power(anchors[:-1], 2), axis=1)
last_term = np.sum(np.power(anchors[-1], 2))
b = r2[:-1] - anchors_term + last_term - r2[-1]
A = -2 * (anchors[:-1] - anchors[-1])
x, res, rank, s = np.linalg.lstsq(A, b)
return x | Algorithm used by pozyx (https://www.pozyx.io/Documentation/how_does_positioning_work)
:param anchors: anchor points
:param r2: squared distances from anchors to point x.
:returns: estimated position of point x. | Below is the the instruction that describes the task:
### Input:
Algorithm used by pozyx (https://www.pozyx.io/Documentation/how_does_positioning_work)
:param anchors: anchor points
:param r2: squared distances from anchors to point x.
:returns: estimated position of point x.
### Response:
def PozyxLS(anchors, W, r2, print_out=False):
''' Algorithm used by pozyx (https://www.pozyx.io/Documentation/how_does_positioning_work)
:param anchors: anchor points
:param r2: squared distances from anchors to point x.
:returns: estimated position of point x.
'''
N = anchors.shape[0]
anchors_term = np.sum(np.power(anchors[:-1], 2), axis=1)
last_term = np.sum(np.power(anchors[-1], 2))
b = r2[:-1] - anchors_term + last_term - r2[-1]
A = -2 * (anchors[:-1] - anchors[-1])
x, res, rank, s = np.linalg.lstsq(A, b)
return x |
def _rescanSizes(self, force=True):
""" Zero and recalculate quota sizes to subvolume sizes will be correct. """
status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
logger.debug("CTL Status: %s", hex(status))
status = self.QUOTA_RESCAN_STATUS()
logger.debug("RESCAN Status: %s", status)
if not status.flags:
if not force:
return
self.QUOTA_RESCAN()
logger.warn("Waiting for btrfs quota usage scan...")
self.QUOTA_RESCAN_WAIT() | Zero and recalculate quota sizes to subvolume sizes will be correct. | Below is the the instruction that describes the task:
### Input:
Zero and recalculate quota sizes to subvolume sizes will be correct.
### Response:
def _rescanSizes(self, force=True):
""" Zero and recalculate quota sizes to subvolume sizes will be correct. """
status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
logger.debug("CTL Status: %s", hex(status))
status = self.QUOTA_RESCAN_STATUS()
logger.debug("RESCAN Status: %s", status)
if not status.flags:
if not force:
return
self.QUOTA_RESCAN()
logger.warn("Waiting for btrfs quota usage scan...")
self.QUOTA_RESCAN_WAIT() |
def print_markers(f):
"""A decorator that prints the invoked command
before and after the command.
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
command = ctx.info_name
assert command is not None
command_name = ctx.command_path
click_extensions.echo_with_markers(command_name, marker_color='green')
def print_error(code):
click_extensions.echo_with_markers('end of {} (exit code: {code})'.format(
command_name, code=code), marker_color='red')
def print_success():
click_extensions.echo_with_markers('end of {}'.format(
command_name), marker_color='green')
try:
ctx.invoke(f, *args, **kwargs)
except SystemExit as e:
code = e.code if e.code is not None else exit_codes.ABORT
if code == 0:
print_success()
else:
print_error(code)
raise
except click.ClickException as e:
code = e.exit_code
if code == 0:
print_success()
else:
print_error(code)
raise
except click.Abort as e:
code = exit_codes.ABORT
print_error(code)
raise
except Exception as e:
code = -1
print_error(code)
raise
else:
print_success()
return
return update_wrapper(new_func, f) | A decorator that prints the invoked command
before and after the command. | Below is the the instruction that describes the task:
### Input:
A decorator that prints the invoked command
before and after the command.
### Response:
def print_markers(f):
"""A decorator that prints the invoked command
before and after the command.
"""
@click.pass_context
def new_func(ctx, *args, **kwargs):
command = ctx.info_name
assert command is not None
command_name = ctx.command_path
click_extensions.echo_with_markers(command_name, marker_color='green')
def print_error(code):
click_extensions.echo_with_markers('end of {} (exit code: {code})'.format(
command_name, code=code), marker_color='red')
def print_success():
click_extensions.echo_with_markers('end of {}'.format(
command_name), marker_color='green')
try:
ctx.invoke(f, *args, **kwargs)
except SystemExit as e:
code = e.code if e.code is not None else exit_codes.ABORT
if code == 0:
print_success()
else:
print_error(code)
raise
except click.ClickException as e:
code = e.exit_code
if code == 0:
print_success()
else:
print_error(code)
raise
except click.Abort as e:
code = exit_codes.ABORT
print_error(code)
raise
except Exception as e:
code = -1
print_error(code)
raise
else:
print_success()
return
return update_wrapper(new_func, f) |
def write_metadata(self, fp):
"""Writes metadata to the given file handler.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
"""
fp.attrs['model'] = self.name
fp.attrs['variable_params'] = list(self.variable_params)
fp.attrs['sampling_params'] = list(self.sampling_params)
fp.write_kwargs_to_attrs(fp.attrs, static_params=self.static_params) | Writes metadata to the given file handler.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to. | Below is the the instruction that describes the task:
### Input:
Writes metadata to the given file handler.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
### Response:
def write_metadata(self, fp):
"""Writes metadata to the given file handler.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
"""
fp.attrs['model'] = self.name
fp.attrs['variable_params'] = list(self.variable_params)
fp.attrs['sampling_params'] = list(self.sampling_params)
fp.write_kwargs_to_attrs(fp.attrs, static_params=self.static_params) |
def encode(raw: Any) -> str:
"""
Encode credential attribute value, leaving any (stringified) int32 alone: indy-sdk predicates
operate on int32 values properly only when their encoded values match their raw values.
To disambiguate for decoding, the operation reserves a sentinel for the null value and otherwise adds
2**31 to any non-trivial transform of a non-int32 input, then prepends a digit marking the input type:
* 1: string
* 2: boolean
* 3: non-32-bit integer
* 4: floating point
* 9: other (stringifiable)
:param raw: raw value to encode
:return: encoded value
"""
if raw is None:
return str(I32_BOUND) # sentinel
stringified = str(raw)
if isinstance(raw, bool):
return '{}{}'.format(
ENCODE_PREFIX[bool],
I32_BOUND + 2 if raw else I32_BOUND + 1) # decode gotcha: python bool('False') = True; use 2 sentinels
if isinstance(raw, int) and -I32_BOUND <= raw < I32_BOUND:
return stringified # it's an i32, leave it (as numeric string)
hexed = '{}{}'.format(
ENCODE_PREFIX.get(type(raw), ENCODE_PREFIX[None]),
str(int.from_bytes(hexlify(stringified.encode()), 'big') + I32_BOUND))
return hexed | Encode credential attribute value, leaving any (stringified) int32 alone: indy-sdk predicates
operate on int32 values properly only when their encoded values match their raw values.
To disambiguate for decoding, the operation reserves a sentinel for the null value and otherwise adds
2**31 to any non-trivial transform of a non-int32 input, then prepends a digit marking the input type:
* 1: string
* 2: boolean
* 3: non-32-bit integer
* 4: floating point
* 9: other (stringifiable)
:param raw: raw value to encode
:return: encoded value | Below is the the instruction that describes the task:
### Input:
Encode credential attribute value, leaving any (stringified) int32 alone: indy-sdk predicates
operate on int32 values properly only when their encoded values match their raw values.
To disambiguate for decoding, the operation reserves a sentinel for the null value and otherwise adds
2**31 to any non-trivial transform of a non-int32 input, then prepends a digit marking the input type:
* 1: string
* 2: boolean
* 3: non-32-bit integer
* 4: floating point
* 9: other (stringifiable)
:param raw: raw value to encode
:return: encoded value
### Response:
def encode(raw: Any) -> str:
"""
Encode credential attribute value, leaving any (stringified) int32 alone: indy-sdk predicates
operate on int32 values properly only when their encoded values match their raw values.
To disambiguate for decoding, the operation reserves a sentinel for the null value and otherwise adds
2**31 to any non-trivial transform of a non-int32 input, then prepends a digit marking the input type:
* 1: string
* 2: boolean
* 3: non-32-bit integer
* 4: floating point
* 9: other (stringifiable)
:param raw: raw value to encode
:return: encoded value
"""
if raw is None:
return str(I32_BOUND) # sentinel
stringified = str(raw)
if isinstance(raw, bool):
return '{}{}'.format(
ENCODE_PREFIX[bool],
I32_BOUND + 2 if raw else I32_BOUND + 1) # decode gotcha: python bool('False') = True; use 2 sentinels
if isinstance(raw, int) and -I32_BOUND <= raw < I32_BOUND:
return stringified # it's an i32, leave it (as numeric string)
hexed = '{}{}'.format(
ENCODE_PREFIX.get(type(raw), ENCODE_PREFIX[None]),
str(int.from_bytes(hexlify(stringified.encode()), 'big') + I32_BOUND))
return hexed |
def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
"""
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name | Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well. | Below is the the instruction that describes the task:
### Input:
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
### Response:
def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
"""
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name |
def delete_user_policy(user_name, policy_name, region=None, key=None, keyid=None, profile=None):
'''
Delete a user policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_user_policy myuser mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
_policy = get_user_policy(
user_name, policy_name, region, key, keyid, profile
)
if not _policy:
return True
try:
conn.delete_user_policy(user_name, policy_name)
log.info('Successfully deleted policy %s for IAM user %s.', policy_name, user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to delete policy %s for IAM user %s.', policy_name, user_name)
return False | Delete a user policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_user_policy myuser mypolicy | Below is the the instruction that describes the task:
### Input:
Delete a user policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_user_policy myuser mypolicy
### Response:
def delete_user_policy(user_name, policy_name, region=None, key=None, keyid=None, profile=None):
'''
Delete a user policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_user_policy myuser mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False
_policy = get_user_policy(
user_name, policy_name, region, key, keyid, profile
)
if not _policy:
return True
try:
conn.delete_user_policy(user_name, policy_name)
log.info('Successfully deleted policy %s for IAM user %s.', policy_name, user_name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to delete policy %s for IAM user %s.', policy_name, user_name)
return False |
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Time Stamp record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('TF record not yet initialized!')
outlist = [b'TF', struct.pack('=BBB', RRTFRecord.length(self.time_flags), SU_ENTRY_VERSION, self.time_flags)]
for fieldname in self.FIELDNAMES:
field = getattr(self, fieldname)
if field is not None:
outlist.append(field.record())
return b''.join(outlist) | Generate a string representing the Rock Ridge Time Stamp record.
Parameters:
None.
Returns:
String containing the Rock Ridge record. | Below is the the instruction that describes the task:
### Input:
Generate a string representing the Rock Ridge Time Stamp record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
### Response:
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Time Stamp record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('TF record not yet initialized!')
outlist = [b'TF', struct.pack('=BBB', RRTFRecord.length(self.time_flags), SU_ENTRY_VERSION, self.time_flags)]
for fieldname in self.FIELDNAMES:
field = getattr(self, fieldname)
if field is not None:
outlist.append(field.record())
return b''.join(outlist) |
def resolve_var(self, varname, context=None):
"""Resolves name as a variable in a given context.
If no context specified page context' is considered as context.
:param str|unicode varname:
:param Context context:
:return:
"""
context = context or self.current_page_context
if isinstance(varname, FilterExpression):
varname = varname.resolve(context)
else:
varname = varname.strip()
try:
varname = Variable(varname).resolve(context)
except VariableDoesNotExist:
varname = varname
return varname | Resolves name as a variable in a given context.
If no context specified page context' is considered as context.
:param str|unicode varname:
:param Context context:
:return: | Below is the the instruction that describes the task:
### Input:
Resolves name as a variable in a given context.
If no context specified page context' is considered as context.
:param str|unicode varname:
:param Context context:
:return:
### Response:
def resolve_var(self, varname, context=None):
"""Resolves name as a variable in a given context.
If no context specified page context' is considered as context.
:param str|unicode varname:
:param Context context:
:return:
"""
context = context or self.current_page_context
if isinstance(varname, FilterExpression):
varname = varname.resolve(context)
else:
varname = varname.strip()
try:
varname = Variable(varname).resolve(context)
except VariableDoesNotExist:
varname = varname
return varname |
def move(self, target, pos=None):
"""
Moves the current node and all it's descendants to a new position
relative to another node.
"""
pos = self._prepare_pos_var_for_move(pos)
cls = get_result_class(self.__class__)
parent = None
if pos in ('first-child', 'last-child', 'sorted-child'):
# moving to a child
if target.is_leaf():
parent = target
pos = 'last-child'
else:
target = target.get_last_child()
pos = {'first-child': 'first-sibling',
'last-child': 'last-sibling',
'sorted-child': 'sorted-sibling'}[pos]
if target.is_descendant_of(self):
raise InvalidMoveToDescendant(
_("Can't move node to a descendant."))
if self == target and (
(pos == 'left') or
(pos in ('right', 'last-sibling') and
target == target.get_last_sibling()) or
(pos == 'first-sibling' and
target == target.get_first_sibling())):
# special cases, not actually moving the node so no need to UPDATE
return
if pos == 'sorted-sibling':
siblings = list(target.get_sorted_pos_queryset(
target.get_siblings(), self))
if siblings:
pos = 'left'
target = siblings[0]
else:
pos = 'last-sibling'
if pos in ('left', 'right', 'first-sibling'):
siblings = list(target.get_siblings())
if pos == 'right':
if target == siblings[-1]:
pos = 'last-sibling'
else:
pos = 'left'
found = False
for node in siblings:
if found:
target = node
break
elif node == target:
found = True
if pos == 'left':
if target == siblings[0]:
pos = 'first-sibling'
if pos == 'first-sibling':
target = siblings[0]
# ok let's move this
cursor = self._get_database_cursor('write')
move_right = cls._move_right
gap = self.rgt - self.lft + 1
sql = None
target_tree = target.tree_id
# first make a hole
if pos == 'last-child':
newpos = parent.rgt
sql, params = move_right(target.tree_id, newpos, False, gap)
elif target.is_root():
newpos = 1
if pos == 'last-sibling':
target_tree = target.get_siblings().reverse()[0].tree_id + 1
elif pos == 'first-sibling':
target_tree = 1
sql, params = cls._move_tree_right(1)
elif pos == 'left':
sql, params = cls._move_tree_right(target.tree_id)
else:
if pos == 'last-sibling':
newpos = target.get_parent().rgt
sql, params = move_right(target.tree_id, newpos, False, gap)
elif pos == 'first-sibling':
newpos = target.lft
sql, params = move_right(target.tree_id,
newpos - 1, False, gap)
elif pos == 'left':
newpos = target.lft
sql, params = move_right(target.tree_id, newpos, True, gap)
if sql:
cursor.execute(sql, params)
# we reload 'self' because lft/rgt may have changed
fromobj = cls.objects.get(pk=self.pk)
depthdiff = target.depth - fromobj.depth
if parent:
depthdiff += 1
# move the tree to the hole
sql = "UPDATE %(table)s "\
" SET tree_id = %(target_tree)d, "\
" lft = lft + %(jump)d , "\
" rgt = rgt + %(jump)d , "\
" depth = depth + %(depthdiff)d "\
" WHERE tree_id = %(from_tree)d AND "\
" lft BETWEEN %(fromlft)d AND %(fromrgt)d" % {
'table': connection.ops.quote_name(cls._meta.db_table),
'from_tree': fromobj.tree_id,
'target_tree': target_tree,
'jump': newpos - fromobj.lft,
'depthdiff': depthdiff,
'fromlft': fromobj.lft,
'fromrgt': fromobj.rgt}
cursor.execute(sql, [])
# close the gap
sql, params = cls._get_close_gap_sql(fromobj.lft,
fromobj.rgt, fromobj.tree_id)
cursor.execute(sql, params) | Moves the current node and all it's descendants to a new position
relative to another node. | Below is the the instruction that describes the task:
### Input:
Moves the current node and all it's descendants to a new position
relative to another node.
### Response:
def move(self, target, pos=None):
"""
Moves the current node and all it's descendants to a new position
relative to another node.
"""
pos = self._prepare_pos_var_for_move(pos)
cls = get_result_class(self.__class__)
parent = None
if pos in ('first-child', 'last-child', 'sorted-child'):
# moving to a child
if target.is_leaf():
parent = target
pos = 'last-child'
else:
target = target.get_last_child()
pos = {'first-child': 'first-sibling',
'last-child': 'last-sibling',
'sorted-child': 'sorted-sibling'}[pos]
if target.is_descendant_of(self):
raise InvalidMoveToDescendant(
_("Can't move node to a descendant."))
if self == target and (
(pos == 'left') or
(pos in ('right', 'last-sibling') and
target == target.get_last_sibling()) or
(pos == 'first-sibling' and
target == target.get_first_sibling())):
# special cases, not actually moving the node so no need to UPDATE
return
if pos == 'sorted-sibling':
siblings = list(target.get_sorted_pos_queryset(
target.get_siblings(), self))
if siblings:
pos = 'left'
target = siblings[0]
else:
pos = 'last-sibling'
if pos in ('left', 'right', 'first-sibling'):
siblings = list(target.get_siblings())
if pos == 'right':
if target == siblings[-1]:
pos = 'last-sibling'
else:
pos = 'left'
found = False
for node in siblings:
if found:
target = node
break
elif node == target:
found = True
if pos == 'left':
if target == siblings[0]:
pos = 'first-sibling'
if pos == 'first-sibling':
target = siblings[0]
# ok let's move this
cursor = self._get_database_cursor('write')
move_right = cls._move_right
gap = self.rgt - self.lft + 1
sql = None
target_tree = target.tree_id
# first make a hole
if pos == 'last-child':
newpos = parent.rgt
sql, params = move_right(target.tree_id, newpos, False, gap)
elif target.is_root():
newpos = 1
if pos == 'last-sibling':
target_tree = target.get_siblings().reverse()[0].tree_id + 1
elif pos == 'first-sibling':
target_tree = 1
sql, params = cls._move_tree_right(1)
elif pos == 'left':
sql, params = cls._move_tree_right(target.tree_id)
else:
if pos == 'last-sibling':
newpos = target.get_parent().rgt
sql, params = move_right(target.tree_id, newpos, False, gap)
elif pos == 'first-sibling':
newpos = target.lft
sql, params = move_right(target.tree_id,
newpos - 1, False, gap)
elif pos == 'left':
newpos = target.lft
sql, params = move_right(target.tree_id, newpos, True, gap)
if sql:
cursor.execute(sql, params)
# we reload 'self' because lft/rgt may have changed
fromobj = cls.objects.get(pk=self.pk)
depthdiff = target.depth - fromobj.depth
if parent:
depthdiff += 1
# move the tree to the hole
sql = "UPDATE %(table)s "\
" SET tree_id = %(target_tree)d, "\
" lft = lft + %(jump)d , "\
" rgt = rgt + %(jump)d , "\
" depth = depth + %(depthdiff)d "\
" WHERE tree_id = %(from_tree)d AND "\
" lft BETWEEN %(fromlft)d AND %(fromrgt)d" % {
'table': connection.ops.quote_name(cls._meta.db_table),
'from_tree': fromobj.tree_id,
'target_tree': target_tree,
'jump': newpos - fromobj.lft,
'depthdiff': depthdiff,
'fromlft': fromobj.lft,
'fromrgt': fromobj.rgt}
cursor.execute(sql, [])
# close the gap
sql, params = cls._get_close_gap_sql(fromobj.lft,
fromobj.rgt, fromobj.tree_id)
cursor.execute(sql, params) |
def handle_unexpected_exception(exc):
# type: (BaseException) -> str
"""Return an error message and write a log file if logging was not enabled.
Args:
exc: The unexpected exception.
Returns:
A message to display to the user concerning the unexpected exception.
"""
try:
write_logfile()
addendum = 'Please see the log file for more information.'
except IOError:
addendum = 'Unable to write log file.'
try:
message = str(exc)
return '{}{}{}'.format(message, '\n' if message else '', addendum)
except Exception: # pylint: disable=broad-except
return str(exc) | Return an error message and write a log file if logging was not enabled.
Args:
exc: The unexpected exception.
Returns:
A message to display to the user concerning the unexpected exception. | Below is the the instruction that describes the task:
### Input:
Return an error message and write a log file if logging was not enabled.
Args:
exc: The unexpected exception.
Returns:
A message to display to the user concerning the unexpected exception.
### Response:
def handle_unexpected_exception(exc):
# type: (BaseException) -> str
"""Return an error message and write a log file if logging was not enabled.
Args:
exc: The unexpected exception.
Returns:
A message to display to the user concerning the unexpected exception.
"""
try:
write_logfile()
addendum = 'Please see the log file for more information.'
except IOError:
addendum = 'Unable to write log file.'
try:
message = str(exc)
return '{}{}{}'.format(message, '\n' if message else '', addendum)
except Exception: # pylint: disable=broad-except
return str(exc) |
def attribute(self, value):
"""
Setter for **self.__attribute** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"attribute", value)
self.__attribute = value | Setter for **self.__attribute** attribute.
:param value: Attribute value.
:type value: unicode | Below is the the instruction that describes the task:
### Input:
Setter for **self.__attribute** attribute.
:param value: Attribute value.
:type value: unicode
### Response:
def attribute(self, value):
"""
Setter for **self.__attribute** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"attribute", value)
self.__attribute = value |
def remove_extra_presentations(self, resource, timeout=-1):
"""
Removes extra presentations from a specified server profile.
Args:
resource (dict):
Object to create
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Associated storage attachment resource.
"""
uri = self.URI + "/repair"
custom_headers = {'Accept-Language': 'en_US'}
return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers) | Removes extra presentations from a specified server profile.
Args:
resource (dict):
Object to create
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Associated storage attachment resource. | Below is the the instruction that describes the task:
### Input:
Removes extra presentations from a specified server profile.
Args:
resource (dict):
Object to create
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Associated storage attachment resource.
### Response:
def remove_extra_presentations(self, resource, timeout=-1):
"""
Removes extra presentations from a specified server profile.
Args:
resource (dict):
Object to create
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Associated storage attachment resource.
"""
uri = self.URI + "/repair"
custom_headers = {'Accept-Language': 'en_US'}
return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers) |
def get_gateway_id(self):
"""Return a unique id for the gateway."""
info = next(serial.tools.list_ports.grep(self.port), None)
return info.serial_number if info is not None else None | Return a unique id for the gateway. | Below is the the instruction that describes the task:
### Input:
Return a unique id for the gateway.
### Response:
def get_gateway_id(self):
"""Return a unique id for the gateway."""
info = next(serial.tools.list_ports.grep(self.port), None)
return info.serial_number if info is not None else None |
def view_graph(graph_str, dest_file=None):
"""View a dot graph in an image viewer."""
from rez.system import system
from rez.config import config
if (system.platform == "linux") and (not os.getenv("DISPLAY")):
print >> sys.stderr, "Unable to open display."
sys.exit(1)
dest_file = _write_graph(graph_str, dest_file=dest_file)
# view graph
viewed = False
prog = config.image_viewer or 'browser'
print "loading image viewer (%s)..." % prog
if config.image_viewer:
proc = popen([config.image_viewer, dest_file])
proc.wait()
viewed = not bool(proc.returncode)
if not viewed:
import webbrowser
webbrowser.open_new("file://" + dest_file) | View a dot graph in an image viewer. | Below is the the instruction that describes the task:
### Input:
View a dot graph in an image viewer.
### Response:
def view_graph(graph_str, dest_file=None):
"""View a dot graph in an image viewer."""
from rez.system import system
from rez.config import config
if (system.platform == "linux") and (not os.getenv("DISPLAY")):
print >> sys.stderr, "Unable to open display."
sys.exit(1)
dest_file = _write_graph(graph_str, dest_file=dest_file)
# view graph
viewed = False
prog = config.image_viewer or 'browser'
print "loading image viewer (%s)..." % prog
if config.image_viewer:
proc = popen([config.image_viewer, dest_file])
proc.wait()
viewed = not bool(proc.returncode)
if not viewed:
import webbrowser
webbrowser.open_new("file://" + dest_file) |
def authorize(self):
'''
Prepare the master to expect a signing request
'''
with salt.utils.files.fopen(self.path, 'w+') as fp_:
fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function
return True | Prepare the master to expect a signing request | Below is the the instruction that describes the task:
### Input:
Prepare the master to expect a signing request
### Response:
def authorize(self):
'''
Prepare the master to expect a signing request
'''
with salt.utils.files.fopen(self.path, 'w+') as fp_:
fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function
return True |
def is_compatible(self):
'''Is the wheel is compatible with the current platform?'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False) | Is the wheel is compatible with the current platform? | Below is the the instruction that describes the task:
### Input:
Is the wheel is compatible with the current platform?
### Response:
def is_compatible(self):
'''Is the wheel is compatible with the current platform?'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.