| code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars) |
|---|---|---|
def has_saved(self, addr):
"""Test if device has data from the saved data file."""
saved = False
if self._saved_devices.get(addr, None) is not None:
saved = True
return saved | Test if device has data from the saved data file. | Below is the instruction that describes the task:
### Input:
Test if device has data from the saved data file.
### Response:
def has_saved(self, addr):
"""Test if device has data from the saved data file."""
saved = False
if self._saved_devices.get(addr, None) is not None:
saved = True
return saved |
def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
"""
date = self.date()
return date.year - 1 if date.month <= 3 else date.year | Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season. | Below is the instruction that describes the task:
### Input:
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
### Response:
def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
"""
date = self.date()
return date.year - 1 if date.month <= 3 else date.year |
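A quick sanity check of the month cutoff, using a hypothetical stand-in class whose `date()` returns a `datetime.date` (the real class that owns `season()` is not shown in this row):

```python
import datetime

class Game:
    """Hypothetical stand-in: only the date() accessor matters here."""
    def __init__(self, d):
        self._d = d
    def date(self):
        return self._d
    def season(self):
        date = self.date()
        return date.year - 1 if date.month <= 3 else date.year

# A week-17 game played in January 2016 belongs to the 2015 season...
assert Game(datetime.date(2016, 1, 3)).season() == 2015
# ...while a September game stays in its own calendar year.
assert Game(datetime.date(2015, 9, 13)).season() == 2015
```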
def eval_now(self, code):
"""Reformat and evaluate a code snippet and return code for the result."""
result = eval(self.reformat(code))
if result is None or isinstance(result, (bool, int, float, complex)):
return repr(result)
elif isinstance(result, bytes):
return "b" + self.wrap_str_of(result)
elif isinstance(result, str):
return self.wrap_str_of(result)
else:
return None | Reformat and evaluate a code snippet and return code for the result. | Below is the instruction that describes the task:
### Input:
Reformat and evaluate a code snippet and return code for the result.
### Response:
def eval_now(self, code):
"""Reformat and evaluate a code snippet and return code for the result."""
result = eval(self.reformat(code))
if result is None or isinstance(result, (bool, int, float, complex)):
return repr(result)
elif isinstance(result, bytes):
return "b" + self.wrap_str_of(result)
elif isinstance(result, str):
return self.wrap_str_of(result)
else:
return None |
def aa_db_search(self, files, base, unpack, search_method,
maximum_range, threads, evalue, min_orf_length,
restrict_read_length, diamond_database):
'''
Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
orf length to restrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
if search_method == 'hmmsearch':
output_search_file = files.hmmsearch_output_path(base)
elif search_method == 'diamond':
output_search_file = files.diamond_search_output_basename(base)
hit_reads_fasta = files.fa_output_path(base)
hit_reads_orfs_fasta = files.orf_fasta_output_path(base)
return self.search_and_extract_orfs_matching_protein_database(\
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta) | Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
orf length to restrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit | Below is the instruction that describes the task:
### Input:
Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
orf length to retrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit
### Response:
def aa_db_search(self, files, base, unpack, search_method,
maximum_range, threads, evalue, min_orf_length,
restrict_read_length, diamond_database):
'''
Amino acid database search pipeline - pipeline where reads are searched
as amino acids, and hits are identified using hmmsearch or diamond
searches
Parameters
----------
files : obj
graftm_output_paths object.
base : str
The name of the input file, stripped of all suffixes, and paths.
Used for creating file names with 'files' object.
unpack : obj
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
search_method : str
The method for searching, either 'hmmsearch' or 'diamond'
maximum_range : int
Maximum range that a gene can extend within a contig. Any hits
that extend beyond this length cannot be linked. max_range is defined
as 1.5 X the average length of all full length genes used in the
search database. This is defined in the CONTENTS.json file within a
gpkg.
threads : int
Number of threads for hmmer to use
evalue : str
evalue cutoff for hmmer to use
min_orf_length : int
minimum orf length for orfm to use
restrict_read_length : int
orf length to restrict orfm to.
diamond_database : str
Path to diamond database to use when searching. Set to 'None' if not
using diamond pipeline
Returns
-------
String path to amino acid fasta file of reads that hit
'''
# Define outputs
if search_method == 'hmmsearch':
output_search_file = files.hmmsearch_output_path(base)
elif search_method == 'diamond':
output_search_file = files.diamond_search_output_basename(base)
hit_reads_fasta = files.fa_output_path(base)
hit_reads_orfs_fasta = files.orf_fasta_output_path(base)
return self.search_and_extract_orfs_matching_protein_database(\
unpack,
search_method,
maximum_range,
threads,
evalue,
min_orf_length,
restrict_read_length,
diamond_database,
output_search_file,
hit_reads_fasta,
hit_reads_orfs_fasta) |
def updateRouterStatus(self):
"""force update to router as if there is child id request"""
print '%s call updateRouterStatus' % self.port
cmd = 'state'
while True:
state = self.__sendCommand(cmd)[0]
if state == 'detached':
continue
elif state == 'child':
break
else:
return False
cmd = 'state router'
return self.__sendCommand(cmd)[0] == 'Done' | force update to router as if there is child id request | Below is the instruction that describes the task:
### Input:
force update to router as if there is child id request
### Response:
def updateRouterStatus(self):
"""force update to router as if there is child id request"""
print '%s call updateRouterStatus' % self.port
cmd = 'state'
while True:
state = self.__sendCommand(cmd)[0]
if state == 'detached':
continue
elif state == 'child':
break
else:
return False
cmd = 'state router'
return self.__sendCommand(cmd)[0] == 'Done' |
def BatchConvert(self, metadata_value_pairs, token=None):
"""Converts a batch of GrrMessages into a set of RDFValues at once.
Args:
metadata_value_pairs: a list or a generator of tuples (metadata, value),
where metadata is ExportedMetadata to be used for conversion and value
is a GrrMessage to be converted.
token: Security token.
Returns:
Resulting RDFValues. Empty list is a valid result and means that
conversion wasn't possible.
"""
# Group messages by source (i.e. by client urn).
msg_dict = {}
for metadata, msg in metadata_value_pairs:
msg_dict.setdefault(msg.source, []).append((metadata, msg))
metadata_objects = []
metadata_to_fetch = []
# Open the clients we don't have metadata for and fetch metadata.
for client_urn in msg_dict:
try:
metadata_objects.append(self.cached_metadata[client_urn])
except KeyError:
metadata_to_fetch.append(client_urn)
if metadata_to_fetch:
if data_store.RelationalDBEnabled():
client_ids = set(urn.Basename() for urn in metadata_to_fetch)
infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)
fetched_metadata = [
GetMetadata(client_id, info) for client_id, info in infos.items()
]
else:
client_fds = aff4.FACTORY.MultiOpen(
metadata_to_fetch, mode="r", token=token)
fetched_metadata = [
GetMetadataLegacy(client_fd, token=token)
for client_fd in client_fds
]
for metadata in fetched_metadata:
self.cached_metadata[metadata.client_urn] = metadata
metadata_objects.extend(fetched_metadata)
data_by_type = {}
for metadata in metadata_objects:
try:
for original_metadata, message in msg_dict[metadata.client_urn]:
# Get source_urn and annotations from the original metadata
# provided and original_timestamp from the payload age.
new_metadata = ExportedMetadata(metadata)
new_metadata.source_urn = original_metadata.source_urn
new_metadata.annotations = original_metadata.annotations
new_metadata.original_timestamp = message.payload.age
cls_name = message.payload.__class__.__name__
# Create a dict of values for conversion keyed by type, so we can
# apply the right converters to the right object types
if cls_name not in data_by_type:
converters_classes = ExportConverter.GetConvertersByValue(
message.payload)
data_by_type[cls_name] = {
"converters": [cls(self.options) for cls in converters_classes],
"batch_data": [(new_metadata, message.payload)]
}
else:
data_by_type[cls_name]["batch_data"].append(
(new_metadata, message.payload))
except KeyError:
pass
# Run all converters against all objects of the relevant type
converted_batch = []
for dataset in itervalues(data_by_type):
for converter in dataset["converters"]:
converted_batch.extend(
converter.BatchConvert(dataset["batch_data"], token=token))
return converted_batch | Converts a batch of GrrMessages into a set of RDFValues at once.
Args:
metadata_value_pairs: a list or a generator of tuples (metadata, value),
where metadata is ExportedMetadata to be used for conversion and value
is a GrrMessage to be converted.
token: Security token.
Returns:
Resulting RDFValues. Empty list is a valid result and means that
conversion wasn't possible. | Below is the instruction that describes the task:
### Input:
Converts a batch of GrrMessages into a set of RDFValues at once.
Args:
metadata_value_pairs: a list or a generator of tuples (metadata, value),
where metadata is ExportedMetadata to be used for conversion and value
is a GrrMessage to be converted.
token: Security token.
Returns:
Resulting RDFValues. Empty list is a valid result and means that
conversion wasn't possible.
### Response:
def BatchConvert(self, metadata_value_pairs, token=None):
"""Converts a batch of GrrMessages into a set of RDFValues at once.
Args:
metadata_value_pairs: a list or a generator of tuples (metadata, value),
where metadata is ExportedMetadata to be used for conversion and value
is a GrrMessage to be converted.
token: Security token.
Returns:
Resulting RDFValues. Empty list is a valid result and means that
conversion wasn't possible.
"""
# Group messages by source (i.e. by client urn).
msg_dict = {}
for metadata, msg in metadata_value_pairs:
msg_dict.setdefault(msg.source, []).append((metadata, msg))
metadata_objects = []
metadata_to_fetch = []
# Open the clients we don't have metadata for and fetch metadata.
for client_urn in msg_dict:
try:
metadata_objects.append(self.cached_metadata[client_urn])
except KeyError:
metadata_to_fetch.append(client_urn)
if metadata_to_fetch:
if data_store.RelationalDBEnabled():
client_ids = set(urn.Basename() for urn in metadata_to_fetch)
infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)
fetched_metadata = [
GetMetadata(client_id, info) for client_id, info in infos.items()
]
else:
client_fds = aff4.FACTORY.MultiOpen(
metadata_to_fetch, mode="r", token=token)
fetched_metadata = [
GetMetadataLegacy(client_fd, token=token)
for client_fd in client_fds
]
for metadata in fetched_metadata:
self.cached_metadata[metadata.client_urn] = metadata
metadata_objects.extend(fetched_metadata)
data_by_type = {}
for metadata in metadata_objects:
try:
for original_metadata, message in msg_dict[metadata.client_urn]:
# Get source_urn and annotations from the original metadata
# provided and original_timestamp from the payload age.
new_metadata = ExportedMetadata(metadata)
new_metadata.source_urn = original_metadata.source_urn
new_metadata.annotations = original_metadata.annotations
new_metadata.original_timestamp = message.payload.age
cls_name = message.payload.__class__.__name__
# Create a dict of values for conversion keyed by type, so we can
# apply the right converters to the right object types
if cls_name not in data_by_type:
converters_classes = ExportConverter.GetConvertersByValue(
message.payload)
data_by_type[cls_name] = {
"converters": [cls(self.options) for cls in converters_classes],
"batch_data": [(new_metadata, message.payload)]
}
else:
data_by_type[cls_name]["batch_data"].append(
(new_metadata, message.payload))
except KeyError:
pass
# Run all converters against all objects of the relevant type
converted_batch = []
for dataset in itervalues(data_by_type):
for converter in dataset["converters"]:
converted_batch.extend(
converter.BatchConvert(dataset["batch_data"], token=token))
return converted_batch |
def getChanges(self, request):
"""
Take the logic from the change hook, and then delegate it
to the proper handler
We use the buildbot plugin mechanisms to find out about dialects
and call getChanges()
the return value is a list of changes
if DIALECT is unspecified, a sample implementation is provided
"""
uriRE = re.search(r'^/change_hook/?([a-zA-Z0-9_]*)', bytes2unicode(request.uri))
if not uriRE:
log.msg("URI doesn't match change_hook regex: %s" % request.uri)
raise ValueError(
"URI doesn't match change_hook regex: %s" % request.uri)
changes = []
src = None
# Was there a dialect provided?
if uriRE.group(1):
dialect = uriRE.group(1)
else:
dialect = 'base'
handler = self.makeHandler(dialect)
changes, src = yield handler.getChanges(request)
return (changes, src) | Take the logic from the change hook, and then delegate it
to the proper handler
We use the buildbot plugin mechanisms to find out about dialects
and call getChanges()
the return value is a list of changes
if DIALECT is unspecified, a sample implementation is provided | Below is the instruction that describes the task:
### Input:
Take the logic from the change hook, and then delegate it
to the proper handler
We use the buildbot plugin mechanisms to find out about dialects
and call getChanges()
the return value is a list of changes
if DIALECT is unspecified, a sample implementation is provided
### Response:
def getChanges(self, request):
"""
Take the logic from the change hook, and then delegate it
to the proper handler
We use the buildbot plugin mechanisms to find out about dialects
and call getChanges()
the return value is a list of changes
if DIALECT is unspecified, a sample implementation is provided
"""
uriRE = re.search(r'^/change_hook/?([a-zA-Z0-9_]*)', bytes2unicode(request.uri))
if not uriRE:
log.msg("URI doesn't match change_hook regex: %s" % request.uri)
raise ValueError(
"URI doesn't match change_hook regex: %s" % request.uri)
changes = []
src = None
# Was there a dialect provided?
if uriRE.group(1):
dialect = uriRE.group(1)
else:
dialect = 'base'
handler = self.makeHandler(dialect)
changes, src = yield handler.getChanges(request)
return (changes, src) |
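The dialect is whatever follows `/change_hook/` in the request URI; a standalone check of the regex (`bytes2unicode` is Buildbot's helper, bypassed here by passing a plain str):

```python
import re

for uri in ('/change_hook/github', '/change_hook/', '/change_hook'):
    m = re.search(r'^/change_hook/?([a-zA-Z0-9_]*)', uri)
    print(repr(m.group(1)))  # 'github', then '' twice -> falls back to 'base'
```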
def remove_package(self, team, user, package):
"""
Removes a package (all instances) from this store.
"""
self.check_name(team, user, package)
path = self.package_path(team, user, package)
remove_objs = set()
# TODO: do we really want to delete invisible dirs?
if os.path.isdir(path):
# Collect objects from all instances for potential cleanup
contents_path = os.path.join(path, PackageStore.CONTENTS_DIR)
for instance in os.listdir(contents_path):
pkg = self.get_package(team, user, package, pkghash=instance)
remove_objs.update(find_object_hashes(pkg))
# Remove package manifests
rmtree(path)
return self.prune(remove_objs) | Removes a package (all instances) from this store. | Below is the instruction that describes the task:
### Input:
Removes a package (all instances) from this store.
### Response:
def remove_package(self, team, user, package):
"""
Removes a package (all instances) from this store.
"""
self.check_name(team, user, package)
path = self.package_path(team, user, package)
remove_objs = set()
# TODO: do we really want to delete invisible dirs?
if os.path.isdir(path):
# Collect objects from all instances for potential cleanup
contents_path = os.path.join(path, PackageStore.CONTENTS_DIR)
for instance in os.listdir(contents_path):
pkg = self.get_package(team, user, package, pkghash=instance)
remove_objs.update(find_object_hashes(pkg))
# Remove package manifests
rmtree(path)
return self.prune(remove_objs) |
def _organize_step_scatter(step, inputs, remapped):
"""Add scattering information from inputs, remapping input variables.
"""
def extract_scatter_id(inp):
_, ns_var = inp.split("#")
_, var = ns_var.split("/")
return var
scatter_local = {}
if "scatter" in step.tool:
assert step.tool["scatterMethod"] == "dotproduct", \
"Only support dotproduct scattering in conversion to WDL"
inp_val = collections.OrderedDict()
for x in inputs:
inp_val[x["id"]] = x["value"]
for scatter_key in [extract_scatter_id(x) for x in step.tool["scatter"]]:
scatter_key = remapped.get(scatter_key) or scatter_key
val = inp_val[scatter_key]
if len(val.split(".")) in [1, 2]:
base_key = val
attr = None
elif len(val.split(".")) == 3:
orig_location, record, attr = val.split(".")
base_key = "%s.%s" % (orig_location, record)
else:
raise ValueError("Unexpected scatter input: %s" % val)
local_ref = base_key.split(".")[-1] + "_local"
scatter_local[base_key] = local_ref
if attr:
local_ref += ".%s" % attr
inp_val[scatter_key] = local_ref
inputs = [{"id": iid, "value": ival} for iid, ival in inp_val.items()]
return inputs, [(v, k) for k, v in scatter_local.items()] | Add scattering information from inputs, remapping input variables. | Below is the instruction that describes the task:
### Input:
Add scattering information from inputs, remapping input variables.
### Response:
def _organize_step_scatter(step, inputs, remapped):
"""Add scattering information from inputs, remapping input variables.
"""
def extract_scatter_id(inp):
_, ns_var = inp.split("#")
_, var = ns_var.split("/")
return var
scatter_local = {}
if "scatter" in step.tool:
assert step.tool["scatterMethod"] == "dotproduct", \
"Only support dotproduct scattering in conversion to WDL"
inp_val = collections.OrderedDict()
for x in inputs:
inp_val[x["id"]] = x["value"]
for scatter_key in [extract_scatter_id(x) for x in step.tool["scatter"]]:
scatter_key = remapped.get(scatter_key) or scatter_key
val = inp_val[scatter_key]
if len(val.split(".")) in [1, 2]:
base_key = val
attr = None
elif len(val.split(".")) == 3:
orig_location, record, attr = val.split(".")
base_key = "%s.%s" % (orig_location, record)
else:
raise ValueError("Unexpected scatter input: %s" % val)
local_ref = base_key.split(".")[-1] + "_local"
scatter_local[base_key] = local_ref
if attr:
local_ref += ".%s" % attr
inp_val[scatter_key] = local_ref
inputs = [{"id": iid, "value": ival} for iid, ival in inp_val.items()]
return inputs, [(v, k) for k, v in scatter_local.items()] |
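The id extraction assumes CWL-style fully qualified ids of the form `path#step/variable`; a minimal check of that helper in isolation:

```python
def extract_scatter_id(inp):
    # 'main.cwl#align/reads' -> namespace 'main.cwl', then 'align/reads' -> 'reads'
    _, ns_var = inp.split("#")
    _, var = ns_var.split("/")
    return var

assert extract_scatter_id("main.cwl#align/reads") == "reads"
```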
def distance_to_current_waypoint():
"""
Gets distance in metres to the current waypoint.
It returns None for the first waypoint (Home location).
"""
nextwaypoint = vehicle.commands.next
if nextwaypoint==0:
return None
missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed
lat = missionitem.x
lon = missionitem.y
alt = missionitem.z
targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)
distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)
return distancetopoint | Gets distance in metres to the current waypoint.
It returns None for the first waypoint (Home location). | Below is the instruction that describes the task:
### Input:
Gets distance in metres to the current waypoint.
It returns None for the first waypoint (Home location).
### Response:
def distance_to_current_waypoint():
"""
Gets distance in metres to the current waypoint.
It returns None for the first waypoint (Home location).
"""
nextwaypoint = vehicle.commands.next
if nextwaypoint==0:
return None
missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed
lat = missionitem.x
lon = missionitem.y
alt = missionitem.z
targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)
distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)
return distancetopoint |
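`get_distance_metres` is not shown in this row; a sketch of a compatible helper, matching the flat-earth approximation used throughout the DroneKit example scripts (adequate for short distances, not near the poles):

```python
import math

def get_distance_metres(aLocation1, aLocation2):
    """Approximate ground distance in metres between two LocationGlobal objects."""
    dlat = aLocation2.lat - aLocation1.lat
    dlong = aLocation2.lon - aLocation1.lon
    # ~1.113195e5 metres per degree of latitude (and of longitude at the equator).
    return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5
```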
def header_from_raw_header_data(cls, raw_header):
"""Unpack binary message header data obtained as a reply from HANA
:param raw_header: binary string containing message header data
:returns: named tuple for easy access of header data
"""
try:
header = MessageHeader(*cls.header_struct.unpack(raw_header))
except struct.error:
raise Exception("Invalid message header received")
return header | Unpack binary message header data obtained as a reply from HANA
:param raw_header: binary string containing message header data
:returns: named tuple for easy access of header data | Below is the instruction that describes the task:
### Input:
Unpack binary message header data obtained as a reply from HANA
:param raw_header: binary string containing message header data
:returns: named tuple for easy access of header data
### Response:
def header_from_raw_header_data(cls, raw_header):
"""Unpack binary message header data obtained as a reply from HANA
:param raw_header: binary string containing message header data
:returns: named tuple for easy access of header data
"""
try:
header = MessageHeader(*cls.header_struct.unpack(raw_header))
except struct.error:
raise Exception("Invalid message header received")
return header |
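`cls.header_struct` and `MessageHeader` are defined elsewhere in the original module; below is a self-contained illustration of the same unpack-into-namedtuple pattern, with a hypothetical field layout (not HANA's actual wire format):

```python
import struct
from collections import namedtuple

# Hypothetical layout: session id, packet count, payload length, varpart size.
MessageHeader = namedtuple('MessageHeader', 'session_id packet_count length varpartsize')

class Message:
    header_struct = struct.Struct('<qiII')  # assumed field widths, little-endian

    @classmethod
    def header_from_raw_header_data(cls, raw_header):
        try:
            return MessageHeader(*cls.header_struct.unpack(raw_header))
        except struct.error:
            raise Exception("Invalid message header received")

raw = struct.pack('<qiII', 7, 0, 128, 1024)
print(Message.header_from_raw_header_data(raw))
# MessageHeader(session_id=7, packet_count=0, length=128, varpartsize=1024)
```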
def _tr_above(self):
"""
The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row.
"""
tr_lst = self._tbl.tr_lst
tr_idx = tr_lst.index(self._tr)
if tr_idx == 0:
raise ValueError('no tr above topmost tr')
return tr_lst[tr_idx-1] | The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row. | Below is the instruction that describes the task:
### Input:
The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row.
### Response:
def _tr_above(self):
"""
The tr element prior in sequence to the tr this cell appears in.
Raises |ValueError| if called on a cell in the top-most row.
"""
tr_lst = self._tbl.tr_lst
tr_idx = tr_lst.index(self._tr)
if tr_idx == 0:
raise ValueError('no tr above topmost tr')
return tr_lst[tr_idx-1] |
def to_code(self, context: Context =None):
"""
Generate the code and return it as a string.
"""
# Do not override this method!
context = context or Context()
for imp in self.imports:
if imp not in context.imports:
context.imports.append(imp)
counter = Counter()
lines = list(self.to_lines(context=context, counter=counter))
if counter.num_indented_non_doc_blocks == 0:
if self.expects_body_or_pass:
lines.append(" pass")
elif self.closed_by:
lines[-1] += self.closed_by
else:
if self.closed_by:
lines.append(self.closed_by)
return join_lines(*lines) + self._suffix | Generate the code and return it as a string. | Below is the instruction that describes the task:
### Input:
Generate the code and return it as a string.
### Response:
def to_code(self, context: Context =None):
"""
Generate the code and return it as a string.
"""
# Do not override this method!
context = context or Context()
for imp in self.imports:
if imp not in context.imports:
context.imports.append(imp)
counter = Counter()
lines = list(self.to_lines(context=context, counter=counter))
if counter.num_indented_non_doc_blocks == 0:
if self.expects_body_or_pass:
lines.append(" pass")
elif self.closed_by:
lines[-1] += self.closed_by
else:
if self.closed_by:
lines.append(self.closed_by)
return join_lines(*lines) + self._suffix |
def stack(self, key, labels=None):
"""Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.
"""
rows, labels = [], labels or self.labels
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items()
if k != key and k in labels]
return type(self)([key, 'column', 'value']).with_rows(rows) | Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data. | Below is the instruction that describes the task:
### Input:
Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.
### Response:
def stack(self, key, labels=None):
"""Takes k original columns and returns two columns, with col. 1 of
all column names and col. 2 of all associated data.
"""
rows, labels = [], labels or self.labels
for row in self.rows:
[rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items()
if k != key and k in labels]
return type(self)([key, 'column', 'value']).with_rows(rows) |
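In effect this is a wide-to-long reshape. The same transformation sketched on plain dicts, without the Table/row machinery (which is not shown here):

```python
rows = [
    {'name': 'a', 'x': 1, 'y': 2},
    {'name': 'b', 'x': 3, 'y': 4},
]
# Keep the key column, melt every other (column, value) pair into its own row.
stacked = [(row['name'], k, v)
           for row in rows
           for k, v in row.items()
           if k != 'name']
print(stacked)  # [('a', 'x', 1), ('a', 'y', 2), ('b', 'x', 3), ('b', 'y', 4)]
```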
def rank_motifs(stats, metrics=("roc_auc", "recall_at_fdr")):
"""Determine mean rank of motifs based on metrics."""
rank = {}
combined_metrics = []
motif_ids = stats.keys()
background = list(stats.values())[0].keys()
for metric in metrics:
mean_metric_stats = [np.mean(
[stats[m][bg][metric] for bg in background]) for m in motif_ids]
ranked_metric_stats = rankdata(mean_metric_stats)
combined_metrics.append(ranked_metric_stats)
for motif, val in zip(motif_ids, np.mean(combined_metrics, 0)):
rank[motif] = val
return rank | Determine mean rank of motifs based on metrics. | Below is the instruction that describes the task:
### Input:
Determine mean rank of motifs based on metrics.
### Response:
def rank_motifs(stats, metrics=("roc_auc", "recall_at_fdr")):
"""Determine mean rank of motifs based on metrics."""
rank = {}
combined_metrics = []
motif_ids = stats.keys()
background = list(stats.values())[0].keys()
for metric in metrics:
mean_metric_stats = [np.mean(
[stats[m][bg][metric] for bg in background]) for m in motif_ids]
ranked_metric_stats = rankdata(mean_metric_stats)
combined_metrics.append(ranked_metric_stats)
for motif, val in zip(motif_ids, np.mean(combined_metrics, 0)):
rank[motif] = val
return rank |
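A small worked example, assuming the `rank_motifs` definition above and its numpy/scipy imports (`np`, `rankdata`) are in scope; the higher-scoring motif ends up with the higher mean rank:

```python
stats = {
    'motif_A': {'bg1': {'roc_auc': 0.9, 'recall_at_fdr': 0.8}},
    'motif_B': {'bg1': {'roc_auc': 0.6, 'recall_at_fdr': 0.5}},
}
print(rank_motifs(stats))  # {'motif_A': 2.0, 'motif_B': 1.0}
```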
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func) | Import the common log functions from the global logger to the module. | Below is the instruction that describes the task:
### Input:
Import the common log functions from the global logger to the module.
### Response:
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func) |
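The same promote-to-module-level pattern in self-contained form; `g_logger` and `_logging_funcs` are stubbed here since their definitions are not shown in this row:

```python
import logging
import sys

logging.basicConfig()
g_logger = logging.getLogger('demo')
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical']

def import_log_funcs():
    """Copy the logger's bound methods onto this module as plain functions."""
    curr_mod = sys.modules[__name__]
    for func_name in _logging_funcs:
        setattr(curr_mod, func_name, getattr(g_logger, func_name))

import_log_funcs()
warning('now callable as a module-level function')  # noqa: F821 (bound at runtime)
```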
def additions_remove(**kwargs):
'''
Remove VirtualBox Guest Additions.
Firstly it tries to uninstall itself by executing
'/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
It uses the CD connected by VirtualBox if that fails.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_remove
salt '*' vbox_guest.additions_remove force=True
:param force: force VirtualBox Guest Additions removing
:type force: bool
:return: True if VirtualBox Guest Additions were removed successfully else False
'''
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
ret = _additions_remove_linux()
if not ret:
ret = _additions_remove_use_cd(**kwargs)
return ret | Remove VirtualBox Guest Additions.
Firstly it tries to uninstall itself by executing
'/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
It uses the CD connected by VirtualBox if that fails.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_remove
salt '*' vbox_guest.additions_remove force=True
:param force: force VirtualBox Guest Additions removing
:type force: bool
:return: True if VirtualBox Guest Additions were removed successfully else False | Below is the instruction that describes the task:
### Input:
Remove VirtualBox Guest Additions.
Firstly it tries to uninstall itself by executing
'/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
It uses the CD connected by VirtualBox if that fails.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_remove
salt '*' vbox_guest.additions_remove force=True
:param force: force VirtualBox Guest Additions removing
:type force: bool
:return: True if VirtualBox Guest Additions were removed successfully else False
### Response:
def additions_remove(**kwargs):
'''
Remove VirtualBox Guest Additions.
Firstly it tries to uninstall itself by executing
'/opt/VBoxGuestAdditions-VERSION/uninstall.run uninstall'.
It uses the CD connected by VirtualBox if that fails.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_remove
salt '*' vbox_guest.additions_remove force=True
:param force: force VirtualBox Guest Additions removing
:type force: bool
:return: True if VirtualBox Guest Additions were removed successfully else False
'''
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
ret = _additions_remove_linux()
if not ret:
ret = _additions_remove_use_cd(**kwargs)
return ret |
def pvc_iyang(
petin,
mridct,
Cnt,
pvcroi,
krnl,
itr=5,
tool='niftyreg',
faff='',
outpath='',
fcomment='',
store_img=False,
store_rois=False,
):
''' Perform partial volume (PVC) correction of PET data (petin) using MRI data (mridct).
The PVC method uses iterative Yang method.
GPU based convolution is the key routine of the PVC.
Input:
-------
petin: either a dictionary containing image data, file name and affine transform,
or a string of the path to the NIfTI file of the PET data.
mridct: a dictionary of MRI data, including the T1w image, which can be given
in DICOM (field 'T1DCM') or NIfTI (field 'T1nii'). The T1w image data
is needed for co-registration to PET if affine is not given in the text
file with its path in faff.
Cnt: a dictionary of paths for third-party tools:
* dcm2niix: Cnt['DCM2NIIX']
* niftyreg, resample: Cnt['RESPATH']
* niftyreg, rigid-reg: Cnt['REGPATH']
* verbose mode on/off: Cnt['VERBOSE'] = True/False
pvcroi: list of regions (also a list) with number label to distinguish
the parcellations. The numbers correspond to the image values
of the parcellated T1w image. E.g.:
pvcroi = [
[36], # ROI 1 (single parcellation region)
[35], # ROI 2
[39, 40, 72, 73, 74], # ROI 3 (region consisting of multiple parcellation regions)
...
]
kernel: the point spread function (PSF) specific for the camera and the object.
It is given as a 3x17 matrix, a 17-element kernel for each dimension (x,y,z).
It is used in the GPU-based convolution using separable kernels.
outpath: path to the output of the resulting PVC images
faff: a text file of the affine transformations needed to get the MRI into PET space.
If not provided, it will be obtained from the performed rigid transformation.
For this the MR T1w image is required. If faff and T1w image are not provided,
it will result in an exception/error.
fcomment: a string used in naming the produced files, helpful for distinguishing them.
tool: co-registration tool. By default it is NiftyReg, but SPM is also
possible (needs Matlab engine and more validation)
itr: number of iterations used by the PVC. 5-10 should be enough (5 default)
'''
# get all the input image properties
if isinstance(petin, dict):
im = petin['im']
fpet = petin['fpet']
B = petin['affine']
elif isinstance(petin, basestring) and os.path.isfile(petin):
imdct = imio.getnii(petin, output='all')
im = imdct['im']
B = imdct['affine']
fpet = petin
if im.ndim!=3:
raise IndexError('Only 3D images are expected in this method of partial volume correction.')
# check if brain parcellations exist in NIfTI format
if not os.path.isfile(mridct['T1lbl']):
raise NameError('MissingLabels')
#> output dictionary
outdct = {}
# establish the output folder
if outpath=='':
prcl_dir = os.path.dirname(mridct['T1lbl'])
pvc_dir = os.path.join(os.path.dirname(fpet), 'PVC')
else:
prcl_dir = outpath
pvc_dir = os.path.join(os.path.dirname(fpet), 'PVC')
#> create folders
imio.create_dir(prcl_dir)
imio.create_dir(pvc_dir)
#==================================================================
#if affine transf. (faff) is given then take the T1 and resample it too.
if isinstance(faff, basestring) and not os.path.isfile(faff):
# faff is not given; get it by running the affine; get T1w to PET space
ft1w = imio.pick_t1w(mridct)
if tool=='spm':
regdct = regseg.coreg_spm(
fpet,
ft1w,
fcomment = fcomment,
outpath=os.path.join(outpath,'PET', 'positioning')
)
elif tool=='niftyreg':
regdct = regseg.affine_niftyreg(
fpet,
ft1w,
outpath=os.path.join(outpath,'PET', 'positioning'),
fcomment = fcomment,
executable = Cnt['REGPATH'],
omp = multiprocessing.cpu_count()/2,
rigOnly = True,
affDirect = False,
maxit=5,
speed=True,
pi=50, pv=50,
smof=0, smor=0,
rmsk=True,
fmsk=True,
rfwhm=15., #millilitres
rthrsh=0.05,
ffwhm = 15., #millilitres
fthrsh=0.05,
verbose=Cnt['VERBOSE']
)
faff = regdct['faff']
# resample the GIF T1/labels to upsampled PET
# file name of the parcellation (GIF-based) upsampled to PET
fgt1u = os.path.join(
prcl_dir,
os.path.basename(mridct['T1lbl']).split('.')[0]\
+'_registered_trimmed'+fcomment+'.nii.gz')
if tool=='niftyreg':
if os.path.isfile( Cnt['RESPATH'] ):
cmd = [Cnt['RESPATH'], '-ref', fpet, '-flo', mridct['T1lbl'],
'-trans', faff, '-res', fgt1u, '-inter', '0']
if not Cnt['VERBOSE']: cmd.append('-voff')
call(cmd)
else:
raise IOError('e> path to resampling executable is incorrect!')
elif tool=='spm':
fout = regseg.resample_spm(
fpet,
mridct['T1lbl'],
faff,
fimout = fgt1u,
matlab_eng = regdct['matlab_eng'],
intrp = 0.,
del_ref_uncmpr = True,
del_flo_uncmpr = True,
del_out_uncmpr = True,
)
#==================================================================
# Get the parcellation labels in the upsampled PET space
dprcl = imio.getnii(fgt1u, output='all')
prcl = dprcl['im']
#> path to parcellations in NIfTI format
prcl_pth = os.path.split(mridct['T1lbl'])
#---------------------------------------------------------------------------
#> get the parcellation specific for PVC based on the current parcellations
imgroi = prcl.copy(); imgroi[:] = 0
#> number of segments, without the background
nSeg = len(pvcroi)
#> create the image of numbered parcellations
for k in range(nSeg):
for m in pvcroi[k]:
imgroi[prcl==m] = k+1
#> save the PCV ROIs to a new NIfTI file
if store_rois:
froi = os.path.join(prcl_dir, prcl_pth[1].split('.nii')[0]+'_PVC-ROIs-inPET.nii.gz')
imio.array2nii(
imgroi,
dprcl['affine'],
froi,
trnsp = (dprcl['transpose'].index(0),
dprcl['transpose'].index(1),
dprcl['transpose'].index(2)),
flip = dprcl['flip'])
outdct['froi'] = froi
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# run iterative Yang PVC
imgpvc, m_a = iyang(im, krnl, imgroi, Cnt, itr=itr)
#---------------------------------------------------------------------------
outdct['im'] = imgpvc
outdct['imroi'] = imgroi
outdct['faff'] = faff
if store_img:
fpvc = os.path.join( pvc_dir,
os.path.split(fpet)[1].split('.nii')[0]+'_PVC'+fcomment+'.nii.gz')
imio.array2nii( imgpvc[::-1,::-1,:], B, fpvc, descrip='pvc=iY')
outdct['fpet'] = fpvc
return outdct | Perform partial volume (PVC) correction of PET data (petin) using MRI data (mridct).
The PVC method uses iterative Yang method.
GPU based convolution is the key routine of the PVC.
Input:
-------
petin: either a dictionary containing image data, file name and affine transform,
or a string of the path to the NIfTI file of the PET data.
mridct: a dictionary of MRI data, including the T1w image, which can be given
in DICOM (field 'T1DCM') or NIfTI (field 'T1nii'). The T1w image data
is needed for co-registration to PET if affine is not given in the text
file with its path in faff.
Cnt: a dictionary of paths for third-party tools:
* dcm2niix: Cnt['DCM2NIIX']
* niftyreg, resample: Cnt['RESPATH']
* niftyreg, rigid-reg: Cnt['REGPATH']
* verbose mode on/off: Cnt['VERBOSE'] = True/False
pvcroi: list of regions (also a list) with number label to distinguish
the parcellations. The numbers correspond to the image values
of the parcellated T1w image. E.g.:
pvcroi = [
[36], # ROI 1 (single parcellation region)
[35], # ROI 2
[39, 40, 72, 73, 74], # ROI 3 (region consisting of multiple parcellation regions)
...
]
kernel: the point spread function (PSF) specific for the camera and the object.
It is given as a 3x17 matrix, a 17-element kernel for each dimension (x,y,z).
It is used in the GPU-based convolution using separable kernels.
outpath: path to the output of the resulting PVC images
faff: a text file of the affine transformations needed to get the MRI into PET space.
If not provided, it will be obtained from the performed rigid transformation.
For this the MR T1w image is required. If faff and T1w image are not provided,
it will result in an exception/error.
fcomment: a string used in naming the produced files, helpful for distinguishing them.
tool: co-registration tool. By default it is NiftyReg, but SPM is also
possible (needs Matlab engine and more validation)
itr: number of iterations used by the PVC. 5-10 should be enough (5 default) | Below is the instruction that describes the task:
### Input:
Perform partial volume (PVC) correction of PET data (petin) using MRI data (mridct).
The PVC method uses iterative Yang method.
GPU based convolution is the key routine of the PVC.
Input:
-------
petin: either a dictionary containing image data, file name and affine transform,
or a string of the path to the NIfTI file of the PET data.
mridct: a dictionary of MRI data, including the T1w image, which can be given
in DICOM (field 'T1DCM') or NIfTI (field 'T1nii'). The T1w image data
is needed for co-registration to PET if affine is not given in the text
file with its path in faff.
Cnt: a dictionary of paths for third-party tools:
* dcm2niix: Cnt['DCM2NIIX']
* niftyreg, resample: Cnt['RESPATH']
* niftyreg, rigid-reg: Cnt['REGPATH']
* verbose mode on/off: Cnt['VERBOSE'] = True/False
pvcroi: list of regions (also a list) with number label to distinguish
the parcellations. The numbers correspond to the image values
of the parcellated T1w image. E.g.:
pvcroi = [
[36], # ROI 1 (single parcellation region)
[35], # ROI 2
[39, 40, 72, 73, 74], # ROI 3 (region consisting of multiple parcellation regions)
...
]
kernel: the point spread function (PSF) specific for the camera and the object.
It is given as a 3x17 matrix, a 17-element kernel for each dimension (x,y,z).
It is used in the GPU-based convolution using separable kernels.
outpath: path to the output of the resulting PVC images
faff: a text file of the affine transformations needed to get the MRI into PET space.
If not provided, it will be obtained from the performed rigid transformation.
For this the MR T1w image is required. If faff and T1w image are not provided,
it will result in an exception/error.
fcomment: a string used in naming the produced files, helpful for distinguishing them.
tool: co-registration tool. By default it is NiftyReg, but SPM is also
possible (needs Matlab engine and more validation)
itr: number of iterations used by the PVC. 5-10 should be enough (5 default)
### Response:
def pvc_iyang(
petin,
mridct,
Cnt,
pvcroi,
krnl,
itr=5,
tool='niftyreg',
faff='',
outpath='',
fcomment='',
store_img=False,
store_rois=False,
):
''' Perform partial volume (PVC) correction of PET data (petin) using MRI data (mridct).
The PVC method uses iterative Yang method.
GPU based convolution is the key routine of the PVC.
Input:
-------
petin: either a dictionary containing image data, file name and affine transform,
or a string of the path to the NIfTI file of the PET data.
mridct: a dictionary of MRI data, including the T1w image, which can be given
in DICOM (field 'T1DCM') or NIfTI (field 'T1nii'). The T1w image data
is needed for co-registration to PET if affine is not given in the text
file with its path in faff.
Cnt: a dictionary of paths for third-party tools:
* dcm2niix: Cnt['DCM2NIIX']
* niftyreg, resample: Cnt['RESPATH']
* niftyreg, rigid-reg: Cnt['REGPATH']
* verbose mode on/off: Cnt['VERBOSE'] = True/False
pvcroi: list of regions (also a list) with number label to distinguish
the parcellations. The numbers correspond to the image values
of the parcellated T1w image. E.g.:
pvcroi = [
[36], # ROI 1 (single parcellation region)
[35], # ROI 2
[39, 40, 72, 73, 74], # ROI 3 (region consisting of multiple parcellation regions)
...
]
kernel: the point spread function (PSF) specific for the camera and the object.
It is given as a 3x17 matrix, a 17-element kernel for each dimension (x,y,z).
It is used in the GPU-based convolution using separable kernels.
outpath: path to the output of the resulting PVC images
faff: a text file of the affine transformations needed to get the MRI into PET space.
If not provided, it will be obtained from the performed rigid transformation.
For this the MR T1w image is required. If faff and T1w image are not provided,
it will result in an exception/error.
fcomment: a string used in naming the produced files, helpful for distinguishing them.
tool: co-registration tool. By default it is NiftyReg, but SPM is also
possible (needs Matlab engine and more validation)
itr: number of iterations used by the PVC. 5-10 should be enough (5 default)
'''
# get all the input image properties
if isinstance(petin, dict):
im = petin['im']
fpet = petin['fpet']
B = petin['affine']
elif isinstance(petin, basestring) and os.path.isfile(petin):
imdct = imio.getnii(petin, output='all')
im = imdct['im']
B = imdct['affine']
fpet = petin
if im.ndim!=3:
raise IndexError('Only 3D images are expected in this method of partial volume correction.')
# check if brain parcellations exist in NIfTI format
if not os.path.isfile(mridct['T1lbl']):
raise NameError('MissingLabels')
#> output dictionary
outdct = {}
# establish the output folder
if outpath=='':
prcl_dir = os.path.dirname(mridct['T1lbl'])
pvc_dir = os.path.join(os.path.dirname(fpet), 'PVC')
else:
prcl_dir = outpath
pvc_dir = os.path.join(os.path.dirname(fpet), 'PVC')
#> create folders
imio.create_dir(prcl_dir)
imio.create_dir(pvc_dir)
#==================================================================
#if affine transf. (faff) is given then take the T1 and resample it too.
if isinstance(faff, basestring) and not os.path.isfile(faff):
# faff is not given; get it by running the affine; get T1w to PET space
ft1w = imio.pick_t1w(mridct)
if tool=='spm':
regdct = regseg.coreg_spm(
fpet,
ft1w,
fcomment = fcomment,
outpath=os.path.join(outpath,'PET', 'positioning')
)
elif tool=='niftyreg':
regdct = regseg.affine_niftyreg(
fpet,
ft1w,
outpath=os.path.join(outpath,'PET', 'positioning'),
fcomment = fcomment,
executable = Cnt['REGPATH'],
omp = multiprocessing.cpu_count()/2,
rigOnly = True,
affDirect = False,
maxit=5,
speed=True,
pi=50, pv=50,
smof=0, smor=0,
rmsk=True,
fmsk=True,
rfwhm=15., #millilitres
rthrsh=0.05,
ffwhm = 15., #millilitres
fthrsh=0.05,
verbose=Cnt['VERBOSE']
)
faff = regdct['faff']
# resample the GIF T1/labels to upsampled PET
# file name of the parcellation (GIF-based) upsampled to PET
fgt1u = os.path.join(
prcl_dir,
os.path.basename(mridct['T1lbl']).split('.')[0]\
+'_registered_trimmed'+fcomment+'.nii.gz')
if tool=='niftyreg':
if os.path.isfile( Cnt['RESPATH'] ):
cmd = [Cnt['RESPATH'], '-ref', fpet, '-flo', mridct['T1lbl'],
'-trans', faff, '-res', fgt1u, '-inter', '0']
if not Cnt['VERBOSE']: cmd.append('-voff')
call(cmd)
else:
raise IOError('e> path to resampling executable is incorrect!')
elif tool=='spm':
fout = regseg.resample_spm(
fpet,
mridct['T1lbl'],
faff,
fimout = fgt1u,
matlab_eng = regdct['matlab_eng'],
intrp = 0.,
del_ref_uncmpr = True,
del_flo_uncmpr = True,
del_out_uncmpr = True,
)
#==================================================================
# Get the parcellation labels in the upsampled PET space
dprcl = imio.getnii(fgt1u, output='all')
prcl = dprcl['im']
#> path to parcellations in NIfTI format
prcl_pth = os.path.split(mridct['T1lbl'])
#---------------------------------------------------------------------------
#> get the parcellation specific for PVC based on the current parcellations
imgroi = prcl.copy(); imgroi[:] = 0
#> number of segments, without the background
nSeg = len(pvcroi)
#> create the image of numbered parcellations
for k in range(nSeg):
for m in pvcroi[k]:
imgroi[prcl==m] = k+1
#> save the PCV ROIs to a new NIfTI file
if store_rois:
froi = os.path.join(prcl_dir, prcl_pth[1].split('.nii')[0]+'_PVC-ROIs-inPET.nii.gz')
imio.array2nii(
imgroi,
dprcl['affine'],
froi,
trnsp = (dprcl['transpose'].index(0),
dprcl['transpose'].index(1),
dprcl['transpose'].index(2)),
flip = dprcl['flip'])
outdct['froi'] = froi
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# run iterative Yang PVC
imgpvc, m_a = iyang(im, krnl, imgroi, Cnt, itr=itr)
#---------------------------------------------------------------------------
outdct['im'] = imgpvc
outdct['imroi'] = imgroi
outdct['faff'] = faff
if store_img:
fpvc = os.path.join( pvc_dir,
os.path.split(fpet)[1].split('.nii')[0]+'_PVC'+fcomment+'.nii.gz')
imio.array2nii( imgpvc[::-1,::-1,:], B, fpvc, descrip='pvc=iY')
outdct['fpet'] = fpvc
return outdct |
def trim(hdu):
"""TRIM a CFHT MEGAPRIME frame using the DATASEC keyword"""
datasec = re.findall(r'(\d+)',
hdu.header.get('DATASEC'))
l=int(datasec[0])-1
r=int(datasec[1])
b=int(datasec[2])-1
t=int(datasec[3])
if opt.verbose:
print "Trimming [%d:%d,%d:%d]" % ( l,r,b,t)
hdu.data = hdu.data[b:t,l:r]
hdu.header.update('DATASEC',"[%d:%d,%d:%d]" % (1,r-l+1,1,t-b+1), comment="Image was trimmed")
hdu.header.update('ODATASEC',"[%d:%d,%d:%d]" % (l+1,r,b+1,t), comment="previous DATASEC")
return | TRIM a CFHT MEGAPRIME frame using the DATASEC keyword | Below is the instruction that describes the task:
### Input:
TRIM a CFHT MEGAPRIME frame using the DATASEC keyword
### Response:
def trim(hdu):
"""TRIM a CFHT MEGAPRIME frame using the DATASEC keyword"""
datasec = re.findall(r'(\d+)',
hdu.header.get('DATASEC'))
l=int(datasec[0])-1
r=int(datasec[1])
b=int(datasec[2])-1
t=int(datasec[3])
if opt.verbose:
print "Trimming [%d:%d,%d:%d]" % ( l,r,b,t)
hdu.data = hdu.data[b:t,l:r]
hdu.header.update('DATASEC',"[%d:%d,%d:%d]" % (1,r-l+1,1,t-b+1), comment="Image was trimmed")
hdu.header.update('ODATASEC',"[%d:%d,%d:%d]" % (l+1,r,b+1,t), comment="previous DATASEC")
return |
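The DATASEC parsing is the load-bearing step; a standalone check of the regex on a typical MegaPrime-style header value (the section string here is illustrative):

```python
import re

datasec = re.findall(r'(\d+)', '[33:2080,1:4612]')  # example DATASEC value
l = int(datasec[0]) - 1   # 32: FITS sections are 1-indexed and inclusive,
r = int(datasec[1])       # 2080: so the left/bottom edges shift down by one
b = int(datasec[2]) - 1   # 0
t = int(datasec[3])       # 4612
# hdu.data[b:t, l:r] then keeps only the science pixels.
```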
def getReffs(
self,
textId: str,
level: int=1,
subreference: Union[str, BaseReference]=None,
include_descendants: bool=False,
additional_parameters: Optional[Dict[str, Any]]=None
) -> BaseReferenceSet:
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param include_descendants:
:param additional_parameters:
:return: List of references
:rtype: [str]
..toDo :: This starts to be a bloated function....
"""
raise NotImplementedError() | Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param include_descendants:
:param additional_parameters:
:return: List of references
:rtype: [str]
..toDo :: This starts to be a bloated function.... | Below is the instruction that describes the task:
### Input:
Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param include_descendants:
:param additional_parameters:
:return: List of references
:rtype: [str]
..toDo :: This starts to be a bloated function....
### Response:
def getReffs(
self,
textId: str,
level: int=1,
subreference: Union[str, BaseReference]=None,
include_descendants: bool=False,
additional_parameters: Optional[Dict[str, Any]]=None
) -> BaseReferenceSet:
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param level: Depth for retrieval
:type level: int
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param include_descendants:
:param additional_parameters:
:return: List of references
:rtype: [str]
..toDo :: This starts to be a bloated function....
"""
raise NotImplementedError() |
def call_color(*args, **kwargs):
'''
Set a color to the lamp.
Options:
* **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.
* **color**: Fixed color. Values are: red, green, blue, orange, pink, white,
yellow, daylight, purple. Default white.
* **transition**: Transition 0~200.
Advanced:
* **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation.
More: http://www.developers.meethue.com/documentation/hue-xy-values
CLI Example:
.. code-block:: bash
salt '*' hue.color
salt '*' hue.color id=1
salt '*' hue.color id=1,2,3 color=red transition=30
salt '*' hue.color id=1 gamut=0.3,0.5
'''
res = dict()
colormap = {
'red': Const.COLOR_RED,
'green': Const.COLOR_GREEN,
'blue': Const.COLOR_BLUE,
'orange': Const.COLOR_ORANGE,
'pink': Const.COLOR_PINK,
'white': Const.COLOR_WHITE,
'yellow': Const.COLOR_YELLOW,
'daylight': Const.COLOR_DAYLIGHT,
'purple': Const.COLOR_PURPLE,
}
devices = _get_lights()
color = kwargs.get("gamut")
if color:
color = color.split(",")
if len(color) == 2:
try:
color = {"xy": [float(color[0]), float(color[1])]}
except Exception as ex:
color = None
else:
color = None
if not color:
color = colormap.get(kwargs.get("color", 'white'), Const.COLOR_WHITE)
color.update({"transitiontime": max(min(kwargs.get("transition", 0), 200), 0)})
for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):
res[dev_id] = _set(dev_id, color)
return res | Set a color to the lamp.
Options:
* **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.
* **color**: Fixed color. Values are: red, green, blue, orange, pink, white,
yellow, daylight, purple. Default white.
* **transition**: Transition 0~200.
Advanced:
* **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation.
More: http://www.developers.meethue.com/documentation/hue-xy-values
CLI Example:
.. code-block:: bash
salt '*' hue.color
salt '*' hue.color id=1
salt '*' hue.color id=1,2,3 color=red transition=30
salt '*' hue.color id=1 gamut=0.3,0.5 | Below is the instruction that describes the task:
### Input:
Set a color to the lamp.
Options:
* **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.
* **color**: Fixed color. Values are: red, green, blue, orange, pink, white,
yellow, daylight, purple. Default white.
* **transition**: Transition 0~200.
Advanced:
* **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation.
More: http://www.developers.meethue.com/documentation/hue-xy-values
CLI Example:
.. code-block:: bash
salt '*' hue.color
salt '*' hue.color id=1
salt '*' hue.color id=1,2,3 color=red transition=30
salt '*' hue.color id=1 gamut=0.3,0.5
### Response:
def call_color(*args, **kwargs):
'''
Set a color to the lamp.
Options:
* **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.
* **color**: Fixed color. Values are: red, green, blue, orange, pink, white,
yellow, daylight, purple. Default white.
* **transition**: Transition 0~200.
Advanced:
* **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation.
More: http://www.developers.meethue.com/documentation/hue-xy-values
CLI Example:
.. code-block:: bash
salt '*' hue.color
salt '*' hue.color id=1
salt '*' hue.color id=1,2,3 oolor=red transition=30
salt '*' hue.color id=1 gamut=0.3,0.5
'''
res = dict()
colormap = {
'red': Const.COLOR_RED,
'green': Const.COLOR_GREEN,
'blue': Const.COLOR_BLUE,
'orange': Const.COLOR_ORANGE,
'pink': Const.COLOR_PINK,
'white': Const.COLOR_WHITE,
'yellow': Const.COLOR_YELLOW,
'daylight': Const.COLOR_DAYLIGHT,
'purple': Const.COLOR_PURPLE,
}
devices = _get_lights()
color = kwargs.get("gamut")
if color:
color = color.split(",")
if len(color) == 2:
try:
color = {"xy": [float(color[0]), float(color[1])]}
except Exception as ex:
color = None
else:
color = None
if not color:
color = colormap.get(kwargs.get("color", 'white'), Const.COLOR_WHITE)
color.update({"transitiontime": max(min(kwargs.get("transition", 0), 200), 0)})
for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):
res[dev_id] = _set(dev_id, color)
return res |
def print_progress_bar_multi_threads(nb_threads, suffix='', decimals=1, length=15,
fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
string = ""
for k in range(nb_threads):
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
time.sleep(0.001)
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
continue
iteration = threads_state["iteration"]
total = threads_state["total"]
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
prefix = "Thread %s :" % str(k)
string = string + '%s %s%% ' % (prefix, percent)
print(string + " " + suffix) | Call in a loop to create terminal progress bar
@params:
nb_threads - Required : number of threads whose state files are read (Int)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str) | Below is the instruction that describes the task:
### Input:
Call in a loop to create terminal progress bar
@params:
nb_threads - Required : number of threads whose state files are read (Int)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
### Response:
def print_progress_bar_multi_threads(nb_threads, suffix='', decimals=1, length=15,
fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
nb_threads - Required : number of threads whose state files are read (Int)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
string = ""
for k in range(nb_threads):
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
time.sleep(0.001)
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
pass
iteration = threads_state["iteration"]
total = threads_state["total"]
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
prefix = "Thread %s :" % str(k)
string = string + '%s %s%% ' % (prefix, percent)
print(string + " " + suffix) |
def receive_pong(self, pong: Pong):
""" Handles a Pong message. """
message_id = ('ping', pong.nonce, pong.sender)
async_result = self.messageids_to_asyncresults.get(message_id)
if async_result is not None:
self.log_healthcheck.debug(
'Pong received',
sender=pex(pong.sender),
message_id=pong.nonce,
)
async_result.set(True)
else:
self.log_healthcheck.warn(
'Unknown pong received',
message_id=message_id,
) | Handles a Pong message. | Below is the instruction that describes the task:
### Input:
Handles a Pong message.
### Response:
def receive_pong(self, pong: Pong):
""" Handles a Pong message. """
message_id = ('ping', pong.nonce, pong.sender)
async_result = self.messageids_to_asyncresults.get(message_id)
if async_result is not None:
self.log_healthcheck.debug(
'Pong received',
sender=pex(pong.sender),
message_id=pong.nonce,
)
async_result.set(True)
else:
self.log_healthcheck.warn(
'Unknown pong received',
message_id=message_id,
) |
def run_field_scan(ModelClass, model_kwargs, t_output_every, t_upto, field,
vals, force_resume=True, parallel=False):
"""Run many models with a range of parameter sets.
Parameters
----------
ModelClass: callable
A class or factory function that returns a model object by
calling `ModelClass(model_kwargs)`
model_kwargs: dict
See `ModelClass` explanation.
t_output_every: float
see :class:`Runner`.
t_upto: float
Run each model until the time is equal to this
field: str
The name of the field to be varied, whose values are in `vals`.
vals: array_like
Iterable of values to use to instantiate each Model object.
parallel: bool
Whether or not to run the models in parallel, using the Multiprocessing
library. If `True`, the number of concurrent tasks will be equal to
one less than the number of available cores detected.
"""
    model_kwarg_sets = [dict(model_kwargs, **{field: val}) for val in vals]
run_kwarg_scan(ModelClass, model_kwarg_sets,
t_output_every, t_upto, force_resume, parallel) | Run many models with a range of parameter sets.
Parameters
----------
ModelClass: callable
A class or factory function that returns a model object by
calling `ModelClass(model_kwargs)`
model_kwargs: dict
See `ModelClass` explanation.
t_output_every: float
see :class:`Runner`.
t_upto: float
Run each model until the time is equal to this
field: str
The name of the field to be varied, whose values are in `vals`.
vals: array_like
Iterable of values to use to instantiate each Model object.
parallel: bool
Whether or not to run the models in parallel, using the Multiprocessing
library. If `True`, the number of concurrent tasks will be equal to
one less than the number of available cores detected. | Below is the instruction that describes the task:
### Input:
Run many models with a range of parameter sets.
Parameters
----------
ModelClass: callable
A class or factory function that returns a model object by
calling `ModelClass(model_kwargs)`
model_kwargs: dict
See `ModelClass` explanation.
t_output_every: float
see :class:`Runner`.
t_upto: float
Run each model until the time is equal to this
field: str
The name of the field to be varied, whose values are in `vals`.
vals: array_like
Iterable of values to use to instantiate each Model object.
parallel: bool
Whether or not to run the models in parallel, using the Multiprocessing
library. If `True`, the number of concurrent tasks will be equal to
one less than the number of available cores detected.
### Response:
def run_field_scan(ModelClass, model_kwargs, t_output_every, t_upto, field,
vals, force_resume=True, parallel=False):
"""Run many models with a range of parameter sets.
Parameters
----------
ModelClass: callable
A class or factory function that returns a model object by
calling `ModelClass(model_kwargs)`
model_kwargs: dict
See `ModelClass` explanation.
t_output_every: float
see :class:`Runner`.
t_upto: float
Run each model until the time is equal to this
field: str
The name of the field to be varied, whose values are in `vals`.
vals: array_like
Iterable of values to use to instantiate each Model object.
parallel: bool
Whether or not to run the models in parallel, using the Multiprocessing
library. If `True`, the number of concurrent tasks will be equal to
one less than the number of available cores detected.
"""
    model_kwarg_sets = [dict(model_kwargs, **{field: val}) for val in vals]
run_kwarg_scan(ModelClass, model_kwarg_sets,
t_output_every, t_upto, force_resume, parallel) |
def _auto_config(cla55: Type[T]) -> Config[T]:
"""
Create the ``Config`` for a class by reflecting on its ``__init__``
method and applying a few hacks.
"""
typ3 = _get_config_type(cla55)
# Don't include self, or vocab
names_to_ignore = {"self", "vocab"}
# Hack for RNNs
if cla55 in [torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU]:
cla55 = torch.nn.RNNBase
names_to_ignore.add("mode")
if isinstance(cla55, type):
# It's a class, so inspect its constructor
function_to_inspect = cla55.__init__
else:
# It's a function, so inspect it, and ignore tensor
function_to_inspect = cla55
names_to_ignore.add("tensor")
argspec = inspect.getfullargspec(function_to_inspect)
comments = _docspec_comments(cla55)
items: List[ConfigItem] = []
num_args = len(argspec.args)
defaults = list(argspec.defaults or [])
num_default_args = len(defaults)
num_non_default_args = num_args - num_default_args
# Required args all come first, default args at the end.
defaults = [_NO_DEFAULT for _ in range(num_non_default_args)] + defaults
for name, default in zip(argspec.args, defaults):
if name in names_to_ignore:
continue
annotation = argspec.annotations.get(name)
comment = comments.get(name)
# Don't include Model, the only place you'd specify that is top-level.
if annotation == Model:
continue
# Don't include DataIterator, the only place you'd specify that is top-level.
if annotation == DataIterator:
continue
# Don't include params for an Optimizer
if torch.optim.Optimizer in getattr(cla55, '__bases__', ()) and name == "params":
continue
# Don't include datasets in the trainer
if cla55 == Trainer and name.endswith("_dataset"):
continue
# Hack in our Optimizer class to the trainer
if cla55 == Trainer and annotation == torch.optim.Optimizer:
annotation = AllenNLPOptimizer
# Hack in embedding num_embeddings as optional (it can be inferred from the pretrained file)
if cla55 == Embedding and name == "num_embeddings":
default = None
items.append(ConfigItem(name, annotation, default, comment))
# More hacks, Embedding
if cla55 == Embedding:
items.insert(1, ConfigItem("pretrained_file", str, None))
return Config(items, typ3=typ3) | Create the ``Config`` for a class by reflecting on its ``__init__``
method and applying a few hacks. | Below is the instruction that describes the task:
### Input:
Create the ``Config`` for a class by reflecting on its ``__init__``
method and applying a few hacks.
### Response:
def _auto_config(cla55: Type[T]) -> Config[T]:
"""
Create the ``Config`` for a class by reflecting on its ``__init__``
method and applying a few hacks.
"""
typ3 = _get_config_type(cla55)
# Don't include self, or vocab
names_to_ignore = {"self", "vocab"}
# Hack for RNNs
if cla55 in [torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU]:
cla55 = torch.nn.RNNBase
names_to_ignore.add("mode")
if isinstance(cla55, type):
# It's a class, so inspect its constructor
function_to_inspect = cla55.__init__
else:
# It's a function, so inspect it, and ignore tensor
function_to_inspect = cla55
names_to_ignore.add("tensor")
argspec = inspect.getfullargspec(function_to_inspect)
comments = _docspec_comments(cla55)
items: List[ConfigItem] = []
num_args = len(argspec.args)
defaults = list(argspec.defaults or [])
num_default_args = len(defaults)
num_non_default_args = num_args - num_default_args
# Required args all come first, default args at the end.
defaults = [_NO_DEFAULT for _ in range(num_non_default_args)] + defaults
for name, default in zip(argspec.args, defaults):
if name in names_to_ignore:
continue
annotation = argspec.annotations.get(name)
comment = comments.get(name)
# Don't include Model, the only place you'd specify that is top-level.
if annotation == Model:
continue
# Don't include DataIterator, the only place you'd specify that is top-level.
if annotation == DataIterator:
continue
# Don't include params for an Optimizer
if torch.optim.Optimizer in getattr(cla55, '__bases__', ()) and name == "params":
continue
# Don't include datasets in the trainer
if cla55 == Trainer and name.endswith("_dataset"):
continue
# Hack in our Optimizer class to the trainer
if cla55 == Trainer and annotation == torch.optim.Optimizer:
annotation = AllenNLPOptimizer
# Hack in embedding num_embeddings as optional (it can be inferred from the pretrained file)
if cla55 == Embedding and name == "num_embeddings":
default = None
items.append(ConfigItem(name, annotation, default, comment))
# More hacks, Embedding
if cla55 == Embedding:
items.insert(1, ConfigItem("pretrained_file", str, None))
return Config(items, typ3=typ3) |
def on_clipboard_mode_change(self, clipboard_mode):
"""Notification when the shared clipboard mode changes.
in clipboard_mode of type :class:`ClipboardMode`
The new shared clipboard mode.
"""
if not isinstance(clipboard_mode, ClipboardMode):
raise TypeError("clipboard_mode can only be an instance of type ClipboardMode")
self._call("onClipboardModeChange",
in_p=[clipboard_mode]) | Notification when the shared clipboard mode changes.
in clipboard_mode of type :class:`ClipboardMode`
The new shared clipboard mode. | Below is the instruction that describes the task:
### Input:
Notification when the shared clipboard mode changes.
in clipboard_mode of type :class:`ClipboardMode`
The new shared clipboard mode.
### Response:
def on_clipboard_mode_change(self, clipboard_mode):
"""Notification when the shared clipboard mode changes.
in clipboard_mode of type :class:`ClipboardMode`
The new shared clipboard mode.
"""
if not isinstance(clipboard_mode, ClipboardMode):
raise TypeError("clipboard_mode can only be an instance of type ClipboardMode")
self._call("onClipboardModeChange",
in_p=[clipboard_mode]) |
def note_revert(self, note_id, version):
"""Function to revert a specific note (Requires login) (UNTESTED).
Parameters:
note_id (int): The note id to update.
version (int): The version to revert to.
"""
params = {'id': note_id, 'version': version}
return self._get('note/revert', params, method='PUT') | Function to revert a specific note (Requires login) (UNTESTED).
Parameters:
note_id (int): The note id to update.
version (int): The version to revert to. | Below is the instruction that describes the task:
### Input:
Function to revert a specific note (Requires login) (UNTESTED).
Parameters:
note_id (int): The note id to update.
version (int): The version to revert to.
### Response:
def note_revert(self, note_id, version):
"""Function to revert a specific note (Requires login) (UNTESTED).
Parameters:
note_id (int): The note id to update.
version (int): The version to revert to.
"""
params = {'id': note_id, 'version': version}
return self._get('note/revert', params, method='PUT') |
def clear_first_angle_projection(self):
"""stub"""
if (self.get_first_angle_projection_metadata().is_read_only() or
self.get_first_angle_projection_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['firstAngle'] = \
self._first_angle_metadata['default_boolean_values'][0] | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def clear_first_angle_projection(self):
"""stub"""
if (self.get_first_angle_projection_metadata().is_read_only() or
self.get_first_angle_projection_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['firstAngle'] = \
self._first_angle_metadata['default_boolean_values'][0] |
def _convert_from(data):
"""Internal function that will be hooked to the native `json.loads`
Find the right deserializer for a given value, taking into account
the internal deserializer registry.
"""
try:
module, klass_name = data['__class__'].rsplit('.', 1)
klass = getattr(import_module(module), klass_name)
except (ImportError, AttributeError, KeyError):
# But I still haven't found what I'm looking for
#
# Waiting for three different exceptions here. KeyError will
# raise if can't find the "__class__" entry in the json `data`
# dictionary. ImportError happens when the module present in the
# dotted name can't be resolved. Finally, the AttributeError
# happens when we can find the module, but couldn't find the
# class on it.
return data
return deserialize(klass, data['__value__']) | Internal function that will be hooked to the native `json.loads`
Find the right deserializer for a given value, taking into account
the internal deserializer registry. | Below is the instruction that describes the task:
### Input:
Internal function that will be hooked to the native `json.loads`
Find the right deserializer for a given value, taking into account
the internal deserializer registry.
### Response:
def _convert_from(data):
"""Internal function that will be hooked to the native `json.loads`
Find the right deserializer for a given value, taking into account
the internal deserializer registry.
"""
try:
module, klass_name = data['__class__'].rsplit('.', 1)
klass = getattr(import_module(module), klass_name)
except (ImportError, AttributeError, KeyError):
# But I still haven't found what I'm looking for
#
# Waiting for three different exceptions here. KeyError will
# raise if can't find the "__class__" entry in the json `data`
# dictionary. ImportError happens when the module present in the
# dotted name can't be resolved. Finally, the AttributeError
# happens when we can find the module, but couldn't find the
# class on it.
return data
return deserialize(klass, data['__value__']) |
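For context, a hook like this is normally installed through the object_hook parameter of json.loads, which invokes it for every decoded JSON object. A minimal self-contained sketch, with direct construction standing in for the library's deserialize() registry:

import json
from importlib import import_module

def convert_from(data):
    # Resolve "module.Class" from the "__class__" marker, as described above.
    try:
        module, klass_name = data['__class__'].rsplit('.', 1)
        klass = getattr(import_module(module), klass_name)
    except (ImportError, AttributeError, KeyError):
        return data  # not a tagged object; hand the plain dict back unchanged
    return klass(data['__value__'])  # stand-in for the deserialize() registry

# json.loads calls the hook on every decoded JSON object:
print(json.loads('{"__class__": "decimal.Decimal", "__value__": "1.5"}',
                 object_hook=convert_from))  # -> Decimal('1.5')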
def requires(self, require=None):
"""Requires
Sets the require rules used to validate the Parent
Arguments:
require {dict} -- A dictionary expressing requirements of fields
Raises:
ValueError
Returns:
None
"""
# If require is None, this is a getter
if require is None:
return self._requires
# If it's not a valid dict
if not isinstance(require, dict):
raise ValueError('__require__')
# Go through each key and make sure it goes with a field
for k,v in iteritems(require):
# If the field doesn't exist
if k not in self._nodes:
raise ValueError('__require__[%s]' % str(k))
# If the value is a string
if isinstance(v, basestring):
v = [v]
# Else if it's not a list type
elif not isinstance(v, (tuple,list)):
raise ValueError('__require__[%s]' % str(k))
# Make sure each required field also exists
for s in v:
if s not in self._nodes:
raise ValueError('__require__[%s]: %s' % (str(k), str(v)))
# If it's all good
self._requires[k] = v | Requires
Sets the require rules used to validate the Parent
Arguments:
require {dict} -- A dictionary expressing requirements of fields
Raises:
ValueError
Returns:
None | Below is the instruction that describes the task:
### Input:
Requires
Sets the require rules used to validate the Parent
Arguments:
require {dict} -- A dictionary expressing requirements of fields
Raises:
ValueError
Returns:
None
### Response:
def requires(self, require=None):
"""Requires
Sets the require rules used to validate the Parent
Arguments:
require {dict} -- A dictionary expressing requirements of fields
Raises:
ValueError
Returns:
None
"""
# If require is None, this is a getter
if require is None:
return self._requires
# If it's not a valid dict
if not isinstance(require, dict):
raise ValueError('__require__')
# Go through each key and make sure it goes with a field
for k,v in iteritems(require):
# If the field doesn't exist
if k not in self._nodes:
raise ValueError('__require__[%s]' % str(k))
# If the value is a string
if isinstance(v, basestring):
v = [v]
# Else if it's not a list type
elif not isinstance(v, (tuple,list)):
raise ValueError('__require__[%s]' % str(k))
# Make sure each required field also exists
for s in v:
if s not in self._nodes:
raise ValueError('__require__[%s]: %s' % (str(k), str(v)))
# If it's all good
self._requires[k] = v |
def category(self, category=None):
"""
If category is given, modify the URL correspondingly, return the
current category otherwise.
"""
if category is None:
return int(self.url.category)
self.url.category = str(category) | If category is given, modify the URL correspondingly, return the
current category otherwise. | Below is the instruction that describes the task:
### Input:
If category is given, modify the URL correspondingly, return the
current category otherwise.
### Response:
def category(self, category=None):
"""
If category is given, modify the URL correspondingly, return the
current category otherwise.
"""
if category is None:
return int(self.url.category)
self.url.category = str(category) |
def split(self, point=None):
"""
Split this sequence into two halves and return them. The original
sequence remains unmodified.
:param point: defines the split point, if None then the centre is used
:return: two Sequence objects -- one for each side
"""
if point is None:
point = len(self) / 2
r1 = Sequence(self.name + ".1", self.sequenceData[:point])
r2 = Sequence(self.name + ".2", self.sequenceData[point:])
return r1, r2 | Split this sequence into two halves and return them. The original
sequence remains unmodified.
:param point: defines the split point, if None then the centre is used
:return: two Sequence objects -- one for each side | Below is the instruction that describes the task:
### Input:
Split this sequence into two halves and return them. The original
sequence remains unmodified.
:param point: defines the split point, if None then the centre is used
:return: two Sequence objects -- one for each side
### Response:
def split(self, point=None):
"""
Split this sequence into two halves and return them. The original
sequence remains unmodified.
:param point: defines the split point, if None then the centre is used
:return: two Sequence objects -- one for each side
"""
if point is None:
point = len(self) / 2
r1 = Sequence(self.name + ".1", self.sequenceData[:point])
r2 = Sequence(self.name + ".2", self.sequenceData[point:])
return r1, r2 |
def connect_to_database_odbc_access(self,
dsn: str,
autocommit: bool = True) -> None:
"""Connects to an Access database via ODBC, with the DSN
prespecified."""
self.connect(engine=ENGINE_ACCESS, interface=INTERFACE_ODBC,
dsn=dsn, autocommit=autocommit) | Connects to an Access database via ODBC, with the DSN
prespecified. | Below is the instruction that describes the task:
### Input:
Connects to an Access database via ODBC, with the DSN
prespecified.
### Response:
def connect_to_database_odbc_access(self,
dsn: str,
autocommit: bool = True) -> None:
"""Connects to an Access database via ODBC, with the DSN
prespecified."""
self.connect(engine=ENGINE_ACCESS, interface=INTERFACE_ODBC,
dsn=dsn, autocommit=autocommit) |
def get_text(self):
'''
:returns:
a rendered string representation of the given row
'''
row_lines = []
for line in zip_longest(*[column.get_cell_lines() for column in self.columns], fillvalue=' '):
row_lines.append(' '.join(line))
return '\n'.join(row_lines) | :returns:
a rendered string representation of the given row | Below is the instruction that describes the task:
### Input:
:returns:
a rendered string representation of the given row
### Response:
def get_text(self):
'''
:returns:
a rendered string representation of the given row
'''
row_lines = []
for line in zip_longest(*[column.get_cell_lines() for column in self.columns], fillvalue=' '):
row_lines.append(' '.join(line))
return '\n'.join(row_lines) |
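The zip_longest call is what keeps ragged columns aligned: shorter columns are padded with fillvalue so every rendered line has one cell per column. A quick standalone illustration:

from itertools import zip_longest

columns = [['a1', 'a2', 'a3'], ['b1']]  # two columns of unequal height
for line in zip_longest(*columns, fillvalue=' '):
    print(' '.join(line))
# a1 b1
# a2     <- second column padded with the fillvalue
# a3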
def add_record(self, record):
"""Add or update a given DNS record"""
rec = self.get_record(record._record_type, record.host)
if rec:
rec = record
for i,r in enumerate(self._entries):
if r._record_type == record._record_type \
and r.host == record.host:
self._entries[i] = record
else:
self._entries.append(record)
self.sort()
return True | Add or update a given DNS record | Below is the instruction that describes the task:
### Input:
Add or update a given DNS record
### Response:
def add_record(self, record):
"""Add or update a given DNS record"""
rec = self.get_record(record._record_type, record.host)
if rec:
rec = record
for i,r in enumerate(self._entries):
if r._record_type == record._record_type \
and r.host == record.host:
self._entries[i] = record
else:
self._entries.append(record)
self.sort()
return True |
def generateDHCPOptionsTemplate(self, address_family):
"""
Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family
"""
from ns1.ipam import DHCPOptions
options = {}
for option in DHCPOptions.OPTIONS[address_family]:
options[option] = ""
return options | Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family | Below is the instruction that describes the task:
### Input:
Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family
### Response:
def generateDHCPOptionsTemplate(self, address_family):
"""
Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family
"""
from ns1.ipam import DHCPOptions
options = {}
for option in DHCPOptions.OPTIONS[address_family]:
options[option] = ""
return options |
def create_response_signature(self, string_message, zone):
""" Basic helper function to keep code clean for defining a response message signature """
zz = ''
if zone is not None:
zz = hex(int(zone)-1).replace('0x', '') # RNET requires zone value to be zero based
string_message = string_message.replace('@zz', zz) # Replace zone parameter
return string_message | Basic helper function to keep code clean for defining a response message signature | Below is the instruction that describes the task:
### Input:
Basic helper function to keep code clean for defining a response message signature
### Response:
def create_response_signature(self, string_message, zone):
""" Basic helper function to keep code clean for defining a response message signature """
zz = ''
if zone is not None:
zz = hex(int(zone)-1).replace('0x', '') # RNET requires zone value to be zero based
string_message = string_message.replace('@zz', zz) # Replace zone parameter
return string_message |
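For example, the zero-based hex conversion turns zone 2 into the digit '1' before substitution (the message template here is hypothetical):

zone = 2
zz = hex(int(zone) - 1).replace('0x', '')  # '1'
print('F0 @zz 7F'.replace('@zz', zz))      # F0 1 7F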
def trip(self, origin_id, dest_id, date=None):
""" trip """
date = date if date else datetime.now()
response = self._request(
'trip',
originId=origin_id,
destId=dest_id,
date=date.strftime(DATE_FORMAT),
time=date.strftime(TIME_FORMAT))
return _get_node(response, 'TripList', 'Trip') | trip | Below is the instruction that describes the task:
### Input:
trip
### Response:
def trip(self, origin_id, dest_id, date=None):
""" trip """
date = date if date else datetime.now()
response = self._request(
'trip',
originId=origin_id,
destId=dest_id,
date=date.strftime(DATE_FORMAT),
time=date.strftime(TIME_FORMAT))
return _get_node(response, 'TripList', 'Trip') |
def parallel_townsend_lsp(times, mags, startp, endp,
stepsize=1.0e-4,
nworkers=4):
'''
This calculates the Lomb-Scargle periodogram for the frequencies
corresponding to the period interval (startp, endp) using a frequency step
size of stepsize cycles/day. This uses the algorithm in Townsend 2010.
'''
# make sure there are no nans anywhere
finiteind = np.isfinite(times) & np.isfinite(mags)
ftimes, fmags = times[finiteind], mags[finiteind]
# renormalize the mags to zero and scale them so that the variance = 1
nmags = (fmags - np.median(fmags))/np.std(fmags)
startf = 1.0/endp
endf = 1.0/startp
omegas = 2*np.pi*np.arange(startf, endf, stepsize)
# parallel map the lsp calculations
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
tasks = [(ftimes, nmags, x) for x in omegas]
lsp = pool.map(townsend_lombscargle_wrapper, tasks)
pool.close()
pool.join()
return np.array(omegas), np.array(lsp) | This calculates the Lomb-Scargle periodogram for the frequencies
corresponding to the period interval (startp, endp) using a frequency step
size of stepsize cycles/day. This uses the algorithm in Townsend 2010. | Below is the instruction that describes the task:
### Input:
This calculates the Lomb-Scargle periodogram for the frequencies
corresponding to the period interval (startp, endp) using a frequency step
size of stepsize cycles/day. This uses the algorithm in Townsend 2010.
### Response:
def parallel_townsend_lsp(times, mags, startp, endp,
stepsize=1.0e-4,
nworkers=4):
'''
This calculates the Lomb-Scargle periodogram for the frequencies
corresponding to the period interval (startp, endp) using a frequency step
size of stepsize cycles/day. This uses the algorithm in Townsend 2010.
'''
# make sure there are no nans anywhere
finiteind = np.isfinite(times) & np.isfinite(mags)
ftimes, fmags = times[finiteind], mags[finiteind]
# renormalize the mags to zero and scale them so that the variance = 1
nmags = (fmags - np.median(fmags))/np.std(fmags)
startf = 1.0/endp
endf = 1.0/startp
omegas = 2*np.pi*np.arange(startf, endf, stepsize)
# parallel map the lsp calculations
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
tasks = [(ftimes, nmags, x) for x in omegas]
lsp = pool.map(townsend_lombscargle_wrapper, tasks)
pool.close()
pool.join()
return np.array(omegas), np.array(lsp) |
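A rough standalone sketch of the same fan-out pattern, with a simplified per-frequency power estimate (omitting the tau phase correction of the full Lomb-Scargle statistic) standing in for townsend_lombscargle_wrapper:

import numpy as np
from multiprocessing import Pool

def lsp_at_omega(task):
    # Simplified stand-in for townsend_lombscargle_wrapper: one power value per omega.
    times, mags, omega = task
    c = np.sum(mags * np.cos(omega * times)) ** 2 / np.sum(np.cos(omega * times) ** 2)
    s = np.sum(mags * np.sin(omega * times)) ** 2 / np.sum(np.sin(omega * times) ** 2)
    return 0.5 * (c + s)

if __name__ == '__main__':
    t = np.sort(np.random.uniform(0, 100, 500))
    m = np.sin(2 * np.pi * t / 3.0) + 0.1 * np.random.randn(500)
    m = (m - np.median(m)) / np.std(m)  # same normalization as above
    omegas = 2 * np.pi * np.arange(1 / 10., 1 / 1., 1e-3)
    pool = Pool(4)
    lsp = pool.map(lsp_at_omega, [(t, m, w) for w in omegas])
    pool.close()
    pool.join()
    # the peak should sit near the injected frequency of 1/3 cycles per unit time
    print(omegas[int(np.argmax(lsp))] / (2 * np.pi))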
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
"""
G = graph.keys()
W = adjacency (graph, directed=True, reversed=reversed)
def _normalize(x):
s = sum(x.values())
if s != 0: s = 1.0 / s
for k in x:
x[k] *= s
x = start
if x is None:
x = dict([(n, random()) for n in G])
_normalize(x)
# Power method: y = Ax multiplication.
for i in range(iterations):
x0 = x
x = dict.fromkeys(x0.keys(), 0)
for n in x:
for nbr in W[n]:
r = 1
if rating.has_key(n): r = rating[n]
x[n] += 0.01 + x0[nbr] * W[n][nbr] * r
_normalize(x)
e = sum([abs(x[n]-x0[n]) for n in x])
if e < len(graph.nodes) * tolerance:
if normalized:
# Normalize between 0.0 and 1.0.
m = max(x.values())
if m == 0: m = 1
x = dict([(id, w/m) for id, w in x.iteritems()])
return x
#raise NoConvergenceError
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict([(n, 0) for n in G]) | Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py | Below is the instruction that describes the task:
### Input:
Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
### Response:
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
"""
G = graph.keys()
W = adjacency (graph, directed=True, reversed=reversed)
def _normalize(x):
s = sum(x.values())
if s != 0: s = 1.0 / s
for k in x:
x[k] *= s
x = start
if x is None:
x = dict([(n, random()) for n in G])
_normalize(x)
# Power method: y = Ax multiplication.
for i in range(iterations):
x0 = x
x = dict.fromkeys(x0.keys(), 0)
for n in x:
for nbr in W[n]:
r = 1
if rating.has_key(n): r = rating[n]
x[n] += 0.01 + x0[nbr] * W[n][nbr] * r
_normalize(x)
e = sum([abs(x[n]-x0[n]) for n in x])
if e < len(graph.nodes) * tolerance:
if normalized:
# Normalize between 0.0 and 1.0.
m = max(x.values())
if m == 0: m = 1
x = dict([(id, w/m) for id, w in x.iteritems()])
return x
#raise NoConvergenceError
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict([(n, 0) for n in G]) |
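To make the power-iteration idea concrete, here is a minimal NumPy sketch on a three-node directed graph (a plain matrix-vector loop, not the library's adjacency/rating machinery):

import numpy as np

# A[i, j] = 1 means an edge from node i to node j; scoring rewards incoming edges.
A = np.array([[0., 1., 1.],
              [0., 0., 1.],
              [1., 0., 0.]])
x = np.full(3, 1. / 3.)                  # starting vector, like the random start dict
for _ in range(100):
    x_new = A.T.dot(x)                   # accumulate score from in-neighbours
    x_new /= x_new.sum()                 # L1-normalize, mirroring _normalize()
    if np.abs(x_new - x).sum() < 1e-6:   # convergence test, like e < n * tolerance
        break
    x = x_new
print(x / x.max())                       # rescale to [0, 1] as normalized=True does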
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 tensor with shape
[batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", [logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights | Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 tensor with shape
[batch_size, max(length_logits, length_labels)] | Below is the instruction that describes the task:
### Input:
Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 tensor with shape
[batch_size, max(length_logits, length_labels)]
### Response:
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 tensor with shape
[batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", [logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights |
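The smoothing arithmetic is easy to sanity-check outside TensorFlow; a small NumPy sketch of the soft target and the normalizing constant for one label:

import numpy as np

vocab_size, smoothing, label = 5, 0.1, 2
confidence = 1.0 - smoothing
low_confidence = smoothing / (vocab_size - 1)

target = np.full(vocab_size, low_confidence)  # off_value on every class...
target[label] = confidence                    # ...except the true one
print(target)                                 # [0.025 0.025 0.9 0.025 0.025]

# The lowest achievable cross entropy against this soft target, subtracted above
# so a perfect prediction scores a loss of zero:
norm = -(confidence * np.log(confidence)
         + (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20))
print(norm)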
def add_yaml_file_content_to_database(
self,
filepath,
deleteFile=False
):
"""*given a file to a yaml file, add yaml file content to database*
**Key Arguments:**
- ``filepath`` -- the path to the yaml file
- ``deleteFile`` -- delete the yaml file when its content has been added to the database. Default *False*
**Return:**
- None
**Usage:**
To parse and import the contents of a single yaml file into the database, use the following:
.. code-block:: python
from fundamentals.mysql import yaml_to_database
# PARSE YAML FILE CONTENTS AND ADD TO DATABASE
yaml2db = yaml_to_database(
log=log,
settings=settings,
dbConn=dbConn
)
yaml2db.add_yaml_file_content_to_database(
filepath=${1:"/path/to/file.yaml"},
deleteFile=True
)
"""
        self.log.debug(
            'starting the ``add_yaml_file_content_to_database`` method')
import codecs
try:
self.log.debug("attempting to open the file %s" % (filepath,))
readFile = codecs.open(filepath, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (filepath,)
self.log.critical(message)
raise IOError(message)
readFile.close()
matchObject = re.finditer(
r'(^|\n)(?P<key>[^\:]*)\:\s(?P<value>.*?)(\n|$)',
thisData,
flags=re.M | re.S # re.S
)
yamlContent = {}
for match in matchObject:
if match.group("value")[0] == '"' and match.group("value")[-1] == '"':
v = match.group("value")[1:-1]
elif match.group("value")[0] == "'" and match.group("value")[-1] == "'":
v = match.group("value")[1:-1]
else:
v = match.group("value")
yamlContent[match.group("key")] = v
if "table" not in yamlContent:
self.log.warning(
                'A table value is needed in the yaml content to indicate which database table to add the content to: %(filepath)s' % locals())
return None
# NOTE THERE MAY BE MORE THAN ONE DATABASE TABLE
dbTablesTmp = yamlContent["table"].split(",")
del yamlContent["table"]
dbTables = []
dbTables[:] = [d.strip() for d in dbTablesTmp]
# UNSHORTEN URL
try:
r = requests.head(yamlContent["url"], allow_redirects=True)
yamlContent["url"] = r.url
except:
pass
yamlContent["original_yaml_path"] = filepath
if "url" in yamlContent:
uniqueKeyList = ["url"]
else:
uniqueKeyList = []
for t in dbTables:
convert_dictionary_to_mysql_table(
dbConn=self.dbConn,
log=self.log,
dictionary=yamlContent,
dbTableName=t,
uniqueKeyList=uniqueKeyList,
dateModified=True,
returnInsertOnly=False,
replace=True
)
if deleteFile:
os.remove(filepath)
self.log.debug(
'completed the ``add_yaml_file_content_to_database`` method')
return None | *given a path to a yaml file, add yaml file content to database*
**Key Arguments:**
- ``filepath`` -- the path to the yaml file
- ``deleteFile`` -- delete the yaml file when its content has been added to the database. Default *False*
**Return:**
- None
**Usage:**
To parse and import the contents of a single yaml file into the database, use the following:
.. code-block:: python
from fundamentals.mysql import yaml_to_database
# PARSE YAML FILE CONTENTS AND ADD TO DATABASE
yaml2db = yaml_to_database(
log=log,
settings=settings,
dbConn=dbConn
)
yaml2db.add_yaml_file_content_to_database(
filepath=${1:"/path/to/file.yaml"},
deleteFile=True
) | Below is the instruction that describes the task:
### Input:
*given a path to a yaml file, add yaml file content to database*
**Key Arguments:**
- ``filepath`` -- the path to the yaml file
- ``deleteFile`` -- delete the yaml file when its content has been added to the database. Default *False*
**Return:**
- None
**Usage:**
To parse and import the contents of a single yaml file into the database, use the following:
.. code-block:: python
from fundamentals.mysql import yaml_to_database
# PARSE YAML FILE CONTENTS AND ADD TO DATABASE
yaml2db = yaml_to_database(
log=log,
settings=settings,
dbConn=dbConn
)
yaml2db.add_yaml_file_content_to_database(
filepath=${1:"/path/to/file.yaml"},
deleteFile=True
)
### Response:
def add_yaml_file_content_to_database(
self,
filepath,
deleteFile=False
):
"""*given a file to a yaml file, add yaml file content to database*
**Key Arguments:**
- ``filepath`` -- the path to the yaml file
- ``deleteFile`` -- delete the yaml file when its content has been added to the database. Default *False*
**Return:**
- None
**Usage:**
To parse and import the contents of a single yaml file into the database, use the following:
.. code-block:: python
from fundamentals.mysql import yaml_to_database
# PARSE YAML FILE CONTENTS AND ADD TO DATABASE
yaml2db = yaml_to_database(
log=log,
settings=settings,
dbConn=dbConn
)
yaml2db.add_yaml_file_content_to_database(
filepath=${1:"/path/to/file.yaml"},
deleteFile=True
)
"""
        self.log.debug(
            'starting the ``add_yaml_file_content_to_database`` method')
import codecs
try:
self.log.debug("attempting to open the file %s" % (filepath,))
readFile = codecs.open(filepath, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (filepath,)
self.log.critical(message)
raise IOError(message)
readFile.close()
matchObject = re.finditer(
r'(^|\n)(?P<key>[^\:]*)\:\s(?P<value>.*?)(\n|$)',
thisData,
flags=re.M | re.S # re.S
)
yamlContent = {}
for match in matchObject:
if match.group("value")[0] == '"' and match.group("value")[-1] == '"':
v = match.group("value")[1:-1]
elif match.group("value")[0] == "'" and match.group("value")[-1] == "'":
v = match.group("value")[1:-1]
else:
v = match.group("value")
yamlContent[match.group("key")] = v
if "table" not in yamlContent:
self.log.warning(
                'A table value is needed in the yaml content to indicate which database table to add the content to: %(filepath)s' % locals())
return None
# NOTE THERE MAY BE MORE THAN ONE DATABASE TABLE
dbTablesTmp = yamlContent["table"].split(",")
del yamlContent["table"]
dbTables = []
dbTables[:] = [d.strip() for d in dbTablesTmp]
# UNSHORTEN URL
try:
r = requests.head(yamlContent["url"], allow_redirects=True)
yamlContent["url"] = r.url
except:
pass
yamlContent["original_yaml_path"] = filepath
if "url" in yamlContent:
uniqueKeyList = ["url"]
else:
uniqueKeyList = []
for t in dbTables:
convert_dictionary_to_mysql_table(
dbConn=self.dbConn,
log=self.log,
dictionary=yamlContent,
dbTableName=t,
uniqueKeyList=uniqueKeyList,
dateModified=True,
returnInsertOnly=False,
replace=True
)
if deleteFile:
os.remove(filepath)
self.log.debug(
'completed the ``add_yaml_file_content_to_database`` method')
return None |
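The key/value regex above can be exercised on its own; a short sketch with a made-up payload (note the real method additionally strips surrounding quotes from values):

import re

sample = 'title: "My Gist"\nurl: http://example.com/page\ntable: pages'
pattern = r'(^|\n)(?P<key>[^\:]*)\:\s(?P<value>.*?)(\n|$)'
content = {m.group('key'): m.group('value')
           for m in re.finditer(pattern, sample, flags=re.M | re.S)}
print(content)
# {'title': '"My Gist"', 'url': 'http://example.com/page', 'table': 'pages'}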
def key(self):
"""Embedded supports curies."""
if self.curie is None:
return self.name
return ":".join((self.curie.name, self.name)) | Embedded supports curies. | Below is the the instruction that describes the task:
### Input:
Embedded supports curies.
### Response:
def key(self):
"""Embedded supports curies."""
if self.curie is None:
return self.name
return ":".join((self.curie.name, self.name)) |
def update_field_forward_refs(field: 'Field', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this Field, globalns and localns.
"""
if type(field.type_) == ForwardRef:
field.type_ = field.type_._evaluate(globalns, localns or None) # type: ignore
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns) | Try to update ForwardRefs on fields based on this Field, globalns and localns. | Below is the instruction that describes the task:
### Input:
Try to update ForwardRefs on fields based on this Field, globalns and localns.
### Response:
def update_field_forward_refs(field: 'Field', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this Field, globalns and localns.
"""
if type(field.type_) == ForwardRef:
field.type_ = field.type_._evaluate(globalns, localns or None) # type: ignore
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns) |
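In pydantic v1 this helper runs under the hood when a model resolves self-referential annotations; a usage sketch assuming pydantic v1's public update_forward_refs API:

from typing import List
from pydantic import BaseModel

class Node(BaseModel):
    value: int
    children: List['Node'] = []  # 'Node' is still a ForwardRef at class-creation time

Node.update_forward_refs()       # resolves the ForwardRef on every field
print(Node(value=1, children=[{'value': 2}]))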
def add_parameters(url, parameters):
"""
Appends URL-encoded parameters to the base URL. It appends after '&' if
'?' is found in the URL; otherwise it appends using '?'. Keep in mind that
this tag does not take into account the value of existing params; it is
therefore possible to add another value for a pre-existing parameter.
For example::
{% url 'this_view' as current_url %}
{% with complete_url=current_url|add_parameters:request.GET %}
The <a href="{% url 'other' %}?next={{ complete_url|urlencode }}">
other page</a> will redirect back to the current page (including
any GET parameters).
{% endwith %}
"""
if parameters:
sep = '&' if '?' in url else '?'
return '{0}{1}{2}'.format(url, sep, urlencode(parameters))
return url | Appends URL-encoded parameters to the base URL. It appends after '&' if
'?' is found in the URL; otherwise it appends using '?'. Keep in mind that
this tag does not take into account the value of existing params; it is
therefore possible to add another value for a pre-existing parameter.
For example::
{% url 'this_view' as current_url %}
{% with complete_url=current_url|add_parameters:request.GET %}
The <a href="{% url 'other' %}?next={{ complete_url|urlencode }}">
other page</a> will redirect back to the current page (including
any GET parameters).
{% endwith %} | Below is the instruction that describes the task:
### Input:
Appends URL-encoded parameters to the base URL. It appends after '&' if
'?' is found in the URL; otherwise it appends using '?'. Keep in mind that
this tag does not take into account the value of existing params; it is
therefore possible to add another value for a pre-existing parameter.
For example::
{% url 'this_view' as current_url %}
{% with complete_url=current_url|add_parameters:request.GET %}
The <a href="{% url 'other' %}?next={{ complete_url|urlencode }}">
other page</a> will redirect back to the current page (including
any GET parameters).
{% endwith %}
### Response:
def add_parameters(url, parameters):
"""
Appends URL-encoded parameters to the base URL. It appends after '&' if
'?' is found in the URL; otherwise it appends using '?'. Keep in mind that
this tag does not take into account the value of existing params; it is
therefore possible to add another value for a pre-existing parameter.
For example::
{% url 'this_view' as current_url %}
{% with complete_url=current_url|add_parameters:request.GET %}
The <a href="{% url 'other' %}?next={{ complete_url|urlencode }}">
other page</a> will redirect back to the current page (including
any GET parameters).
{% endwith %}
"""
if parameters:
sep = '&' if '?' in url else '?'
return '{0}{1}{2}'.format(url, sep, urlencode(parameters))
return url |
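A quick check of the separator logic (urlencode here is the standard-library helper; Django's template tag would import its own equivalent):

from urllib.parse import urlencode  # Python 3

def add_parameters(url, parameters):
    if parameters:
        sep = '&' if '?' in url else '?'
        return '{0}{1}{2}'.format(url, sep, urlencode(parameters))
    return url

print(add_parameters('/search', {'q': 'x'}))       # /search?q=x
print(add_parameters('/search?q=x', {'page': 2}))  # /search?q=x&page=2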
def cli(self):
"""Read program parameters from command line and configuration files
We support both command line arguments and configuration files.
The command line arguments have always precedence over any
configuration file parameters. This allows us to have most of
our options in files but override them individually from the command
line.
"""
# first we parse only for a configuration file with an initial parser
init_parser = argparse.ArgumentParser(
description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter,
add_help = False)
# we don't use a file metavar because we need to support both json/yml
init_parser.add_argument("-c", "--config", action="store",
help = "read from target configuration file")
args, remaining_args = init_parser.parse_known_args()
# read from supplied configuration file or try to find one in the
# current working directory
reader = None
config_file = args.config or self.detect_config()
if config_file:
with open(config_file, 'r') as f:
reader = ConfigReader(f.read())
# implement the rest cli options
parser = argparse.ArgumentParser(
parents = [init_parser],
add_help = True,
description = "Static wiki generator using Github's gists as pages")
application_opts = reader.application if reader else {}
default_options = self.merge_with_default_options(application_opts)
parser.set_defaults(**default_options)
parser.add_argument("-v", "--version", action="version",
version="%(prog)s {0}".format(__version__))
parser.add_argument("-t", "--templates", action="store",
help="read templates from specified folder")
parser.add_argument("-o", "--output", action="store",
help="generate static files in target folder")
parser.add_argument("-u", "--baseurl", action="store",
help="use a specific base URL instead of /")
if reader:
self.config = reader.config
self.config['app'] = vars(parser.parse_args(remaining_args))
# parsing of command line arguments is done; check if we have gists
if 'gists' not in self.config:
raise WigikiConfigError("Cannot read gists. "
"Check your configuration file.") | Read program parameters from command line and configuration files
We support both command line arguments and configuration files.
The command line arguments have always precedence over any
configuration file parameters. This allows us to have most of
our options in files but override them individually from the command
line. | Below is the instruction that describes the task:
### Input:
Read program parameters from command line and configuration files
We support both command line arguments and configuration files.
The command line arguments have always precedence over any
configuration file parameters. This allows us to have most of
our options in files but override them individually from the command
line.
### Response:
def cli(self):
"""Read program parameters from command line and configuration files
We support both command line arguments and configuration files.
The command line arguments have always precedence over any
configuration file parameters. This allows us to have most of
our options in files but override them individually from the command
line.
"""
# first we parse only for a configuration file with an initial parser
init_parser = argparse.ArgumentParser(
description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter,
add_help = False)
# we don't use a file metavar because we need to support both json/yml
init_parser.add_argument("-c", "--config", action="store",
help = "read from target configuration file")
args, remaining_args = init_parser.parse_known_args()
# read from supplied configuration file or try to find one in the
# current working directory
reader = None
config_file = args.config or self.detect_config()
if config_file:
with open(config_file, 'r') as f:
reader = ConfigReader(f.read())
# implement the rest cli options
parser = argparse.ArgumentParser(
parents = [init_parser],
add_help = True,
description = "Static wiki generator using Github's gists as pages")
application_opts = reader.application if reader else {}
default_options = self.merge_with_default_options(application_opts)
parser.set_defaults(**default_options)
parser.add_argument("-v", "--version", action="version",
version="%(prog)s {0}".format(__version__))
parser.add_argument("-t", "--templates", action="store",
help="read templates from specified folder")
parser.add_argument("-o", "--output", action="store",
help="generate static files in target folder")
parser.add_argument("-u", "--baseurl", action="store",
help="use a specific base URL instead of /")
if reader:
self.config = reader.config
self.config['app'] = vars(parser.parse_args(remaining_args))
# parsing of command line arguments is done; check if we have gists
if 'gists' not in self.config:
raise WigikiConfigError("Cannot read gists. "
"Check your configuration file.") |
def update_metadata_for_node(self, node, metadata):
"""
Updates the existing metadata for the specified node with
the supplied dictionary.
"""
return self.manager.update_metadata(self, metadata, node=node) | Updates the existing metadata for the specified node with
the supplied dictionary. | Below is the the instruction that describes the task:
### Input:
Updates the existing metadata for the specified node with
the supplied dictionary.
### Response:
def update_metadata_for_node(self, node, metadata):
"""
Updates the existing metadata for the specified node with
the supplied dictionary.
"""
return self.manager.update_metadata(self, metadata, node=node) |
def _group_percentile(self, clusters, adj_list, counts):
''' Return "groups" for the percentile method. Note
that grouping isn't really compatible with the percentile
method. This just returns the retained UMIs in a structure similar
to other methods '''
retained_umis = self._get_best_percentile(clusters, counts)
groups = [[x] for x in retained_umis]
return groups | Return "groups" for the percentile method. Note
that grouping isn't really compatible with the percentile
method. This just returns the retained UMIs in a structure similar
to other methods | Below is the instruction that describes the task:
### Input:
Return "groups" for the the percentile method. Note
that grouping isn't really compatible with the percentile
method. This just returns the retained UMIs in a structure similar
to other methods
### Response:
def _group_percentile(self, clusters, adj_list, counts):
''' Return "groups" for the percentile method. Note
that grouping isn't really compatible with the percentile
method. This just returns the retained UMIs in a structure similar
to other methods '''
retained_umis = self._get_best_percentile(clusters, counts)
groups = [[x] for x in retained_umis]
return groups |
def headers(self):
"""
Return only the headers as Python object
"""
d = {}
for k, v in self.message.items():
d[k] = decode_header_part(v)
return d | Return only the headers as Python object | Below is the instruction that describes the task:
### Input:
Return only the headers as Python object
### Response:
def headers(self):
"""
Return only the headers as Python object
"""
d = {}
for k, v in self.message.items():
d[k] = decode_header_part(v)
return d |
def UpdateProtoResources(self, status):
"""Save cpu and network stats, check limits."""
user_cpu = status.cpu_time_used.user_cpu_time
system_cpu = status.cpu_time_used.system_cpu_time
self.context.client_resources.cpu_usage.user_cpu_time += user_cpu
self.context.client_resources.cpu_usage.system_cpu_time += system_cpu
user_cpu_total = self.context.client_resources.cpu_usage.user_cpu_time
system_cpu_total = self.context.client_resources.cpu_usage.system_cpu_time
self.context.network_bytes_sent += status.network_bytes_sent
if self.runner_args.cpu_limit:
if self.runner_args.cpu_limit < (user_cpu_total + system_cpu_total):
# We have exceeded our limit, stop this flow.
raise FlowRunnerError("CPU limit exceeded.")
if self.runner_args.network_bytes_limit:
if (self.runner_args.network_bytes_limit <
self.context.network_bytes_sent):
# We have exceeded our byte limit, stop this flow.
raise FlowRunnerError("Network bytes limit exceeded.") | Save cpu and network stats, check limits. | Below is the the instruction that describes the task:
### Input:
Save cpu and network stats, check limits.
### Response:
def UpdateProtoResources(self, status):
"""Save cpu and network stats, check limits."""
user_cpu = status.cpu_time_used.user_cpu_time
system_cpu = status.cpu_time_used.system_cpu_time
self.context.client_resources.cpu_usage.user_cpu_time += user_cpu
self.context.client_resources.cpu_usage.system_cpu_time += system_cpu
user_cpu_total = self.context.client_resources.cpu_usage.user_cpu_time
system_cpu_total = self.context.client_resources.cpu_usage.system_cpu_time
self.context.network_bytes_sent += status.network_bytes_sent
if self.runner_args.cpu_limit:
if self.runner_args.cpu_limit < (user_cpu_total + system_cpu_total):
# We have exceeded our limit, stop this flow.
raise FlowRunnerError("CPU limit exceeded.")
if self.runner_args.network_bytes_limit:
if (self.runner_args.network_bytes_limit <
self.context.network_bytes_sent):
# We have exceeded our byte limit, stop this flow.
raise FlowRunnerError("Network bytes limit exceeded.") |
def iter_network_events(self, number=-1, etag=None):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event, etag) | Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
    :returns: generator of :class:`Event <github3.events.Event>`\ s | Below is the instruction that describes the task:
### Input:
Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Event <github3.events.Event>`\ s
### Response:
def iter_network_events(self, number=-1, etag=None):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event, etag) |
def run_network(self, traj):
"""Top-level simulation function, pass this to the environment
Performs an individual network run during parameter exploration.
`run_network` does not need to be called by the user. If this
method (not this one of the NetworkManager)
is passed to an :class:`~pypet.environment.Environment` with this NetworkManager,
`run_network` and :func:`~pypet.brian2.network.NetworkManager.build`
are automatically called for each individual experimental run.
This function will create a new BRIAN2 network in case one was not pre-run.
The execution of the network run is carried out by
    the :class:`~pypet.brian2.network.NetworkRunner` and its
:func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take
a look at this function's documentation to see the structure of a network run).
:param traj: Trajectory container
"""
# Check if the network was pre-built
if self._pre_built:
if self._pre_run and hasattr(self._network, 'restore'):
self._network.restore('pre_run')
            # Temporary fix for https://github.com/brian-team/brian2/issues/681
self._network.store('pre_run')
self._run_network(traj)
else:
self._run_network(traj) | Top-level simulation function, pass this to the environment
Performs an individual network run during parameter exploration.
`run_network` does not need to be called by the user. If this
method (not this one of the NetworkManager)
is passed to an :class:`~pypet.environment.Environment` with this NetworkManager,
`run_network` and :func:`~pypet.brian2.network.NetworkManager.build`
are automatically called for each individual experimental run.
This function will create a new BRIAN2 network in case one was not pre-run.
The execution of the network run is carried out by
the :class:`~pypet.brian2.network.NetworkRunner` and its
:func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take
a look at this function's documentation to see the structure of a network run).
:param traj: Trajectory container | Below is the instruction that describes the task:
### Input:
Top-level simulation function, pass this to the environment
Performs an individual network run during parameter exploration.
`run_network` does not need to be called by the user. If this
method (not this one of the NetworkManager)
is passed to an :class:`~pypet.environment.Environment` with this NetworkManager,
`run_network` and :func:`~pypet.brian2.network.NetworkManager.build`
are automatically called for each individual experimental run.
This function will create a new BRIAN2 network in case one was not pre-run.
The execution of the network run is carried out by
the :class:`~pypet.brian2.network.NetworkRunner` and its
:func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take
a look at this function's documentation to see the structure of a network run).
:param traj: Trajectory container
### Response:
def run_network(self, traj):
"""Top-level simulation function, pass this to the environment
Performs an individual network run during parameter exploration.
`run_network` does not need to be called by the user. If this
method (not this one of the NetworkManager)
is passed to an :class:`~pypet.environment.Environment` with this NetworkManager,
`run_network` and :func:`~pypet.brian2.network.NetworkManager.build`
are automatically called for each individual experimental run.
This function will create a new BRIAN2 network in case one was not pre-run.
The execution of the network run is carried out by
    the :class:`~pypet.brian2.network.NetworkRunner` and its
:func:`~pypet.brian2.network.NetworkRunner.execute_network_run` (also take
a look at this function's documentation to see the structure of a network run).
:param traj: Trajectory container
"""
# Check if the network was pre-built
if self._pre_built:
if self._pre_run and hasattr(self._network, 'restore'):
self._network.restore('pre_run')
            # Temporary fix for https://github.com/brian-team/brian2/issues/681
self._network.store('pre_run')
self._run_network(traj)
else:
self._run_network(traj) |
def parse_indented_config(config, current_indent=0, previous_indent=0, nested=False):
"""
    This method basically reads a configuration that conforms to a very poor industry standard
and returns a nested structure that behaves like a dict. For example:
{'enable password whatever': {},
'interface GigabitEthernet1': {
'description "bleh"': {},
'fake nested': {
'nested nested configuration': {}},
'switchport mode trunk': {}},
'interface GigabitEthernet2': {
'no ip address': {}},
'interface GigabitEthernet3': {
'negotiation auto': {},
'no ip address': {},
'shutdown': {}},
'interface Loopback0': {
'description "blah"': {}}}
"""
parsed = OrderedDict()
while True:
if not config:
break
line = config.pop(0)
if line.lstrip().startswith("!"):
continue
last = line.lstrip()
leading_spaces = len(line) - len(last)
# print("current_indent:{}, previous:{}, leading:{} - {}".format(
# current_indent, previous_indent, leading_spaces, line))
if leading_spaces > current_indent:
current = parse_indented_config(
config, leading_spaces, current_indent, True
)
_attach_data_to_path(parsed, last, current, nested)
elif leading_spaces < current_indent:
config.insert(0, line)
break
else:
if not nested:
current = parse_indented_config(
config, leading_spaces, current_indent, True
)
_attach_data_to_path(parsed, last, current, nested)
else:
config.insert(0, line)
break
    return parsed | This method basically reads a configuration that conforms to a very poor industry standard
and returns a nested structure that behaves like a dict. For example:
{'enable password whatever': {},
'interface GigabitEthernet1': {
'description "bleh"': {},
'fake nested': {
'nested nested configuration': {}},
'switchport mode trunk': {}},
'interface GigabitEthernet2': {
'no ip address': {}},
'interface GigabitEthernet3': {
'negotiation auto': {},
'no ip address': {},
'shutdown': {}},
'interface Loopback0': {
'description "blah"': {}}} | Below is the the instruction that describes the task:
### Input:
This method basically reads a configuration that conforms to a very poor industry standard
and returns a nested structure that behaves like a dict. For example:
{'enable password whatever': {},
'interface GigabitEthernet1': {
'description "bleh"': {},
'fake nested': {
'nested nested configuration': {}},
'switchport mode trunk': {}},
'interface GigabitEthernet2': {
'no ip address': {}},
'interface GigabitEthernet3': {
'negotiation auto': {},
'no ip address': {},
'shutdown': {}},
'interface Loopback0': {
'description "blah"': {}}}
### Response:
def parse_indented_config(config, current_indent=0, previous_indent=0, nested=False):
"""
    This method basically reads a configuration that conforms to a very poor industry standard
and returns a nested structure that behaves like a dict. For example:
{'enable password whatever': {},
'interface GigabitEthernet1': {
'description "bleh"': {},
'fake nested': {
'nested nested configuration': {}},
'switchport mode trunk': {}},
'interface GigabitEthernet2': {
'no ip address': {}},
'interface GigabitEthernet3': {
'negotiation auto': {},
'no ip address': {},
'shutdown': {}},
'interface Loopback0': {
'description "blah"': {}}}
"""
parsed = OrderedDict()
while True:
if not config:
break
line = config.pop(0)
if line.lstrip().startswith("!"):
continue
last = line.lstrip()
leading_spaces = len(line) - len(last)
# print("current_indent:{}, previous:{}, leading:{} - {}".format(
# current_indent, previous_indent, leading_spaces, line))
if leading_spaces > current_indent:
current = parse_indented_config(
config, leading_spaces, current_indent, True
)
_attach_data_to_path(parsed, last, current, nested)
elif leading_spaces < current_indent:
config.insert(0, line)
break
else:
if not nested:
current = parse_indented_config(
config, leading_spaces, current_indent, True
)
_attach_data_to_path(parsed, last, current, nested)
else:
config.insert(0, line)
break
return parsed |
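A minimal usage sketch (hypothetical input lines; assumes parse_indented_config and its _attach_data_to_path helper, which is not shown above, are importable from the same module):
config = [
    "interface GigabitEthernet1",
    ' description "bleh"',
    " switchport mode trunk",
    "interface Loopback0",
    ' description "blah"',
]
parsed = parse_indented_config(config)
# parsed["interface GigabitEthernet1"] maps the two indented lines to empty
# OrderedDicts, mirroring the nested structure shown in the docstring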
def deserialize(self, value, flags):
"""
    Deserialize the value based on flags, or just return it if it is not serialized.
    :param value: Serialized or unserialized value.
:type value: six.string_types, int
:param flags: Value flags
:type flags: int
:return: Deserialized value
:rtype: six.string_types|int
"""
FLAGS = self.FLAGS
if flags & FLAGS['compressed']: # pragma: no branch
value = self.compression.decompress(value)
if flags & FLAGS['binary']:
return value
if flags & FLAGS['integer']:
return int(value)
elif flags & FLAGS['long']:
return long(value)
elif flags & FLAGS['object']:
buf = BytesIO(value)
unpickler = self.unpickler(buf)
return unpickler.load()
if six.PY3:
return value.decode('utf8')
# In Python 2, mimic the behavior of the json library: return a str
# unless the value contains unicode characters.
    # in Python 2, if value is binary (e.g. struct.pack("<Q")) then decode will fail
try:
value.decode('ascii')
except UnicodeDecodeError:
try:
return value.decode('utf8')
except UnicodeDecodeError:
return value
else:
        return value | Deserialize the value based on flags, or just return it if it is not serialized.
:param value: Serialized or unserialized value.
:type value: six.string_types, int
:param flags: Value flags
:type flags: int
:return: Deserialized value
:rtype: six.string_types|int | Below is the instruction that describes the task:
### Input:
Deserialize the value based on flags, or just return it if it is not serialized.
:param value: Serialized or unserialized value.
:type value: six.string_types, int
:param flags: Value flags
:type flags: int
:return: Deserialized value
:rtype: six.string_types|int
### Response:
def deserialize(self, value, flags):
"""
    Deserialize the value based on flags, or just return it if it is not serialized.
    :param value: Serialized or unserialized value.
:type value: six.string_types, int
:param flags: Value flags
:type flags: int
:return: Deserialized value
:rtype: six.string_types|int
"""
FLAGS = self.FLAGS
if flags & FLAGS['compressed']: # pragma: no branch
value = self.compression.decompress(value)
if flags & FLAGS['binary']:
return value
if flags & FLAGS['integer']:
return int(value)
elif flags & FLAGS['long']:
return long(value)
elif flags & FLAGS['object']:
buf = BytesIO(value)
unpickler = self.unpickler(buf)
return unpickler.load()
if six.PY3:
return value.decode('utf8')
# In Python 2, mimic the behavior of the json library: return a str
# unless the value contains unicode characters.
    # in Python 2, if value is binary (e.g. struct.pack("<Q")) then decode will fail
try:
value.decode('ascii')
except UnicodeDecodeError:
try:
return value.decode('utf8')
except UnicodeDecodeError:
return value
else:
return value |
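A sketch of the flag dispatch with hypothetical bit values (the real FLAGS mapping lives on the client class and is not shown here):
FLAGS = {'compressed': 1 << 0, 'binary': 1 << 1, 'integer': 1 << 2}  # hypothetical bits
flags = FLAGS['integer']
value = b'42'
if flags & FLAGS['compressed']:
    pass  # a real client would decompress here first
if flags & FLAGS['integer']:
    print(int(value))  # 42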
def _onWhat(self, name, line, pos, absPosition):
"""Memorizes an imported item"""
        self.__lastImport.what.append(ImportWhat(name, line, pos, absPosition)) | Memorizes an imported item | Below is the instruction that describes the task:
### Input:
Memorizes an imported item
### Response:
def _onWhat(self, name, line, pos, absPosition):
"""Memorizes an imported item"""
self.__lastImport.what.append(ImportWhat(name, line, pos, absPosition)) |
def changelog(self, api_version, doc):
"""Add a changelog entry for this api."""
doc = textwrap.dedent(doc).strip()
self._changelog[api_version] = doc
        self._changelog_locations[api_version] = get_callsite_location() | Add a changelog entry for this api. | Below is the instruction that describes the task:
### Input:
Add a changelog entry for this api.
### Response:
def changelog(self, api_version, doc):
"""Add a changelog entry for this api."""
doc = textwrap.dedent(doc).strip()
self._changelog[api_version] = doc
self._changelog_locations[api_version] = get_callsite_location() |
def update_connection_public_key(self, connection_id, public_key):
"""Adds the public_key to the connection definition.
Args:
connection_id (str): The identifier for the connection.
public_key (str): The public key used to enforce permissions on
connections.
"""
if connection_id in self._connections:
connection_info = self._connections[connection_id]
self._connections[connection_id] = \
ConnectionInfo(connection_info.connection_type,
connection_info.connection,
connection_info.uri,
connection_info.status,
public_key)
else:
LOGGER.debug("Could not update the public key %s for "
"connection_id %s. The connection does not "
"exist.",
public_key,
connection_id) | Adds the public_key to the connection definition.
Args:
connection_id (str): The identifier for the connection.
public_key (str): The public key used to enforce permissions on
        connections. | Below is the instruction that describes the task:
### Input:
Adds the public_key to the connection definition.
Args:
connection_id (str): The identifier for the connection.
public_key (str): The public key used to enforce permissions on
connections.
### Response:
def update_connection_public_key(self, connection_id, public_key):
"""Adds the public_key to the connection definition.
Args:
connection_id (str): The identifier for the connection.
public_key (str): The public key used to enforce permissions on
connections.
"""
if connection_id in self._connections:
connection_info = self._connections[connection_id]
self._connections[connection_id] = \
ConnectionInfo(connection_info.connection_type,
connection_info.connection,
connection_info.uri,
connection_info.status,
public_key)
else:
LOGGER.debug("Could not update the public key %s for "
"connection_id %s. The connection does not "
"exist.",
public_key,
connection_id) |
def register_pyglet_handler(peng, func, event, raiseErrors=False):
"""
Registers the given pyglet-style event handler for the given pyglet event.
This function allows pyglet-style event handlers to receive events bridged
through the peng3d event system. Internally, this function creates a lambda
function that decodes the arguments and then calls the pyglet-style event handler.
The ``raiseErrors`` flag is passed through to the peng3d event system and will
cause any errors raised by this handler to be ignored.
.. seealso::
See :py:meth:`~peng3d.peng.Peng.addEventListener()` for more information.
"""
peng.addEventListener("pyglet:%s"%event,(lambda data:func(*data["args"])),raiseErrors) | Registers the given pyglet-style event handler for the given pyglet event.
This function allows pyglet-style event handlers to receive events bridged
through the peng3d event system. Internally, this function creates a lambda
function that decodes the arguments and then calls the pyglet-style event handler.
The ``raiseErrors`` flag is passed through to the peng3d event system and will
cause any errors raised by this handler to be ignored.
.. seealso::
        See :py:meth:`~peng3d.peng.Peng.addEventListener()` for more information. | Below is the instruction that describes the task:
### Input:
Registers the given pyglet-style event handler for the given pyglet event.
This function allows pyglet-style event handlers to receive events bridged
through the peng3d event system. Internally, this function creates a lambda
function that decodes the arguments and then calls the pyglet-style event handler.
The ``raiseErrors`` flag is passed through to the peng3d event system and will
cause any errors raised by this handler to be ignored.
.. seealso::
See :py:meth:`~peng3d.peng.Peng.addEventListener()` for more information.
### Response:
def register_pyglet_handler(peng, func, event, raiseErrors=False):
"""
Registers the given pyglet-style event handler for the given pyglet event.
This function allows pyglet-style event handlers to receive events bridged
through the peng3d event system. Internally, this function creates a lambda
function that decodes the arguments and then calls the pyglet-style event handler.
The ``raiseErrors`` flag is passed through to the peng3d event system and will
cause any errors raised by this handler to be ignored.
.. seealso::
See :py:meth:`~peng3d.peng.Peng.addEventListener()` for more information.
"""
peng.addEventListener("pyglet:%s"%event,(lambda data:func(*data["args"])),raiseErrors) |
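A usage sketch (the peng instance is hypothetical; any pyglet event name bridged as "pyglet:<event>" works the same way):
def on_key_press(symbol, modifiers):
    print("key pressed:", symbol, modifiers)

# forwards peng3d's "pyglet:on_key_press" events to the pyglet-style handler
register_pyglet_handler(peng, on_key_press, "on_key_press")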
def merge(infiles, outfile, same_run, templatefile):
"""
    Merge multiple OSW files (for large experiments, it is recommended to subsample first).
"""
if len(infiles) < 1:
raise click.ClickException("At least one PyProphet input file needs to be provided.")
    merge_osw(infiles, outfile, templatefile, same_run) | Merge multiple OSW files (for large experiments, it is recommended to subsample first). | Below is the instruction that describes the task:
### Input:
Merge multiple OSW files (for large experiments, it is recommended to subsample first).
### Response:
def merge(infiles, outfile, same_run, templatefile):
"""
    Merge multiple OSW files (for large experiments, it is recommended to subsample first).
"""
if len(infiles) < 1:
raise click.ClickException("At least one PyProphet input file needs to be provided.")
merge_osw(infiles, outfile, templatefile, same_run) |
def _PrintAnalysisReportsDetails(self, storage_reader):
"""Prints the details of the analysis reports.
Args:
storage_reader (StorageReader): storage reader.
"""
if not storage_reader.HasAnalysisReports():
self._output_writer.Write('No analysis reports stored.\n\n')
return
for index, analysis_report in enumerate(
storage_reader.GetAnalysisReports()):
title = 'Analysis report: {0:d}'.format(index)
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title=title)
table_view.AddRow(['String', analysis_report.GetString()])
table_view.Write(self._output_writer) | Prints the details of the analysis reports.
Args:
      storage_reader (StorageReader): storage reader. | Below is the instruction that describes the task:
### Input:
Prints the details of the analysis reports.
Args:
storage_reader (StorageReader): storage reader.
### Response:
def _PrintAnalysisReportsDetails(self, storage_reader):
"""Prints the details of the analysis reports.
Args:
storage_reader (StorageReader): storage reader.
"""
if not storage_reader.HasAnalysisReports():
self._output_writer.Write('No analysis reports stored.\n\n')
return
for index, analysis_report in enumerate(
storage_reader.GetAnalysisReports()):
title = 'Analysis report: {0:d}'.format(index)
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title=title)
table_view.AddRow(['String', analysis_report.GetString()])
table_view.Write(self._output_writer) |
def draw_circuit_canvas(circuit, hunit = HUNIT, vunit = VUNIT, rhmargin = RHMARGIN, rvmargin = RVMARGIN, rpermutation_length = RPLENGTH, draw_boxes = True, permutation_arrows = False):
"""
Generate a PyX graphical representation of a circuit expression object.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param hunit: The horizontal length unit, default = ``HUNIT``
:type hunit: float
:param vunit: The vertical length unit, default = ``VUNIT``
:type vunit: float
:param rhmargin: relative horizontal margin, default = ``RHMARGIN``
:type rhmargin: float
:param rvmargin: relative vertical margin, default = ``RVMARGIN``
:type rvmargin: float
:param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
:type rpermutation_length: float
:param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
:type draw_boxes: bool
:param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
:type permutation_arrows: bool
:return: A PyX canvas object that can be further manipulated or printed to an output image.
:rtype: pyx.canvas.canvas
"""
if not isinstance(circuit, ca.Circuit):
raise ValueError()
nc = circuit.cdim
c = pyx.canvas.canvas()
if circuit is ca.CIdentity:
# simply create a line going through
c.stroke(pyx.path.line(0, vunit/2, hunit, vunit/2))
return c, (1, 1), (.5,), (.5,)
elif isinstance(circuit, (ca.CircuitSymbol, ca.SeriesInverse, ca.SLH, Component)):
# draw box
b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
c.stroke(b)
texstr = "${}$".format(tex(circuit) if not isinstance(circuit, ca.SLH) else r"{{\rm SLH}}_{{{}}}".format(tex(circuit.space)))
# draw symbol name
c.text(hunit/2., nc * vunit/2., texstr , [pyx.text.halign.boxcenter, pyx.text.valign.middle])
# draw connectors at half-unit positions
connector_positions = tuple((.5 + k) for k in range(nc))
for y in connector_positions:
c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
return c, (1, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.CPermutation):
permutation = circuit.permutation
connector_positions = tuple((k + 0.5) for k in range(nc))
target_positions = [connector_positions[permutation[k]] for k in range(nc)]
# draw curves
for y1, y2 in zip(connector_positions, target_positions):
if permutation_arrows:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit), [pyx.deco.earrow()])
else:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit))
if draw_boxes:
b = pyx.path.rect(.5* rhmargin * hunit, .5* rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
return c, (rpermutation_length, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.SeriesProduct):
assert len(circuit.operands) > 1
        # generate graphics of operand subsystems
sub_graphics = [draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows) for op in reversed(circuit.operands)]
# set up first one
previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
hoffset = 0
c.insert(previous_csub)
hoffset += previous_dims[0]
max_height = previous_dims[1]
# this will later become the full series in-port coordinate tuple
first_c_in = previous_c_in
# now add all other operand subsystems
for csub, dims, c_in, c_out in sub_graphics[1:]:
assert dims[1] >= 0
max_height = max(dims[1], max_height)
if previous_c_out != c_in: # vertical port locations don't agree, map signals correspondingly
x1 = hoffset
x2 = hoffset + rpermutation_length
# draw connection curves
for y1, y2 in zip(previous_c_out, c_in):
c.stroke(_curve(x1, y1, x2, y2, hunit = hunit, vunit = vunit))
hoffset += rpermutation_length
previous_c_in, previous_c_out = c_in, c_out
# now insert current system
c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
hoffset += dims[0]
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit + rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
return c, (hoffset, max_height), first_c_in, c_out
elif isinstance(circuit, ca.Concatenation):
voffset = 0
total_cin, total_cout = (), ()
widths = [] # stores the component width for each channel(!)
# generate all operand subsystem graphics and stack them vertically
for op in circuit.operands:
csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
            # add appropriate offsets to vertical port coordinates
total_cin += tuple(y + voffset for y in c_in)
total_cout += tuple(y + voffset for y in c_out)
c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
            # keep track of width in all channels for this subsystem
widths += [dims[0]] * op.cdim
voffset += dims[1]
max_width = max(widths)
if max_width > min(widths): # components differ in width => we must extend the narrow component output lines
for x,y in zip(widths, total_cout):
if x == max_width:
continue
ax, ax_to = x * hunit, max_width * hunit
ay = y * vunit
c.stroke(pyx.path.line(ax, ay, ax_to, ay))
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
return c, (max_width, voffset), total_cin, total_cout
elif isinstance(circuit, ca.Feedback):
# generate and insert graphics of subsystem
csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
width, height = dims
# create feedback loop
fb_out, fb_in = circuit.out_in_pair
out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
upper_y = (height) * vunit
feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y),
pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
c.stroke(feedback_line)
# remove feedback port coordinates
new_c_in = c_in[:fb_in] + c_in[fb_in + 1 :]
new_c_out = c_out[:fb_out] + c_out[fb_out + 1 :]
# extend port connectors a little bit outward,
# such that the feedback loop is not at the edge anymore
for y in new_c_in:
c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
for y in new_c_out:
c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
return c, (width + rhmargin, height + rvmargin), new_c_in, new_c_out
raise Exception('Visualization not implemented for type %s' % type(circuit)) | Generate a PyX graphical representation of a circuit expression object.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param hunit: The horizontal length unit, default = ``HUNIT``
:type hunit: float
:param vunit: The vertical length unit, default = ``VUNIT``
:type vunit: float
:param rhmargin: relative horizontal margin, default = ``RHMARGIN``
:type rhmargin: float
:param rvmargin: relative vertical margin, default = ``RVMARGIN``
:type rvmargin: float
:param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
:type rpermutation_length: float
:param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
:type draw_boxes: bool
:param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
:type permutation_arrows: bool
:return: A PyX canvas object that can be further manipulated or printed to an output image.
    :rtype: pyx.canvas.canvas | Below is the instruction that describes the task:
### Input:
Generate a PyX graphical representation of a circuit expression object.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param hunit: The horizontal length unit, default = ``HUNIT``
:type hunit: float
:param vunit: The vertical length unit, default = ``VUNIT``
:type vunit: float
:param rhmargin: relative horizontal margin, default = ``RHMARGIN``
:type rhmargin: float
:param rvmargin: relative vertical margin, default = ``RVMARGIN``
:type rvmargin: float
:param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
:type rpermutation_length: float
:param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
:type draw_boxes: bool
:param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
:type permutation_arrows: bool
:return: A PyX canvas object that can be further manipulated or printed to an output image.
:rtype: pyx.canvas.canvas
### Response:
def draw_circuit_canvas(circuit, hunit = HUNIT, vunit = VUNIT, rhmargin = RHMARGIN, rvmargin = RVMARGIN, rpermutation_length = RPLENGTH, draw_boxes = True, permutation_arrows = False):
"""
Generate a PyX graphical representation of a circuit expression object.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param hunit: The horizontal length unit, default = ``HUNIT``
:type hunit: float
:param vunit: The vertical length unit, default = ``VUNIT``
:type vunit: float
:param rhmargin: relative horizontal margin, default = ``RHMARGIN``
:type rhmargin: float
:param rvmargin: relative vertical margin, default = ``RVMARGIN``
:type rvmargin: float
:param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
:type rpermutation_length: float
:param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
:type draw_boxes: bool
:param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
:type permutation_arrows: bool
:return: A PyX canvas object that can be further manipulated or printed to an output image.
:rtype: pyx.canvas.canvas
"""
if not isinstance(circuit, ca.Circuit):
raise ValueError()
nc = circuit.cdim
c = pyx.canvas.canvas()
if circuit is ca.CIdentity:
# simply create a line going through
c.stroke(pyx.path.line(0, vunit/2, hunit, vunit/2))
return c, (1, 1), (.5,), (.5,)
elif isinstance(circuit, (ca.CircuitSymbol, ca.SeriesInverse, ca.SLH, Component)):
# draw box
b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
c.stroke(b)
texstr = "${}$".format(tex(circuit) if not isinstance(circuit, ca.SLH) else r"{{\rm SLH}}_{{{}}}".format(tex(circuit.space)))
# draw symbol name
c.text(hunit/2., nc * vunit/2., texstr , [pyx.text.halign.boxcenter, pyx.text.valign.middle])
# draw connectors at half-unit positions
connector_positions = tuple((.5 + k) for k in range(nc))
for y in connector_positions:
c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
return c, (1, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.CPermutation):
permutation = circuit.permutation
connector_positions = tuple((k + 0.5) for k in range(nc))
target_positions = [connector_positions[permutation[k]] for k in range(nc)]
# draw curves
for y1, y2 in zip(connector_positions, target_positions):
if permutation_arrows:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit), [pyx.deco.earrow()])
else:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit))
if draw_boxes:
b = pyx.path.rect(.5* rhmargin * hunit, .5* rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
return c, (rpermutation_length, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.SeriesProduct):
assert len(circuit.operands) > 1
        # generate graphics of operand subsystems
sub_graphics = [draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows) for op in reversed(circuit.operands)]
# set up first one
previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
hoffset = 0
c.insert(previous_csub)
hoffset += previous_dims[0]
max_height = previous_dims[1]
# this will later become the full series in-port coordinate tuple
first_c_in = previous_c_in
# now add all other operand subsystems
for csub, dims, c_in, c_out in sub_graphics[1:]:
assert dims[1] >= 0
max_height = max(dims[1], max_height)
if previous_c_out != c_in: # vertical port locations don't agree, map signals correspondingly
x1 = hoffset
x2 = hoffset + rpermutation_length
# draw connection curves
for y1, y2 in zip(previous_c_out, c_in):
c.stroke(_curve(x1, y1, x2, y2, hunit = hunit, vunit = vunit))
hoffset += rpermutation_length
previous_c_in, previous_c_out = c_in, c_out
# now insert current system
c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
hoffset += dims[0]
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit + rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
return c, (hoffset, max_height), first_c_in, c_out
elif isinstance(circuit, ca.Concatenation):
voffset = 0
total_cin, total_cout = (), ()
widths = [] # stores the component width for each channel(!)
# generate all operand subsystem graphics and stack them vertically
for op in circuit.operands:
csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
            # add appropriate offsets to vertical port coordinates
total_cin += tuple(y + voffset for y in c_in)
total_cout += tuple(y + voffset for y in c_out)
c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
            # keep track of width in all channels for this subsystem
widths += [dims[0]] * op.cdim
voffset += dims[1]
max_width = max(widths)
if max_width > min(widths): # components differ in width => we must extend the narrow component output lines
for x,y in zip(widths, total_cout):
if x == max_width:
continue
ax, ax_to = x * hunit, max_width * hunit
ay = y * vunit
c.stroke(pyx.path.line(ax, ay, ax_to, ay))
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
return c, (max_width, voffset), total_cin, total_cout
elif isinstance(circuit, ca.Feedback):
# generate and insert graphics of subsystem
csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
width, height = dims
# create feedback loop
fb_out, fb_in = circuit.out_in_pair
out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
upper_y = (height) * vunit
feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y),
pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
c.stroke(feedback_line)
# remove feedback port coordinates
new_c_in = c_in[:fb_in] + c_in[fb_in + 1 :]
new_c_out = c_out[:fb_out] + c_out[fb_out + 1 :]
# extend port connectors a little bit outward,
# such that the feedback loop is not at the edge anymore
for y in new_c_in:
c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
for y in new_c_out:
c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
return c, (width + rhmargin, height + rvmargin), new_c_in, new_c_out
raise Exception('Visualization not implemented for type %s' % type(circuit)) |
def outgoing_args(self, nodeid):
"""
Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
dict: `{role: tgt}`
"""
_vars = self._vars
_hcons = self._hcons
args = self.args(nodeid) # args is a copy; we can edit it
for arg, val in list(args.items()):
# don't include constant args or intrinsic args
if arg == IVARG_ROLE or val not in _vars:
del args[arg]
else:
refs = _vars[val]['refs']
# don't include if not HCONS or pointing to other IV or LBL
if not (val in _hcons or IVARG_ROLE in refs or 'LBL' in refs):
del args[arg]
return args | Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
        dict: `{role: tgt}` | Below is the instruction that describes the task:
### Input:
Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
dict: `{role: tgt}`
### Response:
def outgoing_args(self, nodeid):
"""
Return the arguments going from *nodeid* to other predications.
Valid arguments include regular variable arguments and scopal
(label-selecting or HCONS) arguments. MOD/EQ
links, intrinsic arguments, and constant arguments are not
included.
Args:
nodeid: the nodeid of the EP that is the arguments' source
Returns:
dict: `{role: tgt}`
"""
_vars = self._vars
_hcons = self._hcons
args = self.args(nodeid) # args is a copy; we can edit it
for arg, val in list(args.items()):
# don't include constant args or intrinsic args
if arg == IVARG_ROLE or val not in _vars:
del args[arg]
else:
refs = _vars[val]['refs']
# don't include if not HCONS or pointing to other IV or LBL
if not (val in _hcons or IVARG_ROLE in refs or 'LBL' in refs):
del args[arg]
return args |
def jsonld(client, datasets):
"""Format datasets as JSON-LD."""
from renku.models._json import dumps
from renku.models._jsonld import asjsonld
data = [
asjsonld(
dataset,
basedir=os.path.relpath(
'.', start=str(dataset.__reference__.parent)
)
) for dataset in datasets
]
    click.echo(dumps(data, indent=2)) | Format datasets as JSON-LD. | Below is the instruction that describes the task:
### Input:
Format datasets as JSON-LD.
### Response:
def jsonld(client, datasets):
"""Format datasets as JSON-LD."""
from renku.models._json import dumps
from renku.models._jsonld import asjsonld
data = [
asjsonld(
dataset,
basedir=os.path.relpath(
'.', start=str(dataset.__reference__.parent)
)
) for dataset in datasets
]
click.echo(dumps(data, indent=2)) |
def get_nova_endpoint(cls, json_resp, nova_api_version=None):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
nova_version = nova_api_version or DEFAULT_NOVA_API_VERSION
catalog = json_resp.get('token', {}).get('catalog', [])
nova_match = 'novav21' if nova_version == V21_NOVA_API_VERSION else 'nova'
for entry in catalog:
if entry['name'] == nova_match or 'Compute' in entry['name']:
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry['endpoints']:
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep['url']
if valid_endpoints:
# Favor public endpoints over internal
nova_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal"))
return nova_endpoint
else:
raise MissingNovaEndpoint() | Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
    Sends a CRITICAL service check when no viable candidates are found in the Catalog | Below is the instruction that describes the task:
### Input:
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
### Response:
def get_nova_endpoint(cls, json_resp, nova_api_version=None):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
nova_version = nova_api_version or DEFAULT_NOVA_API_VERSION
catalog = json_resp.get('token', {}).get('catalog', [])
nova_match = 'novav21' if nova_version == V21_NOVA_API_VERSION else 'nova'
for entry in catalog:
if entry['name'] == nova_match or 'Compute' in entry['name']:
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry['endpoints']:
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep['url']
if valid_endpoints:
# Favor public endpoints over internal
nova_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal"))
return nova_endpoint
else:
raise MissingNovaEndpoint() |
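A sketch of the catalog shape the parser expects (hypothetical URLs; the enclosing class is not shown, so the call is written against cls). The entry matches when the requested version maps to the 'nova' name, or when the name contains 'Compute':
json_resp = {'token': {'catalog': [{
    'name': 'nova',
    'endpoints': [
        {'interface': 'internal', 'url': 'http://10.0.0.1:8774/v2.1'},
        {'interface': 'public', 'url': 'https://compute.example.com/v2.1'},
    ],
}]}}
# public is favored over internal:
# cls.get_nova_endpoint(json_resp) -> 'https://compute.example.com/v2.1'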
def layers(self):
"""gets the service layers"""
if self._layers is None:
self.__init()
for lyr in self._layers:
url = self._url + "/%s" % lyr['id']
lyr['object'] = MobileServiceLayer(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
        return self._layers | gets the service layers | Below is the instruction that describes the task:
### Input:
gets the service layers
### Response:
def layers(self):
"""gets the service layers"""
if self._layers is None:
self.__init()
for lyr in self._layers:
url = self._url + "/%s" % lyr['id']
lyr['object'] = MobileServiceLayer(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
return self._layers |
def post(self, url, data=None, files=None, headers=None, raw=False,
send_as_json=True, content_type=None, **request_kwargs):
"""
POST request to AmigoCloud endpoint.
"""
return self._secure_request(
url, 'post', data=data, files=files, headers=headers, raw=raw,
send_as_json=send_as_json, content_type=content_type,
**request_kwargs
        ) | POST request to AmigoCloud endpoint. | Below is the instruction that describes the task:
### Input:
POST request to AmigoCloud endpoint.
### Response:
def post(self, url, data=None, files=None, headers=None, raw=False,
send_as_json=True, content_type=None, **request_kwargs):
"""
POST request to AmigoCloud endpoint.
"""
return self._secure_request(
url, 'post', data=data, files=files, headers=headers, raw=raw,
send_as_json=send_as_json, content_type=content_type,
**request_kwargs
) |
def make_request(self, resource, params=None):
"""
Performs the API request. Most methods are a wrapper around this one.
"""
        return super(VideoApi, self).make_request('video/%s' % resource, params) | Performs the API request. Most methods are a wrapper around this one. | Below is the instruction that describes the task:
### Input:
Performs the API request. Most methods are a wrapper around this one.
### Response:
def make_request(self, resource, params=None):
"""
Performs the API request. Most methods are a wrapper around this one.
"""
return super(VideoApi, self).make_request('video/%s' % resource, params) |
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(password),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
    return authstr | Returns a Basic Auth string. | Below is the instruction that describes the task:
### Input:
Returns a Basic Auth string.
### Response:
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(password),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
return authstr |
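A worked example of the encoding step, self-contained and matching what the function returns for these credentials:
from base64 import b64encode
header = 'Basic ' + b64encode(b':'.join((b'user', b'pass'))).strip().decode('ascii')
assert header == 'Basic dXNlcjpwYXNz'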
def _parse_arguments(self, method, parameters):
"""Parse arguments to method, returning a dictionary."""
# TODO: Consider raising an exception if there are extra arguments.
arguments = _fetch_arguments(self, method)
arg_dict = {}
errors = []
for key, properties in parameters:
if key in arguments:
value = arguments[key]
try:
arg_dict[key] = _apply_validator_chain(
properties.get('validators', []), value, self)
except validators.ValidationError as err:
errors.append(err)
else:
if properties.get('required', False):
raise web.HTTPError(
400,
('Missing required parameter: %s'
% (key, ))
)
else:
if properties.get('default', None) is not None:
arg_dict[key] = properties['default']
else:
arg_dict[key] = None
if errors:
raise web.HTTPError(400, 'There were %s errors' % len(errors))
        return arg_dict | Parse arguments to method, returning a dictionary. | Below is the instruction that describes the task:
### Input:
Parse arguments to method, returning a dictionary.
### Response:
def _parse_arguments(self, method, parameters):
"""Parse arguments to method, returning a dictionary."""
# TODO: Consider raising an exception if there are extra arguments.
arguments = _fetch_arguments(self, method)
arg_dict = {}
errors = []
for key, properties in parameters:
if key in arguments:
value = arguments[key]
try:
arg_dict[key] = _apply_validator_chain(
properties.get('validators', []), value, self)
except validators.ValidationError as err:
errors.append(err)
else:
if properties.get('required', False):
raise web.HTTPError(
400,
('Missing required parameter: %s'
% (key, ))
)
else:
if properties.get('default', None) is not None:
arg_dict[key] = properties['default']
else:
arg_dict[key] = None
if errors:
raise web.HTTPError(400, 'There were %s errors' % len(errors))
return arg_dict |
def _create_breakdown_chart(self, data, work, output_dir):
"""Generates and writes to a file in `output_dir` the data used to
display a stacked bar chart.
The generated data gives the percentages of the text of the
work (across all witnesses) that are in common with all other
works, shared with each "maybe" work, and unique.
:param data: data to derive the chart data from
:type data: `pandas.DataFrame`
:param work: work to show related work data for
:type work: `str`
:param output_dir: directory to output data file to
:type output_dir: `str`
"""
chart_data = data.loc[work].sort_values(by=SHARED, ascending=False)[
[SHARED, UNIQUE, COMMON]]
csv_path = os.path.join(output_dir, 'breakdown_{}.csv'.format(
work))
chart_data.to_csv(csv_path) | Generates and writes to a file in `output_dir` the data used to
display a stacked bar chart.
The generated data gives the percentages of the text of the
work (across all witnesses) that are in common with all other
works, shared with each "maybe" work, and unique.
:param data: data to derive the chart data from
:type data: `pandas.DataFrame`
:param work: work to show related work data for
:type work: `str`
:param output_dir: directory to output data file to
    :type output_dir: `str` | Below is the instruction that describes the task:
### Input:
Generates and writes to a file in `output_dir` the data used to
display a stacked bar chart.
The generated data gives the percentages of the text of the
work (across all witnesses) that are in common with all other
works, shared with each "maybe" work, and unique.
:param data: data to derive the chart data from
:type data: `pandas.DataFrame`
:param work: work to show related work data for
:type work: `str`
:param output_dir: directory to output data file to
:type output_dir: `str`
### Response:
def _create_breakdown_chart(self, data, work, output_dir):
"""Generates and writes to a file in `output_dir` the data used to
display a stacked bar chart.
The generated data gives the percentages of the text of the
work (across all witnesses) that are in common with all other
works, shared with each "maybe" work, and unique.
:param data: data to derive the chart data from
:type data: `pandas.DataFrame`
:param work: work to show related work data for
:type work: `str`
:param output_dir: directory to output data file to
:type output_dir: `str`
"""
chart_data = data.loc[work].sort_values(by=SHARED, ascending=False)[
[SHARED, UNIQUE, COMMON]]
csv_path = os.path.join(output_dir, 'breakdown_{}.csv'.format(
work))
chart_data.to_csv(csv_path) |
def add_record(self, msg_id, rec):
"""Add a new Task Record, by msg_id."""
d = self._defaults()
d.update(rec)
d['msg_id'] = msg_id
line = self._dict_to_list(d)
tups = '(%s)'%(','.join(['?']*len(line)))
        self._db.execute("INSERT INTO %s VALUES %s"%(self.table, tups), line) | Add a new Task Record, by msg_id. | Below is the instruction that describes the task:
### Input:
Add a new Task Record, by msg_id.
### Response:
def add_record(self, msg_id, rec):
"""Add a new Task Record, by msg_id."""
d = self._defaults()
d.update(rec)
d['msg_id'] = msg_id
line = self._dict_to_list(d)
tups = '(%s)'%(','.join(['?']*len(line)))
self._db.execute("INSERT INTO %s VALUES %s"%(self.table, tups), line) |
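A worked example of the placeholder string that feeds the INSERT statement (self-contained):
line = ['msg-1', 'submitted', None]
tups = '(%s)' % ','.join(['?'] * len(line))
assert tups == '(?,?,?)'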
def check_value_change(cls, old, new): # @NoSelf
"""Checks whether the value of the property changed in type
or if the instance has been changed to a different instance.
If true, a call to model._reset_property_notification should
be called in order to re-register the new property instance
or type"""
return (type(old) != type(new) or
isinstance(old, wrappers.ObsWrapperBase) and old != new) | Checks whether the value of the property changed in type
or if the instance has been changed to a different instance.
If true, model._reset_property_notification should
be called in order to re-register the new property instance
or type | Below is the instruction that describes the task:
### Input:
Checks whether the value of the property changed in type
or if the instance has been changed to a different instance.
If true, model._reset_property_notification should
be called in order to re-register the new property instance
or type
### Response:
def check_value_change(cls, old, new): # @NoSelf
"""Checks whether the value of the property changed in type
or if the instance has been changed to a different instance.
    If true, model._reset_property_notification should
be called in order to re-register the new property instance
or type"""
return (type(old) != type(new) or
isinstance(old, wrappers.ObsWrapperBase) and old != new) |
def _parse_error(self, error):
""" Parses a single GLSL error and extracts the linenr and description
Other GLIR implementations may omit this.
"""
error = str(error)
# Nvidia
# 0(7): error C1008: undefined variable "MV"
m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# ATI / Intel
# ERROR: 0:131: '{' : syntax error parse error
m = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# Nouveau
# 0:28(16): error: syntax error, unexpected ')', expecting '('
m = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error)
if m:
return int(m.group(2)), m.group(4)
# Other ...
        return None, error | Parses a single GLSL error and extracts the line number and description
    Other GLIR implementations may omit this. | Below is the instruction that describes the task:
### Input:
Parses a single GLSL error and extracts the line number and description
Other GLIR implementations may omit this.
### Response:
def _parse_error(self, error):
""" Parses a single GLSL error and extracts the linenr and description
Other GLIR implementations may omit this.
"""
error = str(error)
# Nvidia
# 0(7): error C1008: undefined variable "MV"
m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# ATI / Intel
# ERROR: 0:131: '{' : syntax error parse error
m = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# Nouveau
# 0:28(16): error: syntax error, unexpected ')', expecting '('
m = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error)
if m:
return int(m.group(2)), m.group(4)
# Other ...
return None, error |
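A worked example of the first (Nvidia-style) pattern, self-contained:
import re
error = '0(7): error C1008: undefined variable "MV"'
m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error)
assert (int(m.group(2)), m.group(3)) == (7, 'error C1008: undefined variable "MV"')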
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
self._RaiseIfNotWritable()
event_source = self._PrepareAttributeContainer(event_source)
self._event_sources.append(event_source)
self.number_of_event_sources += 1 | Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage writer is closed.
      OSError: when the storage writer is closed. | Below is the instruction that describes the task:
### Input:
Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
### Response:
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
self._RaiseIfNotWritable()
event_source = self._PrepareAttributeContainer(event_source)
self._event_sources.append(event_source)
self.number_of_event_sources += 1 |
def file(self, md5=None, sha1=None, sha256=None, **kwargs):
"""Add File data to Batch object.
    .. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File.
"""
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj) | Add File data to Batch object.
.. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File. | Below is the instruction that describes the task:
### Input:
Add File data to Batch object.
.. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File.
### Response:
def file(self, md5=None, sha1=None, sha256=None, **kwargs):
"""Add File data to Batch object.
.. note:: At least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File.
"""
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj) |
def read_unsigned_word(self, cmd, little_endian=True):
"""
Read an unsigned word from the specified command register
We assume the data is in little endian mode, if it is in big endian
mode then set little_endian to False
"""
result = self.bus.read_word_data(self.address, cmd)
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
self.log.debug(
"read_unsigned_word: Read 0x%04X from command register 0x%02X" % (
result, cmd
)
)
return result | Read an unsigned word from the specified command register
We assume the data is in little endian mode, if it is in big endian
mode then set little_endian to False | Below is the instruction that describes the task:
### Input:
Read an unsigned word from the specified command register
We assume the data is in little endian mode, if it is in big endian
mode then set little_endian to False
### Response:
def read_unsigned_word(self, cmd, little_endian=True):
"""
Read an unsigned word from the specified command register
We assume the data is in little endian mode, if it is in big endian
mode then set little_endian to False
"""
result = self.bus.read_word_data(self.address, cmd)
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
self.log.debug(
"read_unsigned_word: Read 0x%04X from command register 0x%02X" % (
result, cmd
)
)
return result |
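The little_endian fixup above is a plain 16-bit byte swap, which can be checked without any I2C hardware; the register value here is made up for illustration:
raw = 0x3412  # what the bus would return for a big-endian register holding 0x1234
swapped = ((raw << 8) & 0xFF00) + (raw >> 8)
assert swapped == 0x1234  # 0x3412 -> 0x1234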
def onChangeSelectedBlocksIndent(self, increase, withSpace=False):
"""Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
Insert or remove text from the beginning of blocks
"""
def blockIndentation(block):
text = block.text()
return text[:len(text) - len(text.lstrip())]
def cursorAtSpaceEnd(block):
cursor = QTextCursor(block)
cursor.setPosition(block.position() + len(blockIndentation(block)))
return cursor
def indentBlock(block):
cursor = cursorAtSpaceEnd(block)
cursor.insertText(' ' if withSpace else self.text())
def spacesCount(text):
return len(text) - len(text.rstrip(' '))
def unIndentBlock(block):
currentIndent = blockIndentation(block)
if currentIndent.endswith('\t'):
charsToRemove = 1
elif withSpace:
charsToRemove = 1 if currentIndent else 0
else:
if self.useTabs:
charsToRemove = min(spacesCount(currentIndent), self.width)
else: # spaces
if currentIndent.endswith(self.text()): # remove indent level
charsToRemove = self.width
else: # remove all spaces
charsToRemove = min(spacesCount(currentIndent), self.width)
if charsToRemove:
cursor = cursorAtSpaceEnd(block)
cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
cursor = self._qpart.textCursor()
startBlock = self._qpart.document().findBlock(cursor.selectionStart())
endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
if(cursor.selectionStart() != cursor.selectionEnd() and
endBlock.position() == cursor.selectionEnd() and
endBlock.previous().isValid()):
endBlock = endBlock.previous() # do not indent an unselected line when indenting multiple lines
indentFunc = indentBlock if increase else unIndentBlock
if startBlock != endBlock: # indent multiple lines
stopBlock = endBlock.next()
block = startBlock
with self._qpart:
while block != stopBlock:
indentFunc(block)
block = block.next()
newCursor = QTextCursor(startBlock)
newCursor.setPosition(endBlock.position() + len(endBlock.text()), QTextCursor.KeepAnchor)
self._qpart.setTextCursor(newCursor)
else: # indent 1 line
indentFunc(startBlock) | Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
Insert or remove text from the beginning of blocks | Below is the instruction that describes the task:
### Input:
Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
Insert or remove text from the beginning of blocks
### Response:
def onChangeSelectedBlocksIndent(self, increase, withSpace=False):
"""Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
Insert or remove text from the beginning of blocks
"""
def blockIndentation(block):
text = block.text()
return text[:len(text) - len(text.lstrip())]
def cursorAtSpaceEnd(block):
cursor = QTextCursor(block)
cursor.setPosition(block.position() + len(blockIndentation(block)))
return cursor
def indentBlock(block):
cursor = cursorAtSpaceEnd(block)
cursor.insertText(' ' if withSpace else self.text())
def spacesCount(text):
return len(text) - len(text.rstrip(' '))
def unIndentBlock(block):
currentIndent = blockIndentation(block)
if currentIndent.endswith('\t'):
charsToRemove = 1
elif withSpace:
charsToRemove = 1 if currentIndent else 0
else:
if self.useTabs:
charsToRemove = min(spacesCount(currentIndent), self.width)
else: # spaces
if currentIndent.endswith(self.text()): # remove indent level
charsToRemove = self.width
else: # remove all spaces
charsToRemove = min(spacesCount(currentIndent), self.width)
if charsToRemove:
cursor = cursorAtSpaceEnd(block)
cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
cursor = self._qpart.textCursor()
startBlock = self._qpart.document().findBlock(cursor.selectionStart())
endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
if(cursor.selectionStart() != cursor.selectionEnd() and
endBlock.position() == cursor.selectionEnd() and
endBlock.previous().isValid()):
endBlock = endBlock.previous() # do not indent an unselected line when indenting multiple lines
indentFunc = indentBlock if increase else unIndentBlock
if startBlock != endBlock: # indent multiple lines
stopBlock = endBlock.next()
block = startBlock
with self._qpart:
while block != stopBlock:
indentFunc(block)
block = block.next()
newCursor = QTextCursor(startBlock)
newCursor.setPosition(endBlock.position() + len(endBlock.text()), QTextCursor.KeepAnchor)
self._qpart.setTextCursor(newCursor)
else: # indent 1 line
indentFunc(startBlock) |
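The blockIndentation helper above leans on a small string identity: a line's leading whitespace is exactly the part that lstrip() removes. A Qt-free check of that trick:
def leading_indent(text):
    # slice off everything lstrip() keeps, leaving only the indent
    return text[:len(text) - len(text.lstrip())]

assert leading_indent('    x = 1') == '    '
assert leading_indent('\t\tfoo') == '\t\t'
assert leading_indent('bare') == ''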
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
'''
Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info.
'''
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
jobs_result = _query('linode',
'job.list',
args={'LinodeID': linode_id})['DATA']
if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Still waiting on Job %s for Linode %s.', job_id, linode_id
)
return False | Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info. | Below is the instruction that describes the task:
### Input:
Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info.
### Response:
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
'''
Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info.
'''
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
jobs_result = _query('linode',
'job.list',
args={'LinodeID': linode_id})['DATA']
if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Still waiting on Job %s for Linode %s.', job_id, linode_id
)
return False |
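Stripped of the Linode-specific query, this is a poll-with-timeout loop. A generic sketch of the same pattern, where poll is a made-up stand-in for the job.list call:
import time

def wait_for(poll, timeout=300, interval=5):
    # retry poll() until it reports success or the timeout budget is spent
    for _ in range(int(timeout / interval)):
        if poll():
            return True
        time.sleep(interval)
    return False

# usage sketch: wait_for(lambda: job_finished(job_id), timeout=60, interval=2)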
def normalizePointName(value):
"""
Normalizes point name.
* **value** must be a :ref:`type-string`.
* **value** must be at least one character long.
* Returned value will be an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("Point names must be strings, not %s."
% type(value).__name__)
if len(value) < 1:
raise ValueError("Point names must be at least one character long.")
return unicode(value) | Normalizes point name.
* **value** must be a :ref:`type-string`.
* **value** must be at least one character long.
* Returned value will be an unencoded ``unicode`` string. | Below is the instruction that describes the task:
### Input:
Normalizes point name.
* **value** must be a :ref:`type-string`.
* **value** must be at least one character long.
* Returned value will be an unencoded ``unicode`` string.
### Response:
def normalizePointName(value):
"""
Normalizes point name.
* **value** must be a :ref:`type-string`.
* **value** must be at least one character long.
* Returned value will be an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("Point names must be strings, not %s."
% type(value).__name__)
if len(value) < 1:
raise ValueError("Point names must be at least one character long.")
return unicode(value) |
def _validate_required(self, settings, name, value):
"""
Validate a required setting (value cannot be empty)
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Required value to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If value is empty.
Returns:
str: Validated value.
"""
if not value:
raise SettingsInvalidError(("Required value from setting '{name}' "
"must not be "
"empty.").format(name=name))
return value | Validate a required setting (value cannot be empty)
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Required value to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If value is empty.
Returns:
str: Validated value. | Below is the instruction that describes the task:
### Input:
Validate a required setting (value cannot be empty)
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Required value to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If value is empty.
Returns:
str: Validated value.
### Response:
def _validate_required(self, settings, name, value):
"""
Validate a required setting (value cannot be empty)
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Required value to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If value is empty.
Returns:
str: Validated value.
"""
if not value:
raise SettingsInvalidError(("Required value from setting '{name}' "
"must not be "
"empty.").format(name=name))
return value |
def closestConnectedDistance(target, walls=None,
max_len_border_line=500,
max_n_path=100,
concentrate_every_n_pixel=1):
'''
returns an array which contains the closest distance from every pixel
to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
the distance for every 3rd pixel and interpolate in between
values up to 3-5 are recommended
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
'''
c = concentrate_every_n_pixel
assert c >= 1
if walls is None:
walls = np.zeros_like(target, dtype=bool)
s = target.shape
dt = np.uint16
if max(target.shape) < 200:
dt = np.uint8
out = np.zeros((s[0] // c, s[1] // c), dtype=dt)
# temporary arrays:
growth = np.zeros_like(target, dtype=dt)
res = np.empty(shape=3, dtype=dt)
steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
new_steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
# run calculation:
_calc(growth, out, walls, target, steps, new_steps,
res, concentrate_every_n_pixel)
if c > 1:
# if concentrate_every_n_pixel > 1
# the resized output array
# will have wrong values close to the wall
# therefore substitute all wall value (-1)
# with an average of their closest neighbours
interpolate2dStructuredIDW(out, out == 0)
out = cv2.resize(out, s[::-1])
out[walls] = 0
return out | returns an array which contains the closest distance from every pixel
to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
the distance for every 3rd pixel and interpolate in between
values up to 3-5 are recommended
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False | Below is the instruction that describes the task:
### Input:
returns an array which contains the closest distance from every pixel
to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
the distance for every 3rd pixel and interpolate in between
values up to 3-5 are recommended
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
### Response:
def closestConnectedDistance(target, walls=None,
max_len_border_line=500,
max_n_path=100,
concentrate_every_n_pixel=1):
'''
returns an array which contains the closest distance from every pixel
to the next position where target == 1
[walls] binary 2darray - e.g. walls in a labyrinth that have to be surrounded in order to get to the target
[target] binary 2darray - positions given by 1
[concentrate_every_n_pixel] often the distance of neighbour pixels is similar
to speed up calculation set this value to e.g. 3 to calculate only
the distance for every 3rd pixel and interpolate in between
values up to 3-5 are recommended
[max_len_border_line]
this function calculates distances travelled using region growth
e.g.
0123
1123
2223
3333
the last steps (e.g. for all steps 3 border_line=7) are stored in an array of limited
length defined in 'max_len_border_line'
[max_n_path]
how many paths are possible between every pixel and the target
only needed if fast==False
'''
c = concentrate_every_n_pixel
assert c >= 1
if walls is None:
walls = np.zeros_like(target, dtype=bool)
s = target.shape
dt = np.uint16
if max(target.shape) < 200:
dt = np.uint8
out = np.zeros((s[0] // c, s[1] // c), dtype=dt)
# temporary arrays:
growth = np.zeros_like(target, dtype=dt)
res = np.empty(shape=3, dtype=dt)
steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
new_steps = np.empty(shape=(max_len_border_line, 2), dtype=dt)
# run calculation:
_calc(growth, out, walls, target, steps, new_steps,
res, concentrate_every_n_pixel)
if c > 1:
# if concentrate_every_n_pixel > 1
# the resized output array
# will have wrong values close to the wall
# therefore substitute all wall value (-1)
# with an average of their closest neighbours
interpolate2dStructuredIDW(out, out == 0)
out = cv2.resize(out, s[::-1])
out[walls] = 0
return out |
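The region growth at the heart of this function is essentially a breadth-first distance transform: grow outwards from all target pixels at once, refusing to step into walls. A dependency-light sketch of the idea (not of the compiled _calc kernel) on a 3x3 grid:
from collections import deque

def bfs_distance(target, walls):
    h, w = len(target), len(target[0])
    dist = [[None] * w for _ in range(h)]
    queue = deque((r, c) for r in range(h) for c in range(w) if target[r][c])
    for r, c in queue:
        dist[r][c] = 0  # distance is zero on the target itself
    while queue:
        r, c = queue.popleft()
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr < h and 0 <= nc < w and not walls[nr][nc] \
                    and dist[nr][nc] is None:
                dist[nr][nc] = dist[r][c] + 1
                queue.append((nr, nc))
    return dist

target = [[0, 0, 0], [0, 0, 0], [1, 0, 0]]
walls = [[0, 1, 0], [0, 1, 0], [0, 0, 0]]
print(bfs_distance(target, walls))
# [[2, None, 4], [1, None, 3], [0, 1, 2]] - the right column is reached
# around the wall, not through it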
def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size | Download with Kaggle API. | Below is the instruction that describes the task:
### Input:
Download with Kaggle API.
### Response:
def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size |
def _strelka_variant_stats(variant, sample_info):
"""Parse a single sample's variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
"""
if variant.is_deletion or variant.is_insertion:
# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion)
alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion)
depth = ref_depth + alt_depth
else:
# Retrieve the Tier 1 counts from Strelka
ref_depth = int(sample_info[variant.ref+"U"][0])
alt_depth = int(sample_info[variant.alt+"U"][0])
depth = alt_depth + ref_depth
if depth > 0:
vaf = float(alt_depth) / depth
else:
# unclear how to define vaf if no reads support variant
# up to user to interpret this (hopefully filtered out in QC settings)
vaf = None
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf) | Parse a single sample's variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats | Below is the instruction that describes the task:
### Input:
Parse a single sample's variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
### Response:
def _strelka_variant_stats(variant, sample_info):
"""Parse a single sample's variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
"""
if variant.is_deletion or variant.is_insertion:
# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion)
alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion)
depth = ref_depth + alt_depth
else:
# Retrieve the Tier 1 counts from Strelka
ref_depth = int(sample_info[variant.ref+"U"][0])
alt_depth = int(sample_info[variant.alt+"U"][0])
depth = alt_depth + ref_depth
if depth > 0:
vaf = float(alt_depth) / depth
else:
# unclear how to define vaf if no reads support variant
# up to user to interpret this (hopefully filtered out in QC settings)
vaf = None
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf) |
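The depth and VAF arithmetic on concrete numbers (tier-1 counts made up for illustration): 30 reference-supporting reads plus 10 alternate-supporting reads give a depth of 40 and a VAF of 0.25:
ref_depth, alt_depth = 30, 10  # illustrative tier-1 read counts
depth = ref_depth + alt_depth
vaf = float(alt_depth) / depth if depth > 0 else None
print(depth, alt_depth, vaf)  # 40 10 0.25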
def first(self):
"""Gets item with highest priority. Performance: O(1)"""
with self.lock:
try:
return self.data[0][0]
except IndexError as ex:
ex.args = ('DEPQ is empty',)
raise | Gets item with highest priority. Performance: O(1) | Below is the instruction that describes the task:
### Input:
Gets item with highest priority. Performance: O(1)
### Response:
def first(self):
"""Gets item with highest priority. Performance: O(1)"""
with self.lock:
try:
return self.data[0][0]
except IndexError as ex:
ex.args = ('DEPQ is empty',)
raise |
def all_true(a, indexes):
'''Find which vectors have all-true elements
Given an array, "a" and indexes into the first elements of vectors
within that array, return an array where each element is true if
all elements of the corresponding vector are true.
Example: a = [ 1,1,0,1,1,1,1], indexes=[0,3]
vectors = [[1,1,0],[1,1,1,1]]
return = [False, True]
'''
if len(indexes) == 0:
return np.zeros(0,bool)
elif len(indexes) == 1:
return np.all(a)
cs = np.zeros(len(a)+1,int)
cs[1:] = np.cumsum(a)
augmented_indexes = np.zeros(len(indexes)+1, int)
augmented_indexes[0:-1] = indexes + 1
augmented_indexes[-1] = len(a) + 1
counts = augmented_indexes[1:]-augmented_indexes[0:-1]
hits = cs[augmented_indexes[1:]-1] - cs[augmented_indexes[0:-1]-1]
return counts == hits | Find which vectors have all-true elements
Given an array, "a" and indexes into the first elements of vectors
within that array, return an array where each element is true if
all elements of the corresponding vector are true.
Example: a = [ 1,1,0,1,1,1,1], indexes=[0,3]
vectors = [[1,1,0],[1,1,1,1]]
return = [False, True] | Below is the instruction that describes the task:
### Input:
Find which vectors have all-true elements
Given an array, "a" and indexes into the first elements of vectors
within that array, return an array where each element is true if
all elements of the corresponding vector are true.
Example: a = [ 1,1,0,1,1,1,1], indexes=[0,3]
vectors = [[1,1,0],[1,1,1,1]]
return = [False, True]
### Response:
def all_true(a, indexes):
'''Find which vectors have all-true elements
Given an array, "a" and indexes into the first elements of vectors
within that array, return an array where each element is true if
all elements of the corresponding vector are true.
Example: a = [ 1,1,0,1,1,1,1], indexes=[0,3]
vectors = [[1,1,0],[1,1,1,1]]
return = [False, True]
'''
if len(indexes) == 0:
return np.zeros(0,bool)
elif len(indexes) == 1:
return np.all(a)
cs = np.zeros(len(a)+1,int)
cs[1:] = np.cumsum(a)
augmented_indexes = np.zeros(len(indexes)+1, int)
augmented_indexes[0:-1] = indexes + 1
augmented_indexes[-1] = len(a) + 1
counts = augmented_indexes[1:]-augmented_indexes[0:-1]
hits = cs[augmented_indexes[1:]-1] - cs[augmented_indexes[0:-1]-1]
return counts == hits |
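The counting trick can be checked with a short NumPy snippet: a vector is all-true exactly when its number of true elements equals its length (np.add.reduceat is used here as a compact equivalent of the cumulative-sum bookkeeping above):
import numpy as np

a = np.array([1, 1, 0, 1, 1, 1, 1])
indexes = np.array([0, 3])                    # vectors are a[0:3] and a[3:7]
counts = np.diff(np.append(indexes, len(a)))  # vector lengths: [3 4]
hits = np.add.reduceat(a, indexes)            # true elements:  [2 4]
print(counts == hits)                         # [False  True]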
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator):
"""
Simply joins the left and right hand arguments lhs and rhs with an operator.
:param lhsAST:
:param rhsAST:
:param es:
:param operator:
:return:
"""
if isinstance(lhsAST, wdl_parser.Terminal):
if lhsAST.str == 'string':
es = es + '"{string}"'.format(string=lhsAST.source_string)
else:
es = es + '{string}'.format(string=lhsAST.source_string)
elif isinstance(lhsAST, wdl_parser.Ast):
es = es + self.parse_declaration_expressn(lhsAST, es='')
elif isinstance(lhsAST, wdl_parser.AstList):
raise NotImplementedError
es = es + operator
if isinstance(rhsAST, wdl_parser.Terminal):
if rhsAST.str == 'string':
es = es + '"{string}"'.format(string=rhsAST.source_string)
else:
es = es + '{string}'.format(string=rhsAST.source_string)
elif isinstance(rhsAST, wdl_parser.Ast):
es = es + self.parse_declaration_expressn(rhsAST, es='')
elif isinstance(rhsAST, wdl_parser.AstList):
raise NotImplementedError
return es | Simply joins the left and right hand arguments lhs and rhs with an operator.
:param lhsAST:
:param rhsAST:
:param es:
:param operator:
:return: | Below is the instruction that describes the task:
### Input:
Simply joins the left and right hand arguments lhs and rhs with an operator.
:param lhsAST:
:param rhsAST:
:param es:
:param operator:
:return:
### Response:
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator):
"""
Simply joins the left and right hand arguments lhs and rhs with an operator.
:param lhsAST:
:param rhsAST:
:param es:
:param operator:
:return:
"""
if isinstance(lhsAST, wdl_parser.Terminal):
if lhsAST.str == 'string':
es = es + '"{string}"'.format(string=lhsAST.source_string)
else:
es = es + '{string}'.format(string=lhsAST.source_string)
elif isinstance(lhsAST, wdl_parser.Ast):
es = es + self.parse_declaration_expressn(lhsAST, es='')
elif isinstance(lhsAST, wdl_parser.AstList):
raise NotImplementedError
es = es + operator
if isinstance(rhsAST, wdl_parser.Terminal):
if rhsAST.str == 'string':
es = es + '"{string}"'.format(string=rhsAST.source_string)
else:
es = es + '{string}'.format(string=rhsAST.source_string)
elif isinstance(rhsAST, wdl_parser.Ast):
es = es + self.parse_declaration_expressn(rhsAST, es='')
elif isinstance(rhsAST, wdl_parser.AstList):
raise NotImplementedError
return es |
def sort(self, key_or_list, direction=None):
"""Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed
"""
self.__check_okay_to_chain()
keys = helpers._index_list(key_or_list, direction)
self.__ordering = helpers._index_document(keys)
return self | Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed | Below is the instruction that describes the task:
### Input:
Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed
### Response:
def sort(self, key_or_list, direction=None):
"""Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Beginning with MongoDB version 2.6, text search results can be
sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed
"""
self.__check_okay_to_chain()
keys = helpers._index_list(key_or_list, direction)
self.__ordering = helpers._index_document(keys)
return self |
def trigger_event(self, element, event, event_type=None, options=None):
"""
:Description: Trigger specified event of the given element.
:param element: Element for browser instance to target.
:type element: WebElement, (WebElement, ...)
:param event: Event to trigger from target element.
:type event: string, (string, ...)
:param event_type: Event type.
:type event_type: string
:example: 'KeyboardEvent'
:param options: Event options.
:example: { 'bubbles': True, 'cancelable': False }
:type options: dict
"""
if not isinstance(element, (tuple, list)):
element = [element]
if not isinstance(event, (tuple, list)):
event = [event]
for el in element:
for e in event:
self.browser.execute_script(
'e = new %s("%s"); ops = %s; if (ops) {for(key in ops) { \
Object.defineProperty(e, key, { value: ops[key], configurable: true }) \
}} arguments[0].dispatchEvent(e)' % (
event_type if event_type else 'Event',
e, json.dumps(options) if options else 'undefined'
), el) | :Description: Trigger specified event of the given element.
:param element: Element for browser instance to target.
:type element: WebElement, (WebElement, ...)
:param event: Event to trigger from target element.
:type event: string, (string, ...)
:param event_type: Event type.
:type event_type: string
:example: 'KeyboardEvent'
:param options: Event options.
:example: { 'bubbles': True, 'cancelable': False }
:type options: dict | Below is the instruction that describes the task:
### Input:
:Description: Trigger specified event of the given element.
:param element: Element for browser instance to target.
:type element: WebElement, (WebElement, ...)
:param event: Event to trigger from target element.
:type event: string, (string, ...)
:param event_type: Event type.
:type event_type: string
:example: 'KeyboardEvent'
:param options: Event options.
:example: { 'bubbles': True, 'cancelable': False }
:type options: dict
### Response:
def trigger_event(self, element, event, event_type=None, options=None):
"""
:Description: Trigger specified event of the given element.
:param element: Element for browser instance to target.
:type element: WebElement, (WebElement, ...)
:param event: Event to trigger from target element.
:type event: string, (string, ...)
:param event_type: Event type.
:type event_type: string
:example: 'KeyboardEvent'
:param options: Event options.
:example: { 'bubbles': True, 'cancelable': False }
:type options: dict
"""
if not isinstance(element, (tuple, list)):
element = [element]
if not isinstance(event, (tuple, list)):
event = [event]
for el in element:
for e in event:
self.browser.execute_script(
'e = new %s("%s"); ops = %s; if (ops) {for(key in ops) { \
Object.defineProperty(e, key, { value: ops[key], configurable: true }) \
}} arguments[0].dispatchEvent(e)' % (
event_type if event_type else 'Event',
e, json.dumps(options) if options else 'undefined'
), el) |
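The JavaScript payload handed to execute_script can be inspected without a browser; the event name and options below are illustrative only:
import json

event_type, e = 'KeyboardEvent', 'keydown'
options = {'bubbles': True, 'cancelable': False}
script = ('e = new %s("%s"); ops = %s; if (ops) {for(key in ops) { '
          'Object.defineProperty(e, key, { value: ops[key], configurable: true }) '
          '}} arguments[0].dispatchEvent(e)' % (
              event_type if event_type else 'Event',
              e, json.dumps(options) if options else 'undefined'))
print(script)  # the snippet dispatched against each matched element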
def _do_layout(self):
"""Layout sizers"""
dialog_main_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer = wx.BoxSizer(wx.HORIZONTAL)
lower_sizer = wx.FlexGridSizer(2, 1, 5, 0)
lower_sizer.AddGrowableRow(0)
lower_sizer.AddGrowableCol(0)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer.Add(self.codetext_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(self.result_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(button_sizer, 1, wx.EXPAND, 0)
button_sizer.Add(self.apply_button, 1, wx.EXPAND, 0)
self.upper_panel.SetSizer(upper_sizer)
self.lower_panel.SetSizer(lower_sizer)
sash_50 = int(round((config["window_size"][1] - 100) * 0.5))
self.splitter.SplitHorizontally(self.upper_panel,
self.lower_panel, sash_50)
dialog_main_sizer.Add(self.splitter, 1, wx.EXPAND, 0)
self.SetSizer(dialog_main_sizer)
self.Layout() | Layout sizers | Below is the instruction that describes the task:
### Input:
Layout sizers
### Response:
def _do_layout(self):
"""Layout sizers"""
dialog_main_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer = wx.BoxSizer(wx.HORIZONTAL)
lower_sizer = wx.FlexGridSizer(2, 1, 5, 0)
lower_sizer.AddGrowableRow(0)
lower_sizer.AddGrowableCol(0)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer.Add(self.codetext_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(self.result_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(button_sizer, 1, wx.EXPAND, 0)
button_sizer.Add(self.apply_button, 1, wx.EXPAND, 0)
self.upper_panel.SetSizer(upper_sizer)
self.lower_panel.SetSizer(lower_sizer)
sash_50 = int(round((config["window_size"][1] - 100) * 0.5))
self.splitter.SplitHorizontally(self.upper_panel,
self.lower_panel, sash_50)
dialog_main_sizer.Add(self.splitter, 1, wx.EXPAND, 0)
self.SetSizer(dialog_main_sizer)
self.Layout() |
def get_tunnel_info_output_tunnel_dest_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_info = ET.Element("get_tunnel_info")
config = get_tunnel_info
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
dest_ip = ET.SubElement(tunnel, "dest-ip")
dest_ip.text = kwargs.pop('dest_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_tunnel_info_output_tunnel_dest_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_info = ET.Element("get_tunnel_info")
config = get_tunnel_info
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
dest_ip = ET.SubElement(tunnel, "dest-ip")
dest_ip.text = kwargs.pop('dest_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _get_entity(service_instance, entity):
'''
Returns the entity associated with the entity dict representation
Supported entities: cluster, vcenter
Expected entity format:
.. code-block:: python
cluster:
{'type': 'cluster',
'datacenter': <datacenter_name>,
'cluster': <cluster_name>}
vcenter:
{'type': 'vcenter'}
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
entity
Entity dict in the format above
'''
log.trace('Retrieving entity: %s', entity)
if entity['type'] == 'cluster':
dc_ref = salt.utils.vmware.get_datacenter(service_instance,
entity['datacenter'])
return salt.utils.vmware.get_cluster(dc_ref, entity['cluster'])
elif entity['type'] == 'vcenter':
return None
raise ArgumentValueError('Unsupported entity type \'{0}\''
''.format(entity['type'])) | Returns the entity associated with the entity dict representation
Supported entities: cluster, vcenter
Expected entity format:
.. code-block:: python
cluster:
{'type': 'cluster',
'datacenter': <datacenter_name>,
'cluster': <cluster_name>}
vcenter:
{'type': 'vcenter'}
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
entity
Entity dict in the format above | Below is the instruction that describes the task:
### Input:
Returns the entity associated with the entity dict representation
Supported entities: cluster, vcenter
Expected entity format:
.. code-block:: python
cluster:
{'type': 'cluster',
'datacenter': <datacenter_name>,
'cluster': <cluster_name>}
vcenter:
{'type': 'vcenter'}
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
entity
Entity dict in the format above
### Response:
def _get_entity(service_instance, entity):
'''
Returns the entity associated with the entity dict representation
Supported entities: cluster, vcenter
Expected entity format:
.. code-block:: python
cluster:
{'type': 'cluster',
'datacenter': <datacenter_name>,
'cluster': <cluster_name>}
vcenter:
{'type': 'vcenter'}
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
entity
Entity dict in the format above
'''
log.trace('Retrieving entity: %s', entity)
if entity['type'] == 'cluster':
dc_ref = salt.utils.vmware.get_datacenter(service_instance,
entity['datacenter'])
return salt.utils.vmware.get_cluster(dc_ref, entity['cluster'])
elif entity['type'] == 'vcenter':
return None
raise ArgumentValueError('Unsupported entity type \'{0}\''
''.format(entity['type'])) |
def get_doc(self, tag_name):
"Get documentation for the first tag matching the given name"
for tag,func in self.tags:
if tag.startswith(tag_name) and func.__doc__:
return func.__doc__ | Get documentation for the first tag matching the given name | Below is the instruction that describes the task:
### Input:
Get documentation for the first tag matching the given name
### Response:
def get_doc(self, tag_name):
"Get documentation for the first tag matching the given name"
for tag,func in self.tags:
if tag.startswith(tag_name) and func.__doc__:
return func.__doc__ |
def get_schema_descendant(
self, route: SchemaRoute) -> Optional[SchemaNode]:
"""Return descendant schema node or ``None`` if not found.
Args:
route: Schema route to the descendant node
(relative to the receiver).
"""
node = self
for p in route:
node = node.get_child(*p)
if node is None:
return None
return node | Return descendant schema node or ``None`` if not found.
Args:
route: Schema route to the descendant node
(relative to the receiver). | Below is the instruction that describes the task:
### Input:
Return descendant schema node or ``None`` if not found.
Args:
route: Schema route to the descendant node
(relative to the receiver).
### Response:
def get_schema_descendant(
self, route: SchemaRoute) -> Optional[SchemaNode]:
"""Return descendant schema node or ``None`` if not found.
Args:
route: Schema route to the descendant node
(relative to the receiver).
"""
node = self
for p in route:
node = node.get_child(*p)
if node is None:
return None
return node |
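The walk in miniature, with a toy node class standing in for the real SchemaNode; each route entry is a tuple that gets unpacked into get_child(*p):
class Node:
    def __init__(self, children=None):
        self.children = children or {}
    def get_child(self, name, ns=None):
        return self.children.get((name, ns))

leaf = Node()
root = Node({('interfaces', 'ietf'): Node({('interface', 'ietf'): leaf})})

node = root
for p in [('interfaces', 'ietf'), ('interface', 'ietf')]:
    node = node.get_child(*p)
    if node is None:
        break
print(node is leaf)  # True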
def brunt_vaisala_frequency_squared(heights, potential_temperature, axis=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based on Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
heights : array-like
One-dimensional profile of atmospheric height
potential_temperature : array-like
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
array-like
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=heights, axis=axis) | r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based on Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
heights : array-like
One-dimensional profile of atmospheric height
potential_temperature : array-like
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
array-like
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature | Below is the instruction that describes the task:
### Input:
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based on Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
heights : array-like
One-dimensional profile of atmospheric height
potential_temperature : array-like
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
array-like
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
### Response:
def brunt_vaisala_frequency_squared(heights, potential_temperature, axis=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based on Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
heights : array-like
One-dimensional profile of atmospheric height
potential_temperature : array-like
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
array-like
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=heights, axis=axis) |
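The same formula in bare NumPy, without metpy's unit handling; the profile values are illustrative, not taken from any real sounding:
import numpy as np

g = 9.80665                                        # m s^-2
heights = np.array([0.0, 500.0, 1000.0, 1500.0])   # m
theta = np.array([300.0, 301.5, 303.0, 304.5])     # K, a 3 K/km theta lapse

n_squared = g / theta * np.gradient(theta, heights)
print(n_squared)  # ~1e-4 s^-2 at every level, i.e. a statically stable profile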
def get_topic_list(num=10, top_items=False):
"""
Returns a list of top recent topics, excluding less valuable forums.
Default is 10 topics.
Can be sorted for most active topics by votes and post count.
Usage:
{% get_topic_list 5 as topics %}
{% get_topic_list 7 top_items=True as topics %}
"""
excluded_forum_ids = [3, 7, 10, 12, 15, 16, 17, 18, 19, 23]
topics = Topic.objects.exclude(forum_id__in=excluded_forum_ids).order_by('-id')[0:num]
if top_items:
topics = sorted(list(topics), key=lambda t: (t.forum_id, -t.votes, -t.post_count))
return topics | Returns a list of top recent topics, excluding less valuable forums.
Default is 10 topics.
Can be sorted for most active topics by votes and post count.
Usage:
{% get_topic_list 5 as topics %}
{% get_topic_list 7 top_items=True as topics %} | Below is the instruction that describes the task:
### Input:
Returns a list of top recent topics, excluding less valuable forums.
Default is 10 topics.
Can be sorted for most active topics by votes and post count.
Usage:
{% get_topic_list 5 as topics %}
{% get_topic_list 7 top_items=True as topics %}
### Response:
def get_topic_list(num=10, top_items=False):
"""
Returns a list of top recent topics, excluding less valuable forums.
Default is 10 topics.
Can be sorted for most active topics by votes and post count.
Usage:
{% get_topic_list 5 as topics %}
{% get_topic_list 7 top_items=True as topics %}
"""
excluded_forum_ids = [3, 7, 10, 12, 15, 16, 17, 18, 19, 23]
topics = Topic.objects.exclude(forum_id__in=excluded_forum_ids).order_by('-id')[0:num]
if top_items:
topics = sorted(list(topics), key=lambda t: (t.forum_id, -t.votes, -t.post_count))
return topics |
def view_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#create-view"
api_path = "/api/v2/views.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/views#create-view | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/views#create-view
### Response:
def view_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#create-view"
api_path = "/api/v2/views.json"
return self.call(api_path, method="POST", data=data, **kwargs) |
def _set_ovsdb_server(self, v, load=False):
"""
Setter method for ovsdb_server, mapped from YANG variable /ovsdb_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ovsdb_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ovsdb_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ovsdb_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
})
self.__ovsdb_server = t
if hasattr(self, '_set'):
self._set() | Setter method for ovsdb_server, mapped from YANG variable /ovsdb_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ovsdb_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ovsdb_server() directly. | Below is the instruction that describes the task:
### Input:
Setter method for ovsdb_server, mapped from YANG variable /ovsdb_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ovsdb_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ovsdb_server() directly.
### Response:
def _set_ovsdb_server(self, v, load=False):
"""
Setter method for ovsdb_server, mapped from YANG variable /ovsdb_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ovsdb_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ovsdb_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ovsdb_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
})
self.__ovsdb_server = t
if hasattr(self, '_set'):
self._set() |
def check_theme(theme):
"""
Check if the given theme is compatible with the terminal
"""
terminal_colors = curses.COLORS if curses.has_colors() else 0
if theme.required_colors > terminal_colors:
return False
elif theme.required_color_pairs > curses.COLOR_PAIRS:
return False
else:
return True | Check if the given theme is compatible with the terminal | Below is the instruction that describes the task:
### Input:
Check if the given theme is compatible with the terminal
### Response:
def check_theme(theme):
"""
Check if the given theme is compatible with the terminal
"""
terminal_colors = curses.COLORS if curses.has_colors() else 0
if theme.required_colors > terminal_colors:
return False
elif theme.required_color_pairs > curses.COLOR_PAIRS:
return False
else:
return True |
def subpackets(*items):
"""Serialize several GPG subpackets."""
prefixed = [subpacket_prefix_len(item) for item in items]
return util.prefix_len('>H', b''.join(prefixed)) | Serialize several GPG subpackets. | Below is the instruction that describes the task:
### Input:
Serialize several GPG subpackets.
### Response:
def subpackets(*items):
"""Serialize several GPG subpackets."""
prefixed = [subpacket_prefix_len(item) for item in items]
return util.prefix_len('>H', b''.join(prefixed)) |
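A sketch of what the length prefixing plausibly looks like; the real util.prefix_len and subpacket_prefix_len may well differ (OpenPGP subpackets use a variable-length encoding), so the one-byte per-item prefix below is only a stand-in:
import struct

def prefix_len(fmt, blob):
    # prepend the blob's length, packed with the given struct format
    return struct.pack(fmt, len(blob)) + blob

items = [b'\x02\x1b\x08', b'\x05\x16\x00']
body = b''.join(prefix_len('>B', item) for item in items)  # per-item prefixes
print(prefix_len('>H', body).hex())  # 000803021b0803051600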
def _on_model_delete(sender, **kwargs):
"""When a model gets deleted."""
instance = kwargs['instance']
signals.delete.send(sender, pk=instance.pk) | When a model gets deleted. | Below is the instruction that describes the task:
### Input:
When a model gets deleted.
### Response:
def _on_model_delete(sender, **kwargs):
"""When a model gets deleted."""
instance = kwargs['instance']
signals.delete.send(sender, pk=instance.pk) |