code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result | Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached. | Below is the the instruction that describes the task:
### Input:
Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached.
### Response:
def checkcache(filename=None, opts=False):
"""Discard cache entries that are out of date. If *filename* is *None*
all entries in the file cache *file_cache* are checked. If we do not
have stat information about a file it will be kept. Return a list of
invalidated filenames. None is returned if a filename was given but
not found cached."""
if isinstance(opts, dict):
use_linecache_lines = opts['use_linecache_lines']
else:
use_linecache_lines = opts
pass
if not filename:
filenames = list(file_cache.keys())
elif filename in file_cache:
filenames = [filename]
else:
return None
result = []
for filename in filenames:
if filename not in file_cache: continue
path = file_cache[filename].path
if os.path.exists(path):
cache_info = file_cache[filename].stat
stat = os.stat(path)
if stat and \
(cache_info.st_size != stat.st_size or
cache_info.st_mtime != stat.st_mtime):
result.append(filename)
update_cache(filename, use_linecache_lines)
else:
result.append(filename)
update_cache(filename)
pass
pass
return result |
async def get_vm(self, vm_id):
""" Dummy get_vm func """
if vm_id not in self._vms:
raise DummyIaasVmNotFound()
return self._vms[vm_id] | Dummy get_vm func | Below is the the instruction that describes the task:
### Input:
Dummy get_vm func
### Response:
async def get_vm(self, vm_id):
""" Dummy get_vm func """
if vm_id not in self._vms:
raise DummyIaasVmNotFound()
return self._vms[vm_id] |
def title_prefix_json(soup):
"titlePrefix with capitalisation changed"
prefix = title_prefix(soup)
prefix_rewritten = elifetools.json_rewrite.rewrite_json("title_prefix_json", soup, prefix)
return prefix_rewritten | titlePrefix with capitalisation changed | Below is the the instruction that describes the task:
### Input:
titlePrefix with capitalisation changed
### Response:
def title_prefix_json(soup):
"titlePrefix with capitalisation changed"
prefix = title_prefix(soup)
prefix_rewritten = elifetools.json_rewrite.rewrite_json("title_prefix_json", soup, prefix)
return prefix_rewritten |
def agent_path(cls, project, agent):
"""Return a fully-qualified agent string."""
return google.api_core.path_template.expand(
'projects/{project}/agents/{agent}',
project=project,
agent=agent,
) | Return a fully-qualified agent string. | Below is the the instruction that describes the task:
### Input:
Return a fully-qualified agent string.
### Response:
def agent_path(cls, project, agent):
"""Return a fully-qualified agent string."""
return google.api_core.path_template.expand(
'projects/{project}/agents/{agent}',
project=project,
agent=agent,
) |
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for table_key, json_records in six.iteritems(self._buffer):
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(
self._make_table_name(),
["key", "value"],
[record for record in json_records.items()],
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(),
) | :raises ValueError:
:raises pytablereader.error.ValidationError: | Below is the the instruction that describes the task:
### Input:
:raises ValueError:
:raises pytablereader.error.ValidationError:
### Response:
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for table_key, json_records in six.iteritems(self._buffer):
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(
self._make_table_name(),
["key", "value"],
[record for record in json_records.items()],
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(),
) |
def apply_markup(value, arg=None):
"""
Applies text-to-HTML conversion.
Takes an optional argument to specify the name of a filter to use.
"""
if arg is not None:
return formatter(value, filter_name=arg)
return formatter(value) | Applies text-to-HTML conversion.
Takes an optional argument to specify the name of a filter to use. | Below is the the instruction that describes the task:
### Input:
Applies text-to-HTML conversion.
Takes an optional argument to specify the name of a filter to use.
### Response:
def apply_markup(value, arg=None):
"""
Applies text-to-HTML conversion.
Takes an optional argument to specify the name of a filter to use.
"""
if arg is not None:
return formatter(value, filter_name=arg)
return formatter(value) |
def api_request(self, url, **params):
"""
Make a trello API request. This takes an absolute url (without protocol
and host) and a list of argumnets and return a GET request with the
key and token from the configuration
"""
params['key'] = self.config.get('api_key'),
params['token'] = self.config.get('token'),
url = "https://api.trello.com" + url
return self.json_response(requests.get(url, params=params)) | Make a trello API request. This takes an absolute url (without protocol
and host) and a list of argumnets and return a GET request with the
key and token from the configuration | Below is the the instruction that describes the task:
### Input:
Make a trello API request. This takes an absolute url (without protocol
and host) and a list of argumnets and return a GET request with the
key and token from the configuration
### Response:
def api_request(self, url, **params):
"""
Make a trello API request. This takes an absolute url (without protocol
and host) and a list of argumnets and return a GET request with the
key and token from the configuration
"""
params['key'] = self.config.get('api_key'),
params['token'] = self.config.get('token'),
url = "https://api.trello.com" + url
return self.json_response(requests.get(url, params=params)) |
def trigger_event(self, module_name, event):
"""
Trigger an event on a named module.
"""
if module_name:
self._py3_wrapper.events_thread.process_event(module_name, event) | Trigger an event on a named module. | Below is the the instruction that describes the task:
### Input:
Trigger an event on a named module.
### Response:
def trigger_event(self, module_name, event):
"""
Trigger an event on a named module.
"""
if module_name:
self._py3_wrapper.events_thread.process_event(module_name, event) |
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json()) | Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store. | Below is the the instruction that describes the task:
### Input:
Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
### Response:
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json()) |
def clouds(opts):
'''
Return the cloud functions
'''
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(opts,
'clouds',
'cloud',
base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
int_type='clouds'),
opts,
tag='clouds',
pack={'__utils__': salt.loader.utils(opts),
'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
'\'%s\' has been marked as not supported. Removing from the '
'list of supported cloud functions', funcname
)
functions.pop(funcname, None)
return functions | Return the cloud functions | Below is the the instruction that describes the task:
### Input:
Return the cloud functions
### Response:
def clouds(opts):
'''
Return the cloud functions
'''
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(opts,
'clouds',
'cloud',
base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
int_type='clouds'),
opts,
tag='clouds',
pack={'__utils__': salt.loader.utils(opts),
'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
'\'%s\' has been marked as not supported. Removing from the '
'list of supported cloud functions', funcname
)
functions.pop(funcname, None)
return functions |
def remove_filter(self, server_id, filter_path):
"""
Remove an indication filter from a WBEM server, by deleting the
indication filter instance in the WBEM server.
The indication filter must be owned or permanent (i.e. not static).
This method verifies that there are not currently any subscriptions on
the specified indication filter, in order to handle server
implementations that do not ensure that on the server side as required
by :term:`DSP1054`.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
filter_path (:class:`~pywbem.CIMInstanceName`):
Instance path of the indication filter instance in the WBEM
server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_FAILED, if there are referencing subscriptions.
"""
# Validate server_id
server = self._get_server(server_id)
conn_id = server.conn.conn_id if server.conn is not None else None
# Verify referencing subscriptions.
ref_paths = server.conn.ReferenceNames(
filter_path, ResultClass=SUBSCRIPTION_CLASSNAME)
if ref_paths:
# DSP1054 1.2 defines that this CIM error is raised by the server
# in that case, so we simulate that behavior on the client side.
raise CIMError(
CIM_ERR_FAILED,
"The indication filter is referenced by subscriptions.",
conn_id=conn_id)
server.conn.DeleteInstance(filter_path)
inst_list = self._owned_filters[server_id]
# We iterate backwards because we change the list
for i in six.moves.range(len(inst_list) - 1, -1, -1):
inst = inst_list[i]
if inst.path == filter_path:
del inst_list[i] | Remove an indication filter from a WBEM server, by deleting the
indication filter instance in the WBEM server.
The indication filter must be owned or permanent (i.e. not static).
This method verifies that there are not currently any subscriptions on
the specified indication filter, in order to handle server
implementations that do not ensure that on the server side as required
by :term:`DSP1054`.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
filter_path (:class:`~pywbem.CIMInstanceName`):
Instance path of the indication filter instance in the WBEM
server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_FAILED, if there are referencing subscriptions. | Below is the the instruction that describes the task:
### Input:
Remove an indication filter from a WBEM server, by deleting the
indication filter instance in the WBEM server.
The indication filter must be owned or permanent (i.e. not static).
This method verifies that there are not currently any subscriptions on
the specified indication filter, in order to handle server
implementations that do not ensure that on the server side as required
by :term:`DSP1054`.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
filter_path (:class:`~pywbem.CIMInstanceName`):
Instance path of the indication filter instance in the WBEM
server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_FAILED, if there are referencing subscriptions.
### Response:
def remove_filter(self, server_id, filter_path):
"""
Remove an indication filter from a WBEM server, by deleting the
indication filter instance in the WBEM server.
The indication filter must be owned or permanent (i.e. not static).
This method verifies that there are not currently any subscriptions on
the specified indication filter, in order to handle server
implementations that do not ensure that on the server side as required
by :term:`DSP1054`.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
filter_path (:class:`~pywbem.CIMInstanceName`):
Instance path of the indication filter instance in the WBEM
server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_FAILED, if there are referencing subscriptions.
"""
# Validate server_id
server = self._get_server(server_id)
conn_id = server.conn.conn_id if server.conn is not None else None
# Verify referencing subscriptions.
ref_paths = server.conn.ReferenceNames(
filter_path, ResultClass=SUBSCRIPTION_CLASSNAME)
if ref_paths:
# DSP1054 1.2 defines that this CIM error is raised by the server
# in that case, so we simulate that behavior on the client side.
raise CIMError(
CIM_ERR_FAILED,
"The indication filter is referenced by subscriptions.",
conn_id=conn_id)
server.conn.DeleteInstance(filter_path)
inst_list = self._owned_filters[server_id]
# We iterate backwards because we change the list
for i in six.moves.range(len(inst_list) - 1, -1, -1):
inst = inst_list[i]
if inst.path == filter_path:
del inst_list[i] |
def load(type_tuple, into=None):
"""
Determine all types touched by loading the type and deposit them into
the particular namespace.
"""
type_dict = {}
TypeFactory.new(type_dict, *type_tuple)
deposit = into if (into is not None and isinstance(into, dict)) else {}
for reified_type in type_dict.values():
deposit[reified_type.__name__] = reified_type
return deposit | Determine all types touched by loading the type and deposit them into
the particular namespace. | Below is the the instruction that describes the task:
### Input:
Determine all types touched by loading the type and deposit them into
the particular namespace.
### Response:
def load(type_tuple, into=None):
"""
Determine all types touched by loading the type and deposit them into
the particular namespace.
"""
type_dict = {}
TypeFactory.new(type_dict, *type_tuple)
deposit = into if (into is not None and isinstance(into, dict)) else {}
for reified_type in type_dict.values():
deposit[reified_type.__name__] = reified_type
return deposit |
def _run_sm_scale_in(self, C_out, scale_sm=91.1876):
"""Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale."""
# initialize an empty SMEFT instance
smeft_sm = SMEFT(wc=None)
C_in_sm = smeftutil.C_array2dict(np.zeros(9999))
# set the SM parameters to the values obtained from smpar.smeftpar
C_SM = smpar.smeftpar(scale_sm, C_out, basis='Warsaw')
SM_keys = set(smeftutil.SM_keys) # to speed up lookup
C_SM = {k: v for k, v in C_SM.items() if k in SM_keys}
# set the Wilson coefficients at the EW scale to C_out
C_in_sm.update(C_out)
C_in_sm.update(C_SM)
smeft_sm._set_initial(C_in_sm, scale_sm)
# run up (with 1% relative precision, ignore running of Wilson coefficients)
C_SM_high = smeft_sm._rgevolve(self.scale_in, newphys=False, rtol=0.001, atol=1)
C_SM_high = self._rotate_defaultbasis(C_SM_high)
return {k: v for k, v in C_SM_high.items() if k in SM_keys} | Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale. | Below is the the instruction that describes the task:
### Input:
Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale.
### Response:
def _run_sm_scale_in(self, C_out, scale_sm=91.1876):
"""Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale."""
# initialize an empty SMEFT instance
smeft_sm = SMEFT(wc=None)
C_in_sm = smeftutil.C_array2dict(np.zeros(9999))
# set the SM parameters to the values obtained from smpar.smeftpar
C_SM = smpar.smeftpar(scale_sm, C_out, basis='Warsaw')
SM_keys = set(smeftutil.SM_keys) # to speed up lookup
C_SM = {k: v for k, v in C_SM.items() if k in SM_keys}
# set the Wilson coefficients at the EW scale to C_out
C_in_sm.update(C_out)
C_in_sm.update(C_SM)
smeft_sm._set_initial(C_in_sm, scale_sm)
# run up (with 1% relative precision, ignore running of Wilson coefficients)
C_SM_high = smeft_sm._rgevolve(self.scale_in, newphys=False, rtol=0.001, atol=1)
C_SM_high = self._rotate_defaultbasis(C_SM_high)
return {k: v for k, v in C_SM_high.items() if k in SM_keys} |
def format(element):
"""Formats all of the docstrings in the specified element and its
children into a user-friendly paragraph format for printing.
:arg element: an instance of fortpy.element.CodeElement.
"""
result = []
if type(element).__name__ in ["Subroutine", "Function"]:
_format_executable(result, element)
elif type(element).__name__ == "CustomType":
_format_type(result, element)
elif isinstance(element, ValueElement):
_format_value_element(result, element)
return '\n'.join(result) | Formats all of the docstrings in the specified element and its
children into a user-friendly paragraph format for printing.
:arg element: an instance of fortpy.element.CodeElement. | Below is the the instruction that describes the task:
### Input:
Formats all of the docstrings in the specified element and its
children into a user-friendly paragraph format for printing.
:arg element: an instance of fortpy.element.CodeElement.
### Response:
def format(element):
"""Formats all of the docstrings in the specified element and its
children into a user-friendly paragraph format for printing.
:arg element: an instance of fortpy.element.CodeElement.
"""
result = []
if type(element).__name__ in ["Subroutine", "Function"]:
_format_executable(result, element)
elif type(element).__name__ == "CustomType":
_format_type(result, element)
elif isinstance(element, ValueElement):
_format_value_element(result, element)
return '\n'.join(result) |
def GetParserProp(self, prop):
"""Read the parser internal property. """
ret = libxml2mod.xmlTextReaderGetParserProp(self._o, prop)
return ret | Read the parser internal property. | Below is the the instruction that describes the task:
### Input:
Read the parser internal property.
### Response:
def GetParserProp(self, prop):
"""Read the parser internal property. """
ret = libxml2mod.xmlTextReaderGetParserProp(self._o, prop)
return ret |
def generate_cutD_genomic_CDR3_segs(self):
"""Add palindromic inserted nucleotides to germline V sequences.
The maximum number of palindromic insertions are appended to the
germline D segments so that delDl and delDr can index directly for number
of nucleotides to delete from a segment.
Sets the attribute cutV_genomic_CDR3_segs.
"""
max_palindrome_L = self.max_delDl_palindrome
max_palindrome_R = self.max_delDr_palindrome
self.cutD_genomic_CDR3_segs = []
for CDR3_D_seg in [x[1] for x in self.genD]:
if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
else:
self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)] | Add palindromic inserted nucleotides to germline V sequences.
The maximum number of palindromic insertions are appended to the
germline D segments so that delDl and delDr can index directly for number
of nucleotides to delete from a segment.
Sets the attribute cutV_genomic_CDR3_segs. | Below is the the instruction that describes the task:
### Input:
Add palindromic inserted nucleotides to germline V sequences.
The maximum number of palindromic insertions are appended to the
germline D segments so that delDl and delDr can index directly for number
of nucleotides to delete from a segment.
Sets the attribute cutV_genomic_CDR3_segs.
### Response:
def generate_cutD_genomic_CDR3_segs(self):
"""Add palindromic inserted nucleotides to germline V sequences.
The maximum number of palindromic insertions are appended to the
germline D segments so that delDl and delDr can index directly for number
of nucleotides to delete from a segment.
Sets the attribute cutV_genomic_CDR3_segs.
"""
max_palindrome_L = self.max_delDl_palindrome
max_palindrome_R = self.max_delDr_palindrome
self.cutD_genomic_CDR3_segs = []
for CDR3_D_seg in [x[1] for x in self.genD]:
if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
else:
self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)] |
def _parse_property(self, node):
# type: (ElementTree.Element) -> Tuple[str, Any]
"""
Parses a property node
:param node: The property node
:return: A (name, value) tuple
:raise KeyError: Attribute missing
"""
# Get information
name = node.attrib[ATTR_NAME]
vtype = node.attrib.get(ATTR_VALUE_TYPE, TYPE_STRING)
# Look for a value as a single child node
try:
value_node = next(iter(node))
value = self._parse_value_node(vtype, value_node)
except StopIteration:
# Value is an attribute
value = self._convert_value(vtype, node.attrib[ATTR_VALUE])
return name, value | Parses a property node
:param node: The property node
:return: A (name, value) tuple
:raise KeyError: Attribute missing | Below is the the instruction that describes the task:
### Input:
Parses a property node
:param node: The property node
:return: A (name, value) tuple
:raise KeyError: Attribute missing
### Response:
def _parse_property(self, node):
# type: (ElementTree.Element) -> Tuple[str, Any]
"""
Parses a property node
:param node: The property node
:return: A (name, value) tuple
:raise KeyError: Attribute missing
"""
# Get information
name = node.attrib[ATTR_NAME]
vtype = node.attrib.get(ATTR_VALUE_TYPE, TYPE_STRING)
# Look for a value as a single child node
try:
value_node = next(iter(node))
value = self._parse_value_node(vtype, value_node)
except StopIteration:
# Value is an attribute
value = self._convert_value(vtype, node.attrib[ATTR_VALUE])
return name, value |
def get_thread(self):
"""
@see: L{get_tid}
@rtype: L{Thread}
@return: Thread where the event occured.
"""
tid = self.get_tid()
process = self.get_process()
if process.has_thread(tid):
thread = process.get_thread(tid)
else:
# XXX HACK
# The thread object was missing for some reason, so make a new one.
thread = Thread(tid)
process._add_thread(thread)
return thread | @see: L{get_tid}
@rtype: L{Thread}
@return: Thread where the event occured. | Below is the the instruction that describes the task:
### Input:
@see: L{get_tid}
@rtype: L{Thread}
@return: Thread where the event occured.
### Response:
def get_thread(self):
"""
@see: L{get_tid}
@rtype: L{Thread}
@return: Thread where the event occured.
"""
tid = self.get_tid()
process = self.get_process()
if process.has_thread(tid):
thread = process.get_thread(tid)
else:
# XXX HACK
# The thread object was missing for some reason, so make a new one.
thread = Thread(tid)
process._add_thread(thread)
return thread |
def value(self, index, extra):
"""Give count and value."""
index = index
if index==0: return 1, 0
if index<=self.RLEMAX: return (1<<index)+extra, 0
return 1, index-self.RLEMAX | Give count and value. | Below is the the instruction that describes the task:
### Input:
Give count and value.
### Response:
def value(self, index, extra):
"""Give count and value."""
index = index
if index==0: return 1, 0
if index<=self.RLEMAX: return (1<<index)+extra, 0
return 1, index-self.RLEMAX |
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits)) | Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)] | Below is the the instruction that describes the task:
### Input:
Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
### Response:
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits)) |
def infer_slice(node, context=None):
"""Understand `slice` calls."""
args = node.args
if not 0 < len(args) <= 3:
raise UseInferenceDefault
infer_func = partial(helpers.safe_infer, context=context)
args = [infer_func(arg) for arg in args]
for arg in args:
if not arg or arg is util.Uninferable:
raise UseInferenceDefault
if not isinstance(arg, nodes.Const):
raise UseInferenceDefault
if not isinstance(arg.value, (type(None), int)):
raise UseInferenceDefault
if len(args) < 3:
# Make sure we have 3 arguments.
args.extend([None] * (3 - len(args)))
slice_node = nodes.Slice(
lineno=node.lineno, col_offset=node.col_offset, parent=node.parent
)
slice_node.postinit(*args)
return slice_node | Understand `slice` calls. | Below is the the instruction that describes the task:
### Input:
Understand `slice` calls.
### Response:
def infer_slice(node, context=None):
"""Understand `slice` calls."""
args = node.args
if not 0 < len(args) <= 3:
raise UseInferenceDefault
infer_func = partial(helpers.safe_infer, context=context)
args = [infer_func(arg) for arg in args]
for arg in args:
if not arg or arg is util.Uninferable:
raise UseInferenceDefault
if not isinstance(arg, nodes.Const):
raise UseInferenceDefault
if not isinstance(arg.value, (type(None), int)):
raise UseInferenceDefault
if len(args) < 3:
# Make sure we have 3 arguments.
args.extend([None] * (3 - len(args)))
slice_node = nodes.Slice(
lineno=node.lineno, col_offset=node.col_offset, parent=node.parent
)
slice_node.postinit(*args)
return slice_node |
def record_diff(old, new):
"""Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))`"""
old, new = _norm_json_params(old, new)
return json_delta.diff(new, old, verbose=False) | Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))` | Below is the the instruction that describes the task:
### Input:
Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))`
### Response:
def record_diff(old, new):
"""Return a JSON-compatible structure capable turn the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))`"""
old, new = _norm_json_params(old, new)
return json_delta.diff(new, old, verbose=False) |
def get(self, stype, flags, filters, options=None):
"""
Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned.
"""
if not isinstance(flags, str):
if isinstance(flags, list):
finflags = ",".join(flags)
else:
raise SyntaxError("Flags should be a list or comma separated string")
else:
finflags = flags
if not isinstance(filters, str):
raise SyntaxError("Filters needs to be a string in the format Filter<op>Value. The simplest form is search=\"<Term>\".")
if stype not in self.stypes:
raise SyntaxError("{} not a valid Search type.".format(stype))
if '"' not in filters or "'" not in filters:
newfilters = self.helperpat.split(filters)
newfilters = [x.strip() for x in newfilters]
newfilters[1] = '"' + newfilters[1] + '"'
op = self.helperpat.search(filters)
newfilters = op.group(0).join(newfilters)
command = '{} {} ({}){}'.format(stype, finflags, newfilters,
' ' + ujson.dumps(options) if options is not None else '')
else:
command = '{} {} ({}){}'.format(stype, finflags, filters,
' ' + ujson.dumps(options) if options is not None else '')
data = self.connection.send_command('get', command)
if 'id' in data:
raise ServerError(data['msg'], data['id'])
else:
return {'pages': data.get('more', default=False), 'data': data['items']} | Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned. | Below is the the instruction that describes the task:
### Input:
Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned.
### Response:
def get(self, stype, flags, filters, options=None):
    """
    Send a request to the API to return results related to Visual Novels.
    :param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
    :param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
    :param str filters: A string with the one filter to search by (apparently you only get one).
        This is kind of special. You need to pass them in the form <filter><op>"<term>"
        for strings or <filter><op><number> for numbers. This is counter intuitive.
        Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
        I will attempt to properly format this if not done so when called.
    :param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
    :return dict: A dictionary containing a pages and data key. data contains a list of dictionaries
        with data on your results. If pages is true, you can call this command again with the same
        parameters and pass a page option to get more data. Otherwise no further results exist for
        this query.
    :raises ServerError: Raises a ServerError if an error is returned.
    """
    # Normalize flags to the comma-separated string form the API expects.
    if not isinstance(flags, str):
        if isinstance(flags, list):
            finflags = ",".join(flags)
        else:
            raise SyntaxError("Flags should be a list or comma separated string")
    else:
        finflags = flags
    if not isinstance(filters, str):
        raise SyntaxError("Filters needs to be a string in the format Filter<op>Value. The simplest form is search=\"<Term>\".")
    if stype not in self.stypes:
        raise SyntaxError("{} not a valid Search type.".format(stype))
    # Only quote the search term ourselves when the caller supplied no quoting
    # at all. BUG FIX: the original condition used `or`, which also mangled
    # filters that were already quoted with just one quote style
    # (e.g. search="term" contains '"' but not "'").
    if '"' not in filters and "'" not in filters:
        newfilters = self.helperpat.split(filters)
        newfilters = [x.strip() for x in newfilters]
        newfilters[1] = '"' + newfilters[1] + '"'
        op = self.helperpat.search(filters)
        filters = op.group(0).join(newfilters)
    command = '{} {} ({}){}'.format(stype, finflags, filters,
                                    ' ' + ujson.dumps(options) if options is not None else '')
    data = self.connection.send_command('get', command)
    if 'id' in data:
        raise ServerError(data['msg'], data['id'])
    # BUG FIX: dict.get() accepts the default positionally only;
    # data.get('more', default=False) raises TypeError.
    return {'pages': data.get('more', False), 'data': data['items']}
def wait_for(self, event, predicate, result=None):
"""Waits for a DISPATCH'd event that meets the predicate.
Parameters
-----------
event: :class:`str`
The event name in all upper case to wait for.
predicate
A function that takes a data parameter to check for event
properties. The data parameter is the 'd' key in the JSON message.
result
A function that takes the same data parameter and executes to send
the result to the future. If None, returns the data.
Returns
--------
asyncio.Future
A future to wait for.
"""
future = self.loop.create_future()
entry = EventListener(event=event, predicate=predicate, result=result, future=future)
self._dispatch_listeners.append(entry)
return future | Waits for a DISPATCH'd event that meets the predicate.
Parameters
-----------
event: :class:`str`
The event name in all upper case to wait for.
predicate
A function that takes a data parameter to check for event
properties. The data parameter is the 'd' key in the JSON message.
result
A function that takes the same data parameter and executes to send
the result to the future. If None, returns the data.
Returns
--------
asyncio.Future
A future to wait for. | Below is the the instruction that describes the task:
### Input:
Waits for a DISPATCH'd event that meets the predicate.
Parameters
-----------
event: :class:`str`
The event name in all upper case to wait for.
predicate
A function that takes a data parameter to check for event
properties. The data parameter is the 'd' key in the JSON message.
result
A function that takes the same data parameter and executes to send
the result to the future. If None, returns the data.
Returns
--------
asyncio.Future
A future to wait for.
### Response:
def wait_for(self, event, predicate, result=None):
    """Register a listener and return a future for a matching DISPATCH event.
    Parameters
    -----------
    event: :class:`str`
        The upper-case event name to wait for.
    predicate
        Callable receiving the event's data payload (the 'd' key of the
        JSON message); used to decide whether the event matches.
    result
        Optional callable applied to the same data payload to produce the
        future's result. When None, the raw data is used.
    Returns
    --------
    asyncio.Future
        A future that completes once a matching event is dispatched.
    """
    pending = self.loop.create_future()
    listener = EventListener(event=event, predicate=predicate,
                             result=result, future=pending)
    self._dispatch_listeners.append(listener)
    return pending
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
proxy_port=None, proxy_rdns=True,
proxy_username=None, proxy_password=None,
timeout=None, source_address=None,
socket_options=None):
"""create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility)
"""
# Remove IPv6 brackets on the remote address and proxy address.
remote_host, remote_port = dest_pair
if remote_host.startswith('['):
remote_host = remote_host.strip('[]')
if proxy_addr and proxy_addr.startswith('['):
proxy_addr = proxy_addr.strip('[]')
err = None
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
family, socket_type, proto, canonname, sa = r
sock = None
try:
sock = socksocket(family, socket_type, proto)
if socket_options:
for opt in socket_options:
sock.setsockopt(*opt)
if isinstance(timeout, (int, float)):
sock.settimeout(timeout)
if proxy_type:
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
proxy_username, proxy_password)
if source_address:
sock.bind(source_address)
sock.connect((remote_host, remote_port))
return sock
except (socket.error, ProxyConnectionError) as e:
err = e
if sock:
sock.close()
sock = None
if err:
raise err
raise socket.error("gai returned empty list.") | create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility) | Below is the the instruction that describes the task:
### Input:
create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility)
### Response:
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
                      proxy_port=None, proxy_rdns=True,
                      proxy_username=None, proxy_password=None,
                      timeout=None, source_address=None,
                      socket_options=None):
    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
    Like socket.create_connection(), but connects to proxy
    before returning the socket object.
    dest_pair - 2-tuple of (IP/hostname, port).
    **proxy_args - Same args passed to socksocket.set_proxy() if present.
    timeout - Optional socket timeout value, in seconds.
    source_address - tuple (host, port) for the socket to bind to as its source
    address before connecting (only for compatibility)
    """
    remote_host, remote_port = dest_pair
    # Strip IPv6 brackets from both the destination and the proxy address.
    if remote_host.startswith('['):
        remote_host = remote_host.strip('[]')
    if proxy_addr and proxy_addr.startswith('['):
        proxy_addr = proxy_addr.strip('[]')
    last_error = None
    # The proxy host may resolve to IPv4 and/or IPv6 addresses; try each
    # candidate until one connects.
    for family, socket_type, proto, _canonname, _sa in socket.getaddrinfo(
            proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socksocket(family, socket_type, proto)
            for opt in (socket_options or ()):
                sock.setsockopt(*opt)
            if isinstance(timeout, (int, float)):
                sock.settimeout(timeout)
            if proxy_type:
                sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
                               proxy_username, proxy_password)
            if source_address:
                sock.bind(source_address)
            sock.connect((remote_host, remote_port))
            return sock
        except (socket.error, ProxyConnectionError) as exc:
            # Remember the failure and fall through to the next candidate.
            last_error = exc
            if sock:
                sock.close()
                sock = None
    if last_error:
        raise last_error
    raise socket.error("gai returned empty list.")
def create_custom_menu(self, menu_data, matchrule):
"""
创建个性化菜单::
button = [
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}]
}]
matchrule = {
"group_id":"2",
"sex":"1",
"country":"中国",
"province":"广东",
"city":"广州",
"client_platform_type":"2",
"language":"zh_CN"
}
client.create_custom_menu(button, matchrule)
:param menu_data: 如上所示的 Python 字典
:param matchrule: 如上所示的匹配规则
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/addconditional",
data={
"button": menu_data,
"matchrule": matchrule
}
) | 创建个性化菜单::
button = [
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}]
}]
matchrule = {
"group_id":"2",
"sex":"1",
"country":"中国",
"province":"广东",
"city":"广州",
"client_platform_type":"2",
"language":"zh_CN"
}
client.create_custom_menu(button, matchrule)
:param menu_data: 如上所示的 Python 字典
:param matchrule: 如上所示的匹配规则
:return: 返回的 JSON 数据包 | Below is the the instruction that describes the task:
### Input:
创建个性化菜单::
button = [
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}]
}]
matchrule = {
"group_id":"2",
"sex":"1",
"country":"中国",
"province":"广东",
"city":"广州",
"client_platform_type":"2",
"language":"zh_CN"
}
client.create_custom_menu(button, matchrule)
:param menu_data: 如上所示的 Python 字典
:param matchrule: 如上所示的匹配规则
:return: 返回的 JSON 数据包
### Response:
def create_custom_menu(self, menu_data, matchrule):
    """
    Create a conditional (personalized) custom menu.
    ``menu_data`` is the menu definition: a list of button dicts, where each
    button has a ``name`` plus either a ``type``/``key``/``url`` or a nested
    ``sub_button`` list of the same shape. ``matchrule`` is a dict of user
    matching criteria such as ``group_id``, ``sex``, ``country``,
    ``province``, ``city``, ``client_platform_type`` and ``language``
    (see the WeChat "addconditional" menu API for the full schema).
    :param menu_data: the menu definition as a Python list/dict (see above)
    :param matchrule: the matching-rule dict (see above)
    :return: the JSON response returned by the API
    """
    payload = {
        "button": menu_data,
        "matchrule": matchrule
    }
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/menu/addconditional",
        data=payload
    )
def start_state_id(self):
""" The start state is the state to which the first transition goes to.
The setter-method creates a unique first transition to the state with the given id.
Existing first transitions are removed. If the given state id is None, the first transition is removed.
:return: The id of the start state
"""
for transition_id in self.transitions:
if self.transitions[transition_id].from_state is None:
to_state = self.transitions[transition_id].to_state
if to_state is not None:
return to_state
else:
return self.state_id
return None | The start state is the state to which the first transition goes to.
The setter-method creates a unique first transition to the state with the given id.
Existing first transitions are removed. If the given state id is None, the first transition is removed.
:return: The id of the start state | Below is the the instruction that describes the task:
### Input:
The start state is the state to which the first transition goes to.
The setter-method creates a unique first transition to the state with the given id.
Existing first transitions are removed. If the given state id is None, the first transition is removed.
:return: The id of the start state
### Response:
def start_state_id(self):
    """ The start state is the state the initial transition leads to.
    The setter-method creates a unique first transition to the state with the
    given id, removing any existing first transitions; passing None removes
    the first transition altogether.
    :return: The id of the start state, or None if no initial transition exists
    """
    for transition in self.transitions.values():
        # The initial transition is the (only) one with no source state.
        if transition.from_state is not None:
            continue
        target = transition.to_state
        # An initial transition without a target points at this state itself.
        return target if target is not None else self.state_id
    return None
def _hasher_first_run(self, preimage):
'''
Invoke the backend on-demand, and check an expected hash result,
then replace this first run with the new hasher method.
This is a bit of a hacky way to minimize overhead on hash calls after this first one.
'''
new_hasher = self._backend.keccak256
assert new_hasher(b'') == b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';\x7b\xfa\xd8\x04]\x85\xa4p" # noqa: E501
self.hasher = new_hasher
return new_hasher(preimage) | Invoke the backend on-demand, and check an expected hash result,
then replace this first run with the new hasher method.
This is a bit of a hacky way to minimize overhead on hash calls after this first one. | Below is the the instruction that describes the task:
### Input:
Invoke the backend on-demand, and check an expected hash result,
then replace this first run with the new hasher method.
This is a bit of a hacky way to minimize overhead on hash calls after this first one.
### Response:
def _hasher_first_run(self, preimage):
'''
Invoke the backend on-demand, and check an expected hash result,
then replace this first run with the new hasher method.
This is a bit of a hacky way to minimize overhead on hash calls after this first one.
'''
new_hasher = self._backend.keccak256
assert new_hasher(b'') == b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';\x7b\xfa\xd8\x04]\x85\xa4p" # noqa: E501
self.hasher = new_hasher
return new_hasher(preimage) |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._share_detail is not None:
return False
if self._start_date is not None:
return False
if self._end_date is not None:
return False
return True | :rtype: bool | Below is the the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
    """
    Report whether every share field of this object is unset.
    :rtype: bool
    """
    return (self._share_detail is None
            and self._start_date is None
            and self._end_date is None)
def find_coordinates(hmms, bit_thresh):
"""
find 16S rRNA gene sequence coordinates
"""
# get coordinates from cmsearch output
seq2hmm = parse_hmm(hmms, bit_thresh)
seq2hmm = best_model(seq2hmm)
group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
for seq, info in list(seq2hmm.items()):
group2hmm[seq] = {}
# info = [model, [[hit1], [hit2], ...]]
for group_num, group in enumerate(hit_groups(info[1])):
# group is a group of hits to a single 16S gene
# determine matching strand based on best hit
best = sorted(group, reverse = True, key = itemgetter(-1))[0]
strand = best[5]
coordinates = [i[0] for i in group] + [i[1] for i in group]
coordinates = [min(coordinates), max(coordinates), strand]
# make sure all hits are to the same strand
matches = [i for i in group if i[5] == strand]
# gaps = [[gstart, gend], [gstart2, gend2]]
gaps = check_gaps(matches)
group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]
return group2hmm | find 16S rRNA gene sequence coordinates | Below is the the instruction that describes the task:
### Input:
find 16S rRNA gene sequence coordinates
### Response:
def find_coordinates(hmms, bit_thresh):
    """
    find 16S rRNA gene sequence coordinates
    :param hmms: cmsearch output, consumed by parse_hmm
    :param bit_thresh: threshold forwarded to parse_hmm
        (presumably a minimum bit score — confirm against parse_hmm)
    :return: dict of dicts:
        group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
    """
    # get coordinates from cmsearch output
    seq2hmm = parse_hmm(hmms, bit_thresh)
    # keep only the best model per sequence (see best_model)
    seq2hmm = best_model(seq2hmm)
    group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
    for seq, info in list(seq2hmm.items()):
        group2hmm[seq] = {}
        # info = [model, [[hit1], [hit2], ...]]
        for group_num, group in enumerate(hit_groups(info[1])):
            # group is a group of hits to a single 16S gene
            # determine matching strand based on best hit
            # (hits sort by their last field; NOTE(review): looks like that
            # field is the score — confirm against parse_hmm's hit layout)
            best = sorted(group, reverse = True, key = itemgetter(-1))[0]
            strand = best[5]
            # overall span = min start .. max end across all hits in the group
            coordinates = [i[0] for i in group] + [i[1] for i in group]
            coordinates = [min(coordinates), max(coordinates), strand]
            # make sure all hits are to the same strand
            matches = [i for i in group if i[5] == strand]
            # gaps = [[gstart, gend], [gstart2, gend2]]
            gaps = check_gaps(matches)
            group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps]
    return group2hmm
def on_to_position(self, speed, position, brake=True, block=True):
"""
Rotate the motor at ``speed`` to ``position``
``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
object, enabling use of other units.
"""
speed = self._speed_native_units(speed)
self.speed_sp = int(round(speed))
self.position_sp = position
self._set_brake(brake)
self.run_to_abs_pos()
if block:
self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
self.wait_until_not_moving() | Rotate the motor at ``speed`` to ``position``
``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
object, enabling use of other units. | Below is the the instruction that describes the task:
### Input:
Rotate the motor at ``speed`` to ``position``
``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
object, enabling use of other units.
### Response:
def on_to_position(self, speed, position, brake=True, block=True):
    """
    Rotate the motor at ``speed`` to ``position``.
    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units. When ``block`` is True, this call
    waits for the motion to finish before returning.
    """
    native_speed = self._speed_native_units(speed)
    self.speed_sp = int(round(native_speed))
    self.position_sp = position
    self._set_brake(brake)
    self.run_to_abs_pos()
    if not block:
        return
    self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
    self.wait_until_not_moving()
def remove_foreign_key(self, name):
"""
Removes the foreign key constraint with the given name.
:param name: The constraint name
:type name: str
"""
name = self._normalize_identifier(name)
if not self.has_foreign_key(name):
raise ForeignKeyDoesNotExist(name, self._name)
del self._fk_constraints[name] | Removes the foreign key constraint with the given name.
:param name: The constraint name
:type name: str | Below is the the instruction that describes the task:
### Input:
Removes the foreign key constraint with the given name.
:param name: The constraint name
:type name: str
### Response:
def remove_foreign_key(self, name):
    """
    Removes the foreign key constraint with the given name.
    :param name: The constraint name
    :type name: str
    :raises ForeignKeyDoesNotExist: if no such constraint is registered
    """
    normalized = self._normalize_identifier(name)
    if not self.has_foreign_key(normalized):
        raise ForeignKeyDoesNotExist(normalized, self._name)
    del self._fk_constraints[normalized]
def libname_from_dir(dirname):
"""Reconstruct the library name without it's version"""
parts = []
for part in dirname.split('-'):
if part[0].isdigit():
break
parts.append(part)
return '-'.join(parts) | Reconstruct the library name without it's version | Below is the the instruction that describes the task:
### Input:
Reconstruct the library name without it's version
### Response:
def libname_from_dir(dirname):
    """Reconstruct the library name without its version suffix.
    Keeps the leading dash-separated segments up to (but excluding) the
    first segment that starts with a digit,
    e.g. 'libfoo-bar-1.2.3' -> 'libfoo-bar'.
    """
    segments = dirname.split('-')
    keep = 0
    while keep < len(segments) and not segments[keep][0].isdigit():
        keep += 1
    return '-'.join(segments[:keep])
def from_operator(operation=debug):
"""# python-category-equations
With the tools provided here you can create category like equations for the given operator.
On the equations the underlaying '+' and '-' operations are basic set operations
called union and discard and the multiplication operator '*' connects sources to sinks.
The equation system also has a Identity 'I' term and zerO -like termination term 'O'.
For futher details go https://en.wikipedia.org/wiki/Category_(mathematics)#Definition
## Usage
Here our connector operation is print function called 'debug' which
prints an arrow between two objects:
>>> debug('a', 'b')
a -> b
>>> debug('b', 'a')
b -> a
>>> debug('a', 'a')
a -> a
Get I and O singletons and class C, which use previously defined debug -function.
>>> I, O, C = from_operator(debug)
>>> I == I
True
>>> O == I
False
>>> C(1)
C(1)
The items do have differing sinks and sources:
>>> I.sinks
{I}
>>> I.sources
{I}
>>> O.sinks
set()
>>> O.sources
set()
>>> C(1).sinks
{1}
>>> C(1).sources
{1}
You can write additions also with this notation
>>> C(1,2) == C(1) + C(2)
True
The multiplication connects sources to sinks like this:
>>> (C(1,2) * C(3,4)).evaluate()
1 -> 3
1 -> 4
2 -> 3
2 -> 4
>>> (C(3,4) * C(1,2)).sinks
{3, 4}
>>> (C(3,4) * C(1,2)).sources
{1, 2}
By combining the two previous examples:
>>> C(1,2) * C(3,4) == (C(1) + C(2)) * (C(3) + C(4))
True
The order inside C(...) does not matter:
>>> (C(1,2) * C(3,4)) == (C(2,1) * C(4,3))
True
On the other hand you can not swap the terms like:
>>> (C(1,2) * C(3,4)) == (C(3,4) * C(1,2))
False
Because:
>>> (C(3,4) * C(1,2)).evaluate()
3 -> 1
3 -> 2
4 -> 1
4 -> 2
The discard operation works like this:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).evaluate()
3 -> 1
3 -> 2
4 -> 2
But
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2)
False
Because sinks and sources differ:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).sinks
{3}
>>> (C(3) * C(1,2) + C(4) * C(2)).sinks
{3, 4}
The right form would have been:
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2) - C(4) * O - O * C(1)
True
The identity I and zero O work together like usual:
>>> I * I == I
True
>>> O * I * O == O
True
Identity 'I' works as a tool for equation simplifying.
For example:
>>> C(1,2) * C(3,4) * C(5) + C(1,2) * C(5) == C(1,2) * ( C(3,4) + I ) * C(5)
True
Because:
>>> (C(1,2) * C(3,4) * C(5) + C(1,2) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
and
>>> (C(1,2) * ( C(3,4) + I ) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
If two terms have the same middle part you can simplify equations
via terminating loose sinks or sources with O:
For example:
>>> (C(1) * C(2) * C(4) + C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * C(2) * C(4) + O * C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * ( C(2) + O * C(3) ) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> C(1) * C(2) * C(4) + O * C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
True
Note that the comparison wont work without the O -term because the sinks differ:
>>> C(1) * C(2) * C(4) + C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
False
"""
_I, _O = get_I_and_O(operation)
def _C(*things):
return Adder(operator=operation, items=set(things))
return _I, _O, _C | # python-category-equations
With the tools provided here you can create category like equations for the given operator.
On the equations the underlaying '+' and '-' operations are basic set operations
called union and discard and the multiplication operator '*' connects sources to sinks.
The equation system also has a Identity 'I' term and zerO -like termination term 'O'.
For futher details go https://en.wikipedia.org/wiki/Category_(mathematics)#Definition
## Usage
Here our connector operation is print function called 'debug' which
prints an arrow between two objects:
>>> debug('a', 'b')
a -> b
>>> debug('b', 'a')
b -> a
>>> debug('a', 'a')
a -> a
Get I and O singletons and class C, which use previously defined debug -function.
>>> I, O, C = from_operator(debug)
>>> I == I
True
>>> O == I
False
>>> C(1)
C(1)
The items do have differing sinks and sources:
>>> I.sinks
{I}
>>> I.sources
{I}
>>> O.sinks
set()
>>> O.sources
set()
>>> C(1).sinks
{1}
>>> C(1).sources
{1}
You can write additions also with this notation
>>> C(1,2) == C(1) + C(2)
True
The multiplication connects sources to sinks like this:
>>> (C(1,2) * C(3,4)).evaluate()
1 -> 3
1 -> 4
2 -> 3
2 -> 4
>>> (C(3,4) * C(1,2)).sinks
{3, 4}
>>> (C(3,4) * C(1,2)).sources
{1, 2}
By combining the two previous examples:
>>> C(1,2) * C(3,4) == (C(1) + C(2)) * (C(3) + C(4))
True
The order inside C(...) does not matter:
>>> (C(1,2) * C(3,4)) == (C(2,1) * C(4,3))
True
On the other hand you can not swap the terms like:
>>> (C(1,2) * C(3,4)) == (C(3,4) * C(1,2))
False
Because:
>>> (C(3,4) * C(1,2)).evaluate()
3 -> 1
3 -> 2
4 -> 1
4 -> 2
The discard operation works like this:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).evaluate()
3 -> 1
3 -> 2
4 -> 2
But
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2)
False
Because sinks and sources differ:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).sinks
{3}
>>> (C(3) * C(1,2) + C(4) * C(2)).sinks
{3, 4}
The right form would have been:
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2) - C(4) * O - O * C(1)
True
The identity I and zero O work together like usual:
>>> I * I == I
True
>>> O * I * O == O
True
Identity 'I' works as a tool for equation simplifying.
For example:
>>> C(1,2) * C(3,4) * C(5) + C(1,2) * C(5) == C(1,2) * ( C(3,4) + I ) * C(5)
True
Because:
>>> (C(1,2) * C(3,4) * C(5) + C(1,2) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
and
>>> (C(1,2) * ( C(3,4) + I ) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
If two terms have the same middle part you can simplify equations
via terminating loose sinks or sources with O:
For example:
>>> (C(1) * C(2) * C(4) + C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * C(2) * C(4) + O * C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * ( C(2) + O * C(3) ) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> C(1) * C(2) * C(4) + O * C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
True
Note that the comparison wont work without the O -term because the sinks differ:
>>> C(1) * C(2) * C(4) + C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
False | Below is the the instruction that describes the task:
### Input:
# python-category-equations
With the tools provided here you can create category like equations for the given operator.
On the equations the underlaying '+' and '-' operations are basic set operations
called union and discard and the multiplication operator '*' connects sources to sinks.
The equation system also has a Identity 'I' term and zerO -like termination term 'O'.
For futher details go https://en.wikipedia.org/wiki/Category_(mathematics)#Definition
## Usage
Here our connector operation is print function called 'debug' which
prints an arrow between two objects:
>>> debug('a', 'b')
a -> b
>>> debug('b', 'a')
b -> a
>>> debug('a', 'a')
a -> a
Get I and O singletons and class C, which use previously defined debug -function.
>>> I, O, C = from_operator(debug)
>>> I == I
True
>>> O == I
False
>>> C(1)
C(1)
The items do have differing sinks and sources:
>>> I.sinks
{I}
>>> I.sources
{I}
>>> O.sinks
set()
>>> O.sources
set()
>>> C(1).sinks
{1}
>>> C(1).sources
{1}
You can write additions also with this notation
>>> C(1,2) == C(1) + C(2)
True
The multiplication connects sources to sinks like this:
>>> (C(1,2) * C(3,4)).evaluate()
1 -> 3
1 -> 4
2 -> 3
2 -> 4
>>> (C(3,4) * C(1,2)).sinks
{3, 4}
>>> (C(3,4) * C(1,2)).sources
{1, 2}
By combining the two previous examples:
>>> C(1,2) * C(3,4) == (C(1) + C(2)) * (C(3) + C(4))
True
The order inside C(...) does not matter:
>>> (C(1,2) * C(3,4)) == (C(2,1) * C(4,3))
True
On the other hand you can not swap the terms like:
>>> (C(1,2) * C(3,4)) == (C(3,4) * C(1,2))
False
Because:
>>> (C(3,4) * C(1,2)).evaluate()
3 -> 1
3 -> 2
4 -> 1
4 -> 2
The discard operation works like this:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).evaluate()
3 -> 1
3 -> 2
4 -> 2
But
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2)
False
Because sinks and sources differ:
>>> (C(3,4) * C(1,2) - C(4) * C(1)).sinks
{3}
>>> (C(3) * C(1,2) + C(4) * C(2)).sinks
{3, 4}
The right form would have been:
>>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2) - C(4) * O - O * C(1)
True
The identity I and zero O work together like usual:
>>> I * I == I
True
>>> O * I * O == O
True
Identity 'I' works as a tool for equation simplifying.
For example:
>>> C(1,2) * C(3,4) * C(5) + C(1,2) * C(5) == C(1,2) * ( C(3,4) + I ) * C(5)
True
Because:
>>> (C(1,2) * C(3,4) * C(5) + C(1,2) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
and
>>> (C(1,2) * ( C(3,4) + I ) * C(5)).evaluate()
1 -> 3
1 -> 4
1 -> 5
2 -> 3
2 -> 4
2 -> 5
3 -> 5
4 -> 5
If two terms have the same middle part you can simplify equations
via terminating loose sinks or sources with O:
For example:
>>> (C(1) * C(2) * C(4) + C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * C(2) * C(4) + O * C(3) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> (C(1) * ( C(2) + O * C(3) ) * C(4)).evaluate()
1 -> 2
2 -> 4
3 -> 4
>>> C(1) * C(2) * C(4) + O * C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
True
Note that the comparison wont work without the O -term because the sinks differ:
>>> C(1) * C(2) * C(4) + C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
False
### Response:
def from_operator(operation=debug):
    """# python-category-equations
    With the tools provided here you can create category like equations for the given operator.
    On the equations the underlying '+' and '-' operations are basic set operations
    called union and discard and the multiplication operator '*' connects sources to sinks.
    The equation system also has a Identity 'I' term and zerO -like termination term 'O'.
    For further details go https://en.wikipedia.org/wiki/Category_(mathematics)#Definition
    ## Usage
    Here our connector operation is print function called 'debug' which
    prints an arrow between two objects:
    >>> debug('a', 'b')
    a -> b
    >>> debug('b', 'a')
    b -> a
    >>> debug('a', 'a')
    a -> a
    Get I and O singletons and class C, which use previously defined debug -function.
    >>> I, O, C = from_operator(debug)
    >>> I == I
    True
    >>> O == I
    False
    >>> C(1)
    C(1)
    The items do have differing sinks and sources:
    >>> I.sinks
    {I}
    >>> I.sources
    {I}
    >>> O.sinks
    set()
    >>> O.sources
    set()
    >>> C(1).sinks
    {1}
    >>> C(1).sources
    {1}
    You can write additions also with this notation
    >>> C(1,2) == C(1) + C(2)
    True
    The multiplication connects sources to sinks like this:
    >>> (C(1,2) * C(3,4)).evaluate()
    1 -> 3
    1 -> 4
    2 -> 3
    2 -> 4
    >>> (C(3,4) * C(1,2)).sinks
    {3, 4}
    >>> (C(3,4) * C(1,2)).sources
    {1, 2}
    By combining the two previous examples:
    >>> C(1,2) * C(3,4) == (C(1) + C(2)) * (C(3) + C(4))
    True
    The order inside C(...) does not matter:
    >>> (C(1,2) * C(3,4)) == (C(2,1) * C(4,3))
    True
    On the other hand you can not swap the terms like:
    >>> (C(1,2) * C(3,4)) == (C(3,4) * C(1,2))
    False
    Because:
    >>> (C(3,4) * C(1,2)).evaluate()
    3 -> 1
    3 -> 2
    4 -> 1
    4 -> 2
    The discard operation works like this:
    >>> (C(3,4) * C(1,2) - C(4) * C(1)).evaluate()
    3 -> 1
    3 -> 2
    4 -> 2
    But
    >>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2)
    False
    Because sinks and sources differ:
    >>> (C(3,4) * C(1,2) - C(4) * C(1)).sinks
    {3}
    >>> (C(3) * C(1,2) + C(4) * C(2)).sinks
    {3, 4}
    The right form would have been:
    >>> (C(3,4) * C(1,2) - C(4) * C(1)) == C(3) * C(1,2) + C(4) * C(2) - C(4) * O - O * C(1)
    True
    The identity I and zero O work together like usual:
    >>> I * I == I
    True
    >>> O * I * O == O
    True
    Identity 'I' works as a tool for equation simplifying.
    For example:
    >>> C(1,2) * C(3,4) * C(5) + C(1,2) * C(5) == C(1,2) * ( C(3,4) + I ) * C(5)
    True
    Because:
    >>> (C(1,2) * C(3,4) * C(5) + C(1,2) * C(5)).evaluate()
    1 -> 3
    1 -> 4
    1 -> 5
    2 -> 3
    2 -> 4
    2 -> 5
    3 -> 5
    4 -> 5
    and
    >>> (C(1,2) * ( C(3,4) + I ) * C(5)).evaluate()
    1 -> 3
    1 -> 4
    1 -> 5
    2 -> 3
    2 -> 4
    2 -> 5
    3 -> 5
    4 -> 5
    If two terms have the same middle part you can simplify equations
    via terminating loose sinks or sources with O:
    For example:
    >>> (C(1) * C(2) * C(4) + C(3) * C(4)).evaluate()
    1 -> 2
    2 -> 4
    3 -> 4
    >>> (C(1) * C(2) * C(4) + O * C(3) * C(4)).evaluate()
    1 -> 2
    2 -> 4
    3 -> 4
    >>> (C(1) * ( C(2) + O * C(3) ) * C(4)).evaluate()
    1 -> 2
    2 -> 4
    3 -> 4
    >>> C(1) * C(2) * C(4) + O * C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
    True
    Note that the comparison won't work without the O -term because the sinks differ:
    >>> C(1) * C(2) * C(4) + C(3) * C(4) == C(1) * ( C(2) + O * C(3) ) * C(4)
    False
    """
    # Build the identity (I) and terminal (O) singletons bound to `operation`.
    _I, _O = get_I_and_O(operation)
    def _C(*things):
        # Term factory: wraps the given items in an Adder that connects
        # them with `operation`; duplicates collapse via set().
        return Adder(operator=operation, items=set(things))
    return _I, _O, _C
def _topWCoordinates(cls, coordinates, w):
  """
  Returns the top W coordinates by order.

  @param coordinates (numpy.array) A 2D numpy array, where each element
                                   is a coordinate
  @param w (int) Number of top coordinates to return
  @return (numpy.array) A subset of `coordinates`, containing only the
                        top ones by order
  """
  if w <= 0:
    # Guard: slicing with `[-0:]` would select *every* row rather than none,
    # so "top zero" must be handled explicitly.
    return coordinates[:0]
  orders = numpy.array([cls._orderForCoordinate(c)
                        for c in coordinates.tolist()])
  # argsort is ascending, so the last `w` indices belong to the highest orders.
  indices = numpy.argsort(orders)[-w:]
  return coordinates[indices]
@param coordinates (numpy.array) A 2D numpy array, where each element
is a coordinate
@param w (int) Number of top coordinates to return
@return (numpy.array) A subset of `coordinates`, containing only the
top ones by order | Below is the instruction that describes the task:
### Input:
Returns the top W coordinates by order.
@param coordinates (numpy.array) A 2D numpy array, where each element
is a coordinate
@param w (int) Number of top coordinates to return
@return (numpy.array) A subset of `coordinates`, containing only the
top ones by order
### Response:
def _topWCoordinates(cls, coordinates, w):
"""
Returns the top W coordinates by order.
@param coordinates (numpy.array) A 2D numpy array, where each element
is a coordinate
@param w (int) Number of top coordinates to return
@return (numpy.array) A subset of `coordinates`, containing only the
top ones by order
"""
orders = numpy.array([cls._orderForCoordinate(c)
for c in coordinates.tolist()])
indices = numpy.argsort(orders)[-w:]
return coordinates[indices] |
def _write(self):
    """
    Persist ``backup_dict`` to the backup file.

    The file object referenced by ``json_file`` is rewound and truncated,
    then the dictionary is written as zlib-compressed UTF-8 JSON.
    """
    self.json_file.seek(0)
    self.json_file.truncate()
    serialized = json.dumps(self.backup_dict).encode('utf-8')
    self.json_file.write(zlib.compress(serialized))
to backup file pointed in json_file property. | Below is the instruction that describes the task:
### Input:
Writes data from backup_dict property in serialized and compressed form
to backup file pointed in json_file property.
### Response:
def _write(self):
"""
Writes data from backup_dict property in serialized and compressed form
to backup file pointed in json_file property.
"""
self.json_file.seek(0)
self.json_file.truncate()
dump = json.dumps(self.backup_dict)
dump_c = zlib.compress(dump.encode('utf-8'))
self.json_file.write(dump_c) |
def format(self, indent_level, indent_size=4):
    """Render this Boolean verifier as an indented, human-readable string.

    Returns:
        string: A formatted string
    """
    text = self.format_name('Boolean', indent_size)
    if self._require_value is not None:
        separator = '\n' if self.long_desc is not None else ''
        requirement = 'must be %s\n' % str(self._require_value).lower()
        text = text + separator + self.wrap_lines(requirement, 1, indent_size)
    return self.wrap_lines(text, indent_level, indent_size)
Returns:
string: A formatted string | Below is the instruction that describes the task:
### Input:
Format this verifier
Returns:
string: A formatted string
### Response:
def format(self, indent_level, indent_size=4):
"""Format this verifier
Returns:
string: A formatted string
"""
name = self.format_name('Boolean', indent_size)
if self._require_value is not None:
if self.long_desc is not None:
name += '\n'
name += self.wrap_lines('must be %s\n' % str(self._require_value).lower(), 1, indent_size)
return self.wrap_lines(name, indent_level, indent_size) |
def sort_basis_dict(bs):
    """Sorts a basis set dictionary into a standard order

    This, for example, allows the written file to be more easily read by humans by,
    for example, putting the name and description before more detailed fields.
    This is generally for cosmetic reasons. However, users will generally like things
    in a consistent order
    """
    # yapf: disable
    _keyorder = [
        # Schema stuff
        'molssi_bse_schema', 'schema_type', 'schema_version',

        # Auxiliary block
        'jkfit', 'jfit', 'rifit', 'admmfit', 'dftxfit', 'dftjfit',

        # Basis set metadata
        'name', 'names', 'aliases', 'flags', 'family', 'description', 'role', 'auxiliaries',
        'notes', 'function_types',

        # Reference stuff
        'reference_description', 'reference_keys',

        # Version metadata
        'version', 'revision_description',

        # Sources of components
        'data_source',

        # Elements and data
        'elements', 'references', 'ecp_electrons',
        'electron_shells', 'ecp_potentials', 'components',

        # Shell information
        'function_type', 'region', 'angular_momentum', 'exponents',
        'coefficients',
        'ecp_type', 'angular_momentum', 'r_exponents', 'gaussian_exponents',
        'coefficients'
    ]
    # yapf: enable

    # Add integers for the elements (being optimistic that element 150 will be found someday)
    _keyorder.extend([str(x) for x in range(150)])

    # Build a key -> position map once.  list.index() rescans the whole list
    # (O(n)) for every key being sorted; a dict lookup is O(1).  setdefault
    # keeps the *first* position, matching list.index() for the duplicated
    # entries above ('angular_momentum', 'coefficients').
    _keypos = {}
    for _position, _key in enumerate(_keyorder):
        _keypos.setdefault(_key, _position)

    bs_sorted = sorted(bs.items(), key=lambda x: _keypos[x[0]])
    if _use_odict:
        bs_sorted = OrderedDict(bs_sorted)
    else:
        bs_sorted = dict(bs_sorted)

    for k, v in bs_sorted.items():
        # If this is a dictionary, sort recursively
        # If this is a list, sort each element but DO NOT sort the list itself.
        if isinstance(v, dict):
            bs_sorted[k] = sort_basis_dict(v)
        elif isinstance(v, list):
            # Note - the only nested list is with coeffs, which shouldn't be sorted
            # (so we don't have to recurse into lists of lists)
            bs_sorted[k] = [sort_basis_dict(x) if isinstance(x, dict) else x for x in v]

    return bs_sorted
This, for example, allows the written file to be more easily read by humans by,
for example, putting the name and description before more detailed fields.
This is generally for cosmetic reasons. However, users will generally like things
in a consistent order | Below is the instruction that describes the task:
### Input:
Sorts a basis set dictionary into a standard order
This, for example, allows the written file to be more easily read by humans by,
for example, putting the name and description before more detailed fields.
This is generally for cosmetic reasons. However, users will generally like things
in a consistent order
### Response:
def sort_basis_dict(bs):
"""Sorts a basis set dictionary into a standard order
This, for example, allows the written file to be more easily read by humans by,
for example, putting the name and description before more detailed fields.
This is generally for cosmetic reasons. However, users will generally like things
in a consistent order
"""
# yapf: disable
_keyorder = [
# Schema stuff
'molssi_bse_schema', 'schema_type', 'schema_version',
# Auxiliary block
'jkfit', 'jfit', 'rifit', 'admmfit', 'dftxfit', 'dftjfit',
# Basis set metadata
'name', 'names', 'aliases', 'flags', 'family', 'description', 'role', 'auxiliaries',
'notes', 'function_types',
# Reference stuff
'reference_description', 'reference_keys',
# Version metadata
'version', 'revision_description',
# Sources of components
'data_source',
# Elements and data
'elements', 'references', 'ecp_electrons',
'electron_shells', 'ecp_potentials', 'components',
# Shell information
'function_type', 'region', 'angular_momentum', 'exponents',
'coefficients',
'ecp_type', 'angular_momentum', 'r_exponents', 'gaussian_exponents',
'coefficients'
]
# yapf: enable
# Add integers for the elements (being optimistic that element 150 will be found someday)
_keyorder.extend([str(x) for x in range(150)])
bs_sorted = sorted(bs.items(), key=lambda x: _keyorder.index(x[0]))
if _use_odict:
bs_sorted = OrderedDict(bs_sorted)
else:
bs_sorted = dict(bs_sorted)
for k, v in bs_sorted.items():
# If this is a dictionary, sort recursively
# If this is a list, sort each element but DO NOT sort the list itself.
if isinstance(v, dict):
bs_sorted[k] = sort_basis_dict(v)
elif isinstance(v, list):
# Note - the only nested list is with coeffs, which shouldn't be sorted
# (so we don't have to recurse into lists of lists)
bs_sorted[k] = [sort_basis_dict(x) if isinstance(x, dict) else x for x in v]
return bs_sorted |
def _make_executable(path):
    """Add execute permission (user, group and other) to the file at *path*."""
    current_mode = os.stat(path).st_mode
    os.chmod(path, current_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
### Input:
Make the file at path executable.
### Response:
def _make_executable(path):
"""Make the file at path executable."""
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) |
def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """ Computes additive self attention for time series of vectors (with batch dimension)
        the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
        v is a learnable vector of n_hidden dimensionality,
        W_1 and W_2 are learnable [n_hidden, n_input_features] matrices

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    # Static feature width; assumes the last axis of `units` is known at
    # graph-construction time (as_list()[2] would be None otherwise).
    n_input_features = units.get_shape().as_list()[2]
    # Default both the similarity-space width and the output width to the
    # input feature count.
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    # Concatenate every (h_i, h_j) pair along the last axis using the project
    # helper `expand_tile` -- presumably it tiles `units` along a new time
    # axis at the given position; TODO confirm expand_tile's exact semantics.
    units_pairs = tf.concat([expand_tile(units, 1), expand_tile(units, 2)], 3)
    # A single dense layer over the concatenated pair realizes
    # tanh(W_1 h_i + W_2 h_j).
    query = tf.layers.dense(units_pairs, n_hidden, activation=tf.tanh, kernel_initializer=INITIALIZER())
    # Score each pair with <v, .> (dense projection to one unit) and normalize
    # over axis 2; `dim` is the deprecated TF1 alias of `axis` in tf.nn.softmax.
    attention = tf.nn.softmax(tf.layers.dense(query, 1), dim=2)
    # Attention-weighted sum over the scored axis.
    attended_units = tf.reduce_sum(attention * expand_tile(units, 1), axis=2)
    # Final projection to the requested output width.
    output = tf.layers.dense(attended_units, n_output_features, activation, kernel_initializer=INITIALIZER())
    return output | Computes additive self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
v is a learnable vector of n_hidden dimensionality,
W_1 and W_2 are learnable [n_hidden, n_input_features] matrices
Args:
units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
n_hidden: number of units in hidden representation of similarity measure
n_output_features: number of features in output dense layer
activation: activation at the output
Returns:
output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features] | Below is the instruction that describes the task:
### Input:
Computes additive self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
v is a learnable vector of n_hidden dimensionality,
W_1 and W_2 are learnable [n_hidden, n_input_features] matrices
Args:
units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
n_hidden: number of units in hidden representation of similarity measure
n_output_features: number of features in output dense layer
activation: activation at the output
Returns:
output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
### Response:
def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
""" Computes additive self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
v is a learnable vector of n_hidden dimensionality,
W_1 and W_2 are learnable [n_hidden, n_input_features] matrices
Args:
units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
n_hidden: number of units in hidden representation of similarity measure
n_output_features: number of features in output dense layer
activation: activation at the output
Returns:
output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
"""
n_input_features = units.get_shape().as_list()[2]
if n_hidden is None:
n_hidden = n_input_features
if n_output_features is None:
n_output_features = n_input_features
units_pairs = tf.concat([expand_tile(units, 1), expand_tile(units, 2)], 3)
query = tf.layers.dense(units_pairs, n_hidden, activation=tf.tanh, kernel_initializer=INITIALIZER())
attention = tf.nn.softmax(tf.layers.dense(query, 1), dim=2)
attended_units = tf.reduce_sum(attention * expand_tile(units, 1), axis=2)
output = tf.layers.dense(attended_units, n_output_features, activation, kernel_initializer=INITIALIZER())
return output |
def SETNZ(cpu, dest):
    """
    Sets byte if not zero.

    Writes 1 to *dest* when the zero flag is clear, 0 otherwise.

    :param cpu: current CPU.
    :param dest: destination operand.
    """
    # ZF may be a symbolic expression, so compare with `== False` rather than
    # using `not`, and build the result as an if-then-else bitvector.
    byte_value = Operators.ITEBV(dest.size, cpu.ZF == False, 1, 0)
    dest.write(byte_value)
:param cpu: current CPU.
:param dest: destination operand. | Below is the instruction that describes the task:
### Input:
Sets byte if not zero.
:param cpu: current CPU.
:param dest: destination operand.
### Response:
def SETNZ(cpu, dest):
"""
Sets byte if not zero.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.ZF == False, 1, 0)) |
def add_cli(self, prefix, other_cli):
    """Register another CLI under the given prefix.

    Every command of ``other_cli`` becomes reachable through this CLI by
    prefixing it, e.g. executing ``cli2 greet anson 5`` here runs
    ``greet anson 5`` on the CLI registered as ``cli2``.
    """
    if prefix in self.clis or prefix in self.cmds:
        raise ValueError('Attempting to overwrite cmd or extern CLI: %s' % prefix)
    self.clis[prefix] = other_cli
raise ValueError('Attempting to overwrite cmd or extern CLI: %s' % prefix) | Adds the functionality of the other CLI to this one, where all
commands to the other CLI are prefixed by the given prefix plus a hyphen.
e.g. To execute command `greet anson 5` on other cli given prefix cli2, you
can execute the following with this CLI:
cli2 greet anson 5 | Below is the instruction that describes the task:
### Input:
Adds the functionality of the other CLI to this one, where all
commands to the other CLI are prefixed by the given prefix plus a hyphen.
e.g. To execute command `greet anson 5` on other cli given prefix cli2, you
can execute the following with this CLI:
cli2 greet anson 5
### Response:
def add_cli(self, prefix, other_cli):
"""Adds the functionality of the other CLI to this one, where all
commands to the other CLI are prefixed by the given prefix plus a hyphen.
e.g. To execute command `greet anson 5` on other cli given prefix cli2, you
can execute the following with this CLI:
cli2 greet anson 5
"""
if prefix not in self.clis and prefix not in self.cmds:
self.clis[prefix] = other_cli
else:
raise ValueError('Attempting to overwrite cmd or extern CLI: %s' % prefix) |
def on_save_as(self):
    """
    Save the current editor document under a user-chosen file name.
    """
    current_path = self.tabWidget.current_widget().file.path
    start_dir = os.path.dirname(current_path) if current_path else ''
    filename, _filter = QtWidgets.QFileDialog.getSaveFileName(
        self, 'Save', start_dir)
    if not filename:
        # Dialog cancelled: nothing to do.
        return
    self.tabWidget.save_current(filename)
    self.recent_files_manager.open_file(filename)
    self.menu_recents.update_actions()
    self.actionRun.setEnabled(True)
    self.actionConfigure_run.setEnabled(True)
    self._update_status_bar(self.tabWidget.current_widget())
### Input:
Save the current editor document as.
### Response:
def on_save_as(self):
"""
Save the current editor document as.
"""
path = self.tabWidget.current_widget().file.path
path = os.path.dirname(path) if path else ''
filename, filter = QtWidgets.QFileDialog.getSaveFileName(
self, 'Save', path)
if filename:
self.tabWidget.save_current(filename)
self.recent_files_manager.open_file(filename)
self.menu_recents.update_actions()
self.actionRun.setEnabled(True)
self.actionConfigure_run.setEnabled(True)
self._update_status_bar(self.tabWidget.current_widget()) |
def check_version(version, server):
    """Check whether this CLI version is still supported by the One Codex backend.

    Parameters
    ----------
    version : `string`
        Current version of the One Codex client library
    server : `string`
        Complete URL to One Codex server, e.g., https://app.onecodex.com

    Returns
    -------
    `tuple` containing two values:
        - True if the user *must* upgrade their software, otherwise False
        - An error message if the user should upgrade, otherwise None.
    """
    def _outdated(client_version, server_version):
        """Return True when client_version predates server_version.

        Lightweight tuple comparison; a full semver parser (e.g. the `semver`
        package) is unnecessary for these simple dotted versions.
        """
        client = tuple(int(part) for part in client_version.split("-")[0].split("."))
        server_tuple = tuple(int(part) for part in server_version.split("."))
        return client < server_tuple

    # NOTE(review): the concatenation assumes `server` ends with a trailing
    # slash; confirm against callers, since the docstring example has none.
    # this will probably live on /api/v0 forever for compat with older CLI versions
    response = requests.post(server + "api/v0/check_for_cli_update", data={"version": version})
    if response.status_code != 200:
        return False, "Error connecting to server"

    latest_version = response.json()["latest_version"]
    if not _outdated(version, latest_version):
        return False, None
    message = (
        "Please upgrade your client to the latest version (v{}) using the command "
        "`pip install --upgrade onecodex`".format(latest_version)
    )
    return True, message
Parameters
----------
version : `string`
Current version of the One Codex client library
server : `string`
Complete URL to One Codex server, e.g., https://app.onecodex.com
Returns
-------
`tuple` containing two values:
- True if the user *must* upgrade their software, otherwise False
- An error message if the user should upgrade, otherwise None. | Below is the instruction that describes the task:
### Input:
Check if the current CLI version is supported by the One Codex backend.
Parameters
----------
version : `string`
Current version of the One Codex client library
server : `string`
Complete URL to One Codex server, e.g., https://app.onecodex.com
Returns
-------
`tuple` containing two values:
- True if the user *must* upgrade their software, otherwise False
- An error message if the user should upgrade, otherwise None.
### Response:
def check_version(version, server):
"""Check if the current CLI version is supported by the One Codex backend.
Parameters
----------
version : `string`
Current version of the One Codex client library
server : `string`
Complete URL to One Codex server, e.g., https://app.onecodex.com
Returns
-------
`tuple` containing two values:
- True if the user *must* upgrade their software, otherwise False
- An error message if the user should upgrade, otherwise None.
"""
def version_inadequate(client_version, server_version):
"""Simple, fast check for version inequality.
Could use python package `semver` if we need more precise checks in
edge cases, but this generally works for now.
"""
client_version = tuple([int(x) for x in client_version.split("-")[0].split(".")])
server_version = tuple([int(x) for x in server_version.split(".")])
return client_version < server_version
# this will probably live on /api/v0 forever for compat with older CLI versions
data = requests.post(server + "api/v0/check_for_cli_update", data={"version": version})
if data.status_code != 200:
return False, "Error connecting to server"
data = data.json()
latest_version = data["latest_version"]
if version_inadequate(version, latest_version):
return (
True,
(
"Please upgrade your client to the latest version (v{}) using the command "
"`pip install --upgrade onecodex`".format(latest_version)
),
)
return False, None |
def create_raid(self, raid_config):
    """Create the raid configuration on the hardware.

    :param raid_config: A dictionary containing target raid configuration
        data. This data structure should be as follows:
        raid_config = {'logical_disks': [{'raid_level': 1,
        'size_gb': 100, 'physical_disks': ['6I:1:5'],
        'controller': 'HPE Smart Array P408i-a SR Gen10'},
        <info-for-logical-disk-2>]}
    :raises: IloError, on an error from iLO.
    """
    # Fails fast if the smart storage configuration resources are missing.
    self.check_smart_storage_config_ids()
    # Collects (controller, error-message) pairs; reported together at the end
    # so a failure on one controller does not abort the remaining ones.
    any_exceptions = []
    # Map of controller model -> list of logical-disk specs requested for it.
    controllers = self._parse_raid_config_data(raid_config)
    # Creating raid on rest of the controllers
    for controller in controllers:
        try:
            # Each controller receives only its own logical disks.
            config = {'logical_disks': controllers[controller]}
            ssc_obj = (
                self._get_smart_storage_config_by_controller_model(
                    controller))
            if ssc_obj:
                ssc_obj.create_raid(config)
            else:
                # No matching controller: report which models are available.
                members = (
                    self.smart_storage.array_controllers.get_members())
                models = [member.model for member in members]
                msg = ('Controller not found. Available controllers are: '
                       '%(models)s' % {'models': models})
                any_exceptions.append((controller, msg))
        except sushy.exceptions.SushyError as e:
            # Record the Redfish error and continue with the next controller.
            any_exceptions.append((controller, str(e)))
    if any_exceptions:
        msg = ('The Redfish controller failed to create the '
               'raid configuration for one or more controllers with '
               'Error: %(error)s' % {'error': str(any_exceptions)})
        raise exception.IloError(msg) | Create the raid configuration on the hardware.
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloError, on an error from iLO. | Below is the instruction that describes the task:
### Input:
Create the raid configuration on the hardware.
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloError, on an error from iLO.
### Response:
def create_raid(self, raid_config):
"""Create the raid configuration on the hardware.
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloError, on an error from iLO.
"""
self.check_smart_storage_config_ids()
any_exceptions = []
controllers = self._parse_raid_config_data(raid_config)
# Creating raid on rest of the controllers
for controller in controllers:
try:
config = {'logical_disks': controllers[controller]}
ssc_obj = (
self._get_smart_storage_config_by_controller_model(
controller))
if ssc_obj:
ssc_obj.create_raid(config)
else:
members = (
self.smart_storage.array_controllers.get_members())
models = [member.model for member in members]
msg = ('Controller not found. Available controllers are: '
'%(models)s' % {'models': models})
any_exceptions.append((controller, msg))
except sushy.exceptions.SushyError as e:
any_exceptions.append((controller, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to create the '
'raid configuration for one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg) |
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
    """
    Sort ``values`` and reorder corresponding ``labels``.

    ``values`` should be unique if ``labels`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``labels`` is not None.
    labels : list_like
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``na_sentinel``.
    na_sentinel : int, default -1
        Value in ``labels`` to mark "not found".
        Ignored when ``labels`` is None.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``labels`` is None.

    Returns
    -------
    ordered : ndarray
        Sorted ``values``
    new_labels : ndarray
        Reordered ``labels``; returned when ``labels`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``labels`` is neither None
          nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``labels`` is not None and ``values`` contain duplicates.
    """
    if not is_list_like(values):
        # NOTE: added the missing space between the implicitly-concatenated
        # literals (previously rendered as "...passed tosafe_sort...").
        raise TypeError("Only list-like objects are allowed to be passed to "
                        "safe_sort as values")

    if not isinstance(values, np.ndarray):
        # don't convert to string types
        dtype, _ = infer_dtype_from_array(values)
        values = np.asarray(values, dtype=dtype)

    def sort_mixed(values):
        # order ints before strings, safe in py3
        str_pos = np.array([isinstance(x, str) for x in values],
                           dtype=bool)
        nums = np.sort(values[~str_pos])
        strs = np.sort(values[str_pos])
        return np.concatenate([nums, np.asarray(strs, dtype=object)])

    sorter = None
    if lib.infer_dtype(values, skipna=False) == 'mixed-integer':
        # unorderable in py3 if mixed str/int
        ordered = sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except TypeError:
            # try this anyway
            ordered = sort_mixed(values)

    # labels:

    if labels is None:
        return ordered

    if not is_list_like(labels):
        # NOTE: added the missing space (previously "...to bepassed...").
        raise TypeError("Only list-like objects or None are allowed to be "
                        "passed to safe_sort as labels")
    labels = ensure_platform_int(np.asarray(labels))

    from pandas import Index
    if not assume_unique and not Index(values).is_unique:
        raise ValueError("values should be unique if labels is not None")

    if sorter is None:
        # mixed types
        (hash_klass, _), values = algorithms._get_data_algo(
            values, algorithms._hashtables)
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    # reverse_indexer maps original positions -> sorted positions.
    reverse_indexer = np.empty(len(sorter), dtype=np.int_)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = (labels < -len(values)) | (labels >= len(values)) | \
        (labels == na_sentinel)

    # (Out of bound indices will be masked with `na_sentinel` next, so we may
    # deal with them here without performance loss using `mode='wrap'`.)
    new_labels = reverse_indexer.take(labels, mode='wrap')
    np.putmask(new_labels, mask, na_sentinel)

    return ordered, ensure_platform_int(new_labels)
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates. | Below is the instruction that describes the task:
### Input:
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
### Response:
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, str) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if lib.infer_dtype(values, skipna=False) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, ensure_platform_int(new_labels) |
def __verify_username(self, username):
    """Raises an exception if the given username is not valid."""
    if not 3 <= len(username) <= self.__max_length:
        raise ValueError(
            f"Username must be between 3 and {self.__max_length} characters."
        )
    # Hoist the allowed-character set out of the per-character test.
    allowed = string.ascii_letters + string.digits
    if any(ch not in allowed for ch in username):
        raise ValueError("Usernames can only contain alphanumeric characters.")
### Input:
Raises an exception if the given username is not valid.
### Response:
def __verify_username(self, username):
"""Raises an exception if the given username is not valid."""
if len(username) > self.__max_length or len(username) < 3:
raise ValueError(
f"Username must be between 3 and {self.__max_length} characters."
)
if any(c not in string.ascii_letters + string.digits for c in username):
raise ValueError("Usernames can only contain alphanumeric characters.") |
def _prepare_text(self, text):
"""Returns `text` with each consituent token wrapped in HTML markup
for later match annotation.
:param text: text to be marked up
:type text: `str`
:rtype: `str`
"""
# Remove characters that should be escaped for XML input (but
# which cause problems when escaped, since they become
# tokens).
text = re.sub(r'[<>&]', '', text)
pattern = r'({})'.format(self._tokenizer.pattern)
return re.sub(pattern, self._base_token_markup, text) | Returns `text` with each consituent token wrapped in HTML markup
for later match annotation.
:param text: text to be marked up
:type text: `str`
:rtype: `str` | Below is the the instruction that describes the task:
### Input:
Returns `text` with each consituent token wrapped in HTML markup
for later match annotation.
:param text: text to be marked up
:type text: `str`
:rtype: `str`
### Response:
def _prepare_text(self, text):
"""Returns `text` with each consituent token wrapped in HTML markup
for later match annotation.
:param text: text to be marked up
:type text: `str`
:rtype: `str`
"""
# Remove characters that should be escaped for XML input (but
# which cause problems when escaped, since they become
# tokens).
text = re.sub(r'[<>&]', '', text)
pattern = r'({})'.format(self._tokenizer.pattern)
return re.sub(pattern, self._base_token_markup, text) |
def angle(vec1, vec2):
"""Calculate the angle between two Vector2's"""
dotp = Vector2.dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dotp / (mag1 * mag2)
return math.acos(result) | Calculate the angle between two Vector2's | Below is the the instruction that describes the task:
### Input:
Calculate the angle between two Vector2's
### Response:
def angle(vec1, vec2):
"""Calculate the angle between two Vector2's"""
dotp = Vector2.dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dotp / (mag1 * mag2)
return math.acos(result) |
def _set_bfd_min_tx(self, v, load=False):
    """
    Setter method for bfd_min_tx, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd/bfd_sub_cmds/bfd_min_tx (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bfd_min_tx is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bfd_min_tx() directly.
    """
    # Values coming from another binding expose a _utype constructor;
    # coerce through it before re-validating against the target type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=bfd_min_tx.bfd_min_tx, is_container='container', presence=False, yang_name="bfd-min-tx", rest_name="min-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'BFD tx rate for control packets', u'cli-sequence-commands': None, u'alt-name': u'min-tx'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured payload describing the expected type.
        raise ValueError({
            'error-string': """bfd_min_tx must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=bfd_min_tx.bfd_min_tx, is_container='container', presence=False, yang_name="bfd-min-tx", rest_name="min-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'BFD tx rate for control packets', u'cli-sequence-commands': None, u'alt-name': u'min-tx'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    self.__bfd_min_tx = t
    # Propagate the change notification if the parent binding supports it.
    if hasattr(self, '_set'):
        self._set() | Setter method for bfd_min_tx, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd/bfd_sub_cmds/bfd_min_tx (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd_min_tx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd_min_tx() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for bfd_min_tx, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd/bfd_sub_cmds/bfd_min_tx (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd_min_tx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd_min_tx() directly.
### Response:
def _set_bfd_min_tx(self, v, load=False):
"""
Setter method for bfd_min_tx, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd/bfd_sub_cmds/bfd_min_tx (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd_min_tx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd_min_tx() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bfd_min_tx.bfd_min_tx, is_container='container', presence=False, yang_name="bfd-min-tx", rest_name="min-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'BFD tx rate for control packets', u'cli-sequence-commands': None, u'alt-name': u'min-tx'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bfd_min_tx must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bfd_min_tx.bfd_min_tx, is_container='container', presence=False, yang_name="bfd-min-tx", rest_name="min-tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'BFD tx rate for control packets', u'cli-sequence-commands': None, u'alt-name': u'min-tx'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__bfd_min_tx = t
if hasattr(self, '_set'):
self._set() |
def cartesian(self, *iterables, **kwargs):
"""
Returns the cartesian product of the passed iterables with the specified number of
repetitions.
The keyword argument `repeat` is read from kwargs to pass to itertools.cartesian.
>>> seq.range(2).cartesian(range(2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
:param iterables: elements for cartesian product
:param kwargs: the variable `repeat` is read from kwargs
:return: cartesian product
"""
return self._transform(transformations.cartesian_t(iterables, kwargs.get('repeat', 1))) | Returns the cartesian product of the passed iterables with the specified number of
repetitions.
The keyword argument `repeat` is read from kwargs to pass to itertools.cartesian.
>>> seq.range(2).cartesian(range(2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
:param iterables: elements for cartesian product
:param kwargs: the variable `repeat` is read from kwargs
:return: cartesian product | Below is the the instruction that describes the task:
### Input:
Returns the cartesian product of the passed iterables with the specified number of
repetitions.
The keyword argument `repeat` is read from kwargs to pass to itertools.cartesian.
>>> seq.range(2).cartesian(range(2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
:param iterables: elements for cartesian product
:param kwargs: the variable `repeat` is read from kwargs
:return: cartesian product
### Response:
def cartesian(self, *iterables, **kwargs):
"""
Returns the cartesian product of the passed iterables with the specified number of
repetitions.
The keyword argument `repeat` is read from kwargs to pass to itertools.cartesian.
>>> seq.range(2).cartesian(range(2))
[(0, 0), (0, 1), (1, 0), (1, 1)]
:param iterables: elements for cartesian product
:param kwargs: the variable `repeat` is read from kwargs
:return: cartesian product
"""
return self._transform(transformations.cartesian_t(iterables, kwargs.get('repeat', 1))) |
def get_bonds(self,
              self_bonding_allowed=False,
              offset=3,
              modified_properties=None,
              use_lookup=False,
              set_lookup=True,
              atomic_radius_data=None
              ):
    """Return a dictionary representing the bonds.
    .. warning:: This function is **not sideeffect free**, since it
        assigns the output to a variable ``self._metadata['bond_dict']`` if
        ``set_lookup`` is ``True`` (which is the default). This is
        necessary for performance reasons.
    ``.get_bonds()`` will use or not use a lookup
    depending on ``use_lookup``. Greatly increases performance if
    True, but could introduce bugs in certain situations.
    Just imagine a situation where the :class:`~Cartesian` is
    changed manually. If you apply lateron a method e.g.
    :meth:`~get_zmat()` that makes use of :meth:`~get_bonds()`
    the dictionary of the bonds
    may not represent the actual situation anymore.
    You have two possibilities to cope with this problem.
    Either you just re-execute ``get_bonds`` on your specific instance,
    or you change the ``internally_use_lookup`` option in the settings.
    Please note that the internal use of the lookup variable
    greatly improves performance.
    Args:
        modified_properties (dic): If you want to change the van der
            Vaals radius of one or more specific atoms, pass a
            dictionary that looks like::
                modified_properties = {index1: 1.5}
            For global changes use the constants module.
        offset (float):
        use_lookup (bool):
        set_lookup (bool):
        self_bonding_allowed (bool):
        atomic_radius_data (str): Defines which column of
            :attr:`constants.elements` is used. The default is
            ``atomic_radius_cc`` and can be changed with
            :attr:`settings['defaults']['atomic_radius_data']`.
            Compare with :func:`add_data`.
    Returns:
        dict: Dictionary mapping from an atom index to the set of
            indices of atoms bonded to.
    """
    if atomic_radius_data is None:
        atomic_radius_data = settings['defaults']['atomic_radius_data']
    def complete_calculation():
        # Temporarily switch to a positional 0..n-1 index so the fragment
        # bookkeeping below can work with plain integer offsets; the
        # original labels are restored before returning.
        old_index = self.index
        self.index = range(len(self))
        fragments = self._divide_et_impera(offset=offset)
        positions = np.array(self.loc[:, ['x', 'y', 'z']], order='F')
        data = self.add_data([atomic_radius_data, 'valency'])
        bond_radii = data[atomic_radius_data]
        if modified_properties is not None:
            # Per-atom radius overrides take precedence over table values.
            bond_radii.update(pd.Series(modified_properties))
        bond_radii = bond_radii.values
        bond_dict = collections.defaultdict(set)
        for i, j, k in product(*[range(x) for x in fragments.shape]):
            # The following call is not side effect free and changes
            # bond_dict
            self._update_bond_dict(
                fragments[i, j, k], positions, bond_radii,
                bond_dict=bond_dict,
                self_bonding_allowed=self_bonding_allowed)
        # Give atoms without any detected bond an entry as well.
        # NOTE(review): they receive an empty dict while bonded atoms map
        # to sets -- confirm callers treat both container types alike.
        for i in set(self.index) - set(bond_dict.keys()):
            bond_dict[i] = {}
        self.index = old_index
        # Translate the positional indices back to the original labels.
        rename = dict(enumerate(self.index))
        bond_dict = {rename[key]: {rename[i] for i in bond_dict[key]}
                     for key in bond_dict}
        return bond_dict
    if use_lookup:
        try:
            bond_dict = self._metadata['bond_dict']
        except KeyError:
            # Cache miss: fall through to the full computation.
            bond_dict = complete_calculation()
    else:
        bond_dict = complete_calculation()
    if set_lookup:
        self._metadata['bond_dict'] = bond_dict
    return bond_dict | Return a dictionary representing the bonds.
.. warning:: This function is **not sideeffect free**, since it
assigns the output to a variable ``self._metadata['bond_dict']`` if
``set_lookup`` is ``True`` (which is the default). This is
necessary for performance reasons.
``.get_bonds()`` will use or not use a lookup
depending on ``use_lookup``. Greatly increases performance if
True, but could introduce bugs in certain situations.
Just imagine a situation where the :class:`~Cartesian` is
changed manually. If you apply lateron a method e.g.
:meth:`~get_zmat()` that makes use of :meth:`~get_bonds()`
the dictionary of the bonds
may not represent the actual situation anymore.
You have two possibilities to cope with this problem.
Either you just re-execute ``get_bonds`` on your specific instance,
or you change the ``internally_use_lookup`` option in the settings.
Please note that the internal use of the lookup variable
greatly improves performance.
Args:
modified_properties (dic): If you want to change the van der
Vaals radius of one or more specific atoms, pass a
dictionary that looks like::
modified_properties = {index1: 1.5}
For global changes use the constants module.
offset (float):
use_lookup (bool):
set_lookup (bool):
self_bonding_allowed (bool):
atomic_radius_data (str): Defines which column of
:attr:`constants.elements` is used. The default is
``atomic_radius_cc`` and can be changed with
:attr:`settings['defaults']['atomic_radius_data']`.
Compare with :func:`add_data`.
Returns:
dict: Dictionary mapping from an atom index to the set of
indices of atoms bonded to. | Below is the the instruction that describes the task:
### Input:
Return a dictionary representing the bonds.
.. warning:: This function is **not sideeffect free**, since it
assigns the output to a variable ``self._metadata['bond_dict']`` if
``set_lookup`` is ``True`` (which is the default). This is
necessary for performance reasons.
``.get_bonds()`` will use or not use a lookup
depending on ``use_lookup``. Greatly increases performance if
True, but could introduce bugs in certain situations.
Just imagine a situation where the :class:`~Cartesian` is
changed manually. If you apply lateron a method e.g.
:meth:`~get_zmat()` that makes use of :meth:`~get_bonds()`
the dictionary of the bonds
may not represent the actual situation anymore.
You have two possibilities to cope with this problem.
Either you just re-execute ``get_bonds`` on your specific instance,
or you change the ``internally_use_lookup`` option in the settings.
Please note that the internal use of the lookup variable
greatly improves performance.
Args:
modified_properties (dic): If you want to change the van der
Vaals radius of one or more specific atoms, pass a
dictionary that looks like::
modified_properties = {index1: 1.5}
For global changes use the constants module.
offset (float):
use_lookup (bool):
set_lookup (bool):
self_bonding_allowed (bool):
atomic_radius_data (str): Defines which column of
:attr:`constants.elements` is used. The default is
``atomic_radius_cc`` and can be changed with
:attr:`settings['defaults']['atomic_radius_data']`.
Compare with :func:`add_data`.
Returns:
dict: Dictionary mapping from an atom index to the set of
indices of atoms bonded to.
### Response:
def get_bonds(self,
self_bonding_allowed=False,
offset=3,
modified_properties=None,
use_lookup=False,
set_lookup=True,
atomic_radius_data=None
):
"""Return a dictionary representing the bonds.
.. warning:: This function is **not sideeffect free**, since it
assigns the output to a variable ``self._metadata['bond_dict']`` if
``set_lookup`` is ``True`` (which is the default). This is
necessary for performance reasons.
``.get_bonds()`` will use or not use a lookup
depending on ``use_lookup``. Greatly increases performance if
True, but could introduce bugs in certain situations.
Just imagine a situation where the :class:`~Cartesian` is
changed manually. If you apply lateron a method e.g.
:meth:`~get_zmat()` that makes use of :meth:`~get_bonds()`
the dictionary of the bonds
may not represent the actual situation anymore.
You have two possibilities to cope with this problem.
Either you just re-execute ``get_bonds`` on your specific instance,
or you change the ``internally_use_lookup`` option in the settings.
Please note that the internal use of the lookup variable
greatly improves performance.
Args:
modified_properties (dic): If you want to change the van der
Vaals radius of one or more specific atoms, pass a
dictionary that looks like::
modified_properties = {index1: 1.5}
For global changes use the constants module.
offset (float):
use_lookup (bool):
set_lookup (bool):
self_bonding_allowed (bool):
atomic_radius_data (str): Defines which column of
:attr:`constants.elements` is used. The default is
``atomic_radius_cc`` and can be changed with
:attr:`settings['defaults']['atomic_radius_data']`.
Compare with :func:`add_data`.
Returns:
dict: Dictionary mapping from an atom index to the set of
indices of atoms bonded to.
"""
if atomic_radius_data is None:
atomic_radius_data = settings['defaults']['atomic_radius_data']
def complete_calculation():
old_index = self.index
self.index = range(len(self))
fragments = self._divide_et_impera(offset=offset)
positions = np.array(self.loc[:, ['x', 'y', 'z']], order='F')
data = self.add_data([atomic_radius_data, 'valency'])
bond_radii = data[atomic_radius_data]
if modified_properties is not None:
bond_radii.update(pd.Series(modified_properties))
bond_radii = bond_radii.values
bond_dict = collections.defaultdict(set)
for i, j, k in product(*[range(x) for x in fragments.shape]):
# The following call is not side effect free and changes
# bond_dict
self._update_bond_dict(
fragments[i, j, k], positions, bond_radii,
bond_dict=bond_dict,
self_bonding_allowed=self_bonding_allowed)
for i in set(self.index) - set(bond_dict.keys()):
bond_dict[i] = {}
self.index = old_index
rename = dict(enumerate(self.index))
bond_dict = {rename[key]: {rename[i] for i in bond_dict[key]}
for key in bond_dict}
return bond_dict
if use_lookup:
try:
bond_dict = self._metadata['bond_dict']
except KeyError:
bond_dict = complete_calculation()
else:
bond_dict = complete_calculation()
if set_lookup:
self._metadata['bond_dict'] = bond_dict
return bond_dict |
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
"""Return the appropriate remapping for the given field, or None."""
return _FetchRemapping(message_type, 'field',
python_name=python_name, json_name=json_name,
mappings=_JSON_FIELD_MAPPINGS) | Return the appropriate remapping for the given field, or None. | Below is the the instruction that describes the task:
### Input:
Return the appropriate remapping for the given field, or None.
### Response:
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None):
"""Return the appropriate remapping for the given field, or None."""
return _FetchRemapping(message_type, 'field',
python_name=python_name, json_name=json_name,
mappings=_JSON_FIELD_MAPPINGS) |
def do_objs(kbos):
"""Draw the actual plot"""
import orbfit, ephem, math
import re
re_string=w.FilterVar.get()
vlist=[]
for name in kbos:
if not re.search(re_string,name):
continue
vlist.append(name)
if type(kbos[name])==type(ephem.EllipticalBody()):
kbos[name].compute(w.date.get())
ra=kbos[name].ra
dec=kbos[name].dec
a=math.radians(10.0/3600.0)
b=a
ang=0.0
color='blue'
yoffset=+10
xoffset=+10
else:
yoffset=-10
xoffset=-10
file=kbos[name]
jdate=ephem.julian_date(w.date.get())
obs=568
try:
position=orbfit.predict(file,jdate,obs)
except:
continue
ra=math.radians(position[0])
dec=math.radians(position[1])
a=math.radians(position[2]/3600.0)
b=math.radians(position[3]/3600.0)
ang=math.radians(position[4])
if ( a> math.radians(1.0) ):
color='green'
else:
color='black'
if w.show_ellipse.get()==1 :
if ( a < math.radians(5.0) ):
w.create_ellipse(ra,dec,a,b,ang)
if ( a < math.radians(1.0) ):
w.create_point(ra,dec,size=2,color=color)
if w.show_labels.get()==1:
w.label(ra,dec,name,offset=[xoffset,yoffset])
vlist.sort()
for v in vlist:
w.objList.insert(END,v)
w.plot_pointings() | Draw the actual plot | Below is the the instruction that describes the task:
### Input:
Draw the actual plot
### Response:
def do_objs(kbos):
"""Draw the actual plot"""
import orbfit, ephem, math
import re
re_string=w.FilterVar.get()
vlist=[]
for name in kbos:
if not re.search(re_string,name):
continue
vlist.append(name)
if type(kbos[name])==type(ephem.EllipticalBody()):
kbos[name].compute(w.date.get())
ra=kbos[name].ra
dec=kbos[name].dec
a=math.radians(10.0/3600.0)
b=a
ang=0.0
color='blue'
yoffset=+10
xoffset=+10
else:
yoffset=-10
xoffset=-10
file=kbos[name]
jdate=ephem.julian_date(w.date.get())
obs=568
try:
position=orbfit.predict(file,jdate,obs)
except:
continue
ra=math.radians(position[0])
dec=math.radians(position[1])
a=math.radians(position[2]/3600.0)
b=math.radians(position[3]/3600.0)
ang=math.radians(position[4])
if ( a> math.radians(1.0) ):
color='green'
else:
color='black'
if w.show_ellipse.get()==1 :
if ( a < math.radians(5.0) ):
w.create_ellipse(ra,dec,a,b,ang)
if ( a < math.radians(1.0) ):
w.create_point(ra,dec,size=2,color=color)
if w.show_labels.get()==1:
w.label(ra,dec,name,offset=[xoffset,yoffset])
vlist.sort()
for v in vlist:
w.objList.insert(END,v)
w.plot_pointings() |
def drag(self):
    '''
    Drag the ui object to other point or ui object.
    Usage:
        d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
        d(text="Clock").drag.to(text="Remove") # drag to another object
    '''
    # `to` becomes a bound method of the throwaway class created below,
    # so its first parameter receives that instance (named `obj`, unused).
    def to(obj, *args, **kwargs):
        # Two positional args or explicit x/y keywords mean "drag to a
        # screen coordinate"; anything else is treated as a Selector for
        # a target UI object.
        if len(args) >= 2 or "x" in kwargs or "y" in kwargs:
            drag_to = lambda x, y, steps=100: self.jsonrpc.dragTo(self.selector, x, y, steps)
        else:
            drag_to = lambda steps=100, **kwargs: self.jsonrpc.dragTo(self.selector, Selector(**kwargs), steps)
        return drag_to(*args, **kwargs)
    # Expose the helper as `.to(...)` on an anonymous one-off object.
    return type("Drag", (object,), {"to": to})() | Drag the ui object to other point or ui object.
Usage:
d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
d(text="Clock").drag.to(text="Remove") # drag to another object | Below is the the instruction that describes the task:
### Input:
Drag the ui object to other point or ui object.
Usage:
d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
d(text="Clock").drag.to(text="Remove") # drag to another object
### Response:
def drag(self):
'''
Drag the ui object to other point or ui object.
Usage:
d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
d(text="Clock").drag.to(text="Remove") # drag to another object
'''
def to(obj, *args, **kwargs):
if len(args) >= 2 or "x" in kwargs or "y" in kwargs:
drag_to = lambda x, y, steps=100: self.jsonrpc.dragTo(self.selector, x, y, steps)
else:
drag_to = lambda steps=100, **kwargs: self.jsonrpc.dragTo(self.selector, Selector(**kwargs), steps)
return drag_to(*args, **kwargs)
return type("Drag", (object,), {"to": to})() |
def register(cls, klass, name=DEFAULT_DBNAME):
"""Register an Eop Database
The only requirement of this database is that it should have ``__getitem__``
method accepting MJD as float.
"""
if name in cls._dbs:
msg = "'{}' is already registered for an Eop database. Skipping".format(name)
log.warning(msg)
else:
cls._dbs[name] = klass | Register an Eop Database
The only requirement of this database is that it should have ``__getitem__``
method accepting MJD as float. | Below is the the instruction that describes the task:
### Input:
Register an Eop Database
The only requirement of this database is that it should have ``__getitem__``
method accepting MJD as float.
### Response:
def register(cls, klass, name=DEFAULT_DBNAME):
"""Register an Eop Database
The only requirement of this database is that it should have ``__getitem__``
method accepting MJD as float.
"""
if name in cls._dbs:
msg = "'{}' is already registered for an Eop database. Skipping".format(name)
log.warning(msg)
else:
cls._dbs[name] = klass |
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
"""METADATA.pb weight matches postScriptName."""
WEIGHTS = {
"Thin": 100,
"ThinItalic": 100,
"ExtraLight": 200,
"ExtraLightItalic": 200,
"Light": 300,
"LightItalic": 300,
"Regular": 400,
"Italic": 400,
"Medium": 500,
"MediumItalic": 500,
"SemiBold": 600,
"SemiBoldItalic": 600,
"Bold": 700,
"BoldItalic": 700,
"ExtraBold": 800,
"ExtraBoldItalic": 800,
"Black": 900,
"BlackItalic": 900
}
pair = []
for k, weight in WEIGHTS.items():
if weight == font_metadata.weight:
pair.append((k, weight))
if not pair:
yield FAIL, ("METADATA.pb: Font weight value ({})"
" is invalid.").format(font_metadata.weight)
elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
font_metadata.post_script_name.endswith('-' + pair[1][0])):
yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
" and weight value ({}). The name must be"
" ended with \"{}\" or \"{}\"."
"").format(font_metadata.post_script_name,
pair[0][1],
pair[0][0],
pair[1][0])
else:
yield PASS, "Weight value matches postScriptName." | METADATA.pb weight matches postScriptName. | Below is the the instruction that describes the task:
### Input:
METADATA.pb weight matches postScriptName.
### Response:
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
"""METADATA.pb weight matches postScriptName."""
WEIGHTS = {
"Thin": 100,
"ThinItalic": 100,
"ExtraLight": 200,
"ExtraLightItalic": 200,
"Light": 300,
"LightItalic": 300,
"Regular": 400,
"Italic": 400,
"Medium": 500,
"MediumItalic": 500,
"SemiBold": 600,
"SemiBoldItalic": 600,
"Bold": 700,
"BoldItalic": 700,
"ExtraBold": 800,
"ExtraBoldItalic": 800,
"Black": 900,
"BlackItalic": 900
}
pair = []
for k, weight in WEIGHTS.items():
if weight == font_metadata.weight:
pair.append((k, weight))
if not pair:
yield FAIL, ("METADATA.pb: Font weight value ({})"
" is invalid.").format(font_metadata.weight)
elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
font_metadata.post_script_name.endswith('-' + pair[1][0])):
yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
" and weight value ({}). The name must be"
" ended with \"{}\" or \"{}\"."
"").format(font_metadata.post_script_name,
pair[0][1],
pair[0][0],
pair[1][0])
else:
yield PASS, "Weight value matches postScriptName." |
def price_and_currency(self):
"""Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Decimal representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.CurrencyCode')
else:
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.CurrencyCode')
else:
price = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.Amount')
currency = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.CurrencyCode')
if price:
dprice = Decimal(
price) / 100 if 'JP' not in self.region else Decimal(price)
return dprice, currency
else:
return None, None | Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Decimal representation of price.
2. ISO Currency code (string). | Below is the the instruction that describes the task:
### Input:
Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Decimal representation of price.
2. ISO Currency code (string).
### Response:
def price_and_currency(self):
"""Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Decimal representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.CurrencyCode')
else:
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.CurrencyCode')
else:
price = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.Amount')
currency = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.CurrencyCode')
if price:
dprice = Decimal(
price) / 100 if 'JP' not in self.region else Decimal(price)
return dprice, currency
else:
return None, None |
def message(self, pubnub, message):
    """
    Called when a new message is received on one of the subscribed
    to channels.
    Process the message and call the channel's callback function(s).
    """
    # Normal payloads carry a 'data' mapping; serialise it back to a JSON
    # string so every callback receives a consistent representation.
    try:
        json_data = json.dumps(message.message.get('data'))
    except AttributeError:
        # message.message had no .get (not a dict); pass it through as-is.
        json_data = message.message
    for func in SUBSCRIPTIONS[message.channel]:
        # This means pubnub couldn't get the current state of the channel
        # The pull_url is the location to pull the current state from.
        # Returning None here to have the calling program handle this.
        # NOTE(review): this is a substring test on the serialised JSON,
        # and in the AttributeError path json_data may not be a str at
        # all -- confirm the upstream payload shapes.
        if 'pull_url' in json_data:
            func(None)
        else:
            func(json.loads(json_data)) | Called when a new message is recevied on one of the subscribed
to channels.
Proccess the message and call the channels callback function(s). | Below is the the instruction that describes the task:
### Input:
    Called when a new message is received on one of the subscribed
    to channels.
    Process the message and call the channel's callback function(s).
### Response:
def message(self, pubnub, message):
"""
Called when a new message is recevied on one of the subscribed
to channels.
Proccess the message and call the channels callback function(s).
"""
try:
json_data = json.dumps(message.message.get('data'))
except AttributeError:
json_data = message.message
for func in SUBSCRIPTIONS[message.channel]:
# This means pubnub couldn't get the current state of the channel
# The pull_url is the location to pull the current state from.
# Returning None here to have the calling program handle this.
if 'pull_url' in json_data:
func(None)
else:
func(json.loads(json_data)) |
def _get_result_paths(self, data):
""" Return a dict of ResultPath objects representing all possible output
"""
assignment_fp = str(self.Parameters['-o'].Value).strip('"')
if not os.path.isabs(assignment_fp):
assignment_fp = os.path.relpath(assignment_fp, self.WorkingDir)
return {'Assignments': ResultPath(assignment_fp, IsWritten=True)} | Return a dict of ResultPath objects representing all possible output | Below is the the instruction that describes the task:
### Input:
Return a dict of ResultPath objects representing all possible output
### Response:
def _get_result_paths(self, data):
""" Return a dict of ResultPath objects representing all possible output
"""
assignment_fp = str(self.Parameters['-o'].Value).strip('"')
if not os.path.isabs(assignment_fp):
assignment_fp = os.path.relpath(assignment_fp, self.WorkingDir)
return {'Assignments': ResultPath(assignment_fp, IsWritten=True)} |
def com_google_fonts_check_metadata_copyright(family_metadata):
"""METADATA.pb: Copyright notice is the same in all fonts?"""
copyright = None
fail = False
for f in family_metadata.fonts:
if copyright and f.copyright != copyright:
fail = True
copyright = f.copyright
if fail:
yield FAIL, ("METADATA.pb: Copyright field value"
" is inconsistent across family")
else:
yield PASS, "Copyright is consistent across family" | METADATA.pb: Copyright notice is the same in all fonts? | Below is the the instruction that describes the task:
### Input:
METADATA.pb: Copyright notice is the same in all fonts?
### Response:
def com_google_fonts_check_metadata_copyright(family_metadata):
"""METADATA.pb: Copyright notice is the same in all fonts?"""
copyright = None
fail = False
for f in family_metadata.fonts:
if copyright and f.copyright != copyright:
fail = True
copyright = f.copyright
if fail:
yield FAIL, ("METADATA.pb: Copyright field value"
" is inconsistent across family")
else:
yield PASS, "Copyright is consistent across family" |
def get_integration(self, id, **kwargs): # noqa: E501
"""Gets a single Wavefront integration by its id, along with its status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool refresh:
:return: ResponseContainerIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_integration_with_http_info(id, **kwargs) # noqa: E501
return data | Gets a single Wavefront integration by its id, along with its status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool refresh:
:return: ResponseContainerIntegration
If the method is called asynchronously,
                 returns the request thread. | Below is the instruction that describes the task:
### Input:
Gets a single Wavefront integration by its id, along with its status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool refresh:
:return: ResponseContainerIntegration
If the method is called asynchronously,
returns the request thread.
### Response:
def get_integration(self, id, **kwargs): # noqa: E501
"""Gets a single Wavefront integration by its id, along with its status # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param bool refresh:
:return: ResponseContainerIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_integration_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_integration_with_http_info(id, **kwargs) # noqa: E501
return data |
def format_short_title(self, format='html5', deparagraph=True,
mathjax=False, smart=True, extra_args=None):
"""Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `True`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
the document.
"""
if self.short_title is None:
return None
output_text = convert_lsstdoc_tex(
self.short_title, 'html5',
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text | Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `True`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
            the document. | Below is the instruction that describes the task:
### Input:
Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `True`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
the document.
### Response:
def format_short_title(self, format='html5', deparagraph=True,
mathjax=False, smart=True, extra_args=None):
"""Get the document short title in the specified markup format.
Parameters
----------
format : `str`, optional
Output format (such as ``'html5'`` or ``'plain'``).
deparagraph : `bool`, optional
Remove the paragraph tags from single paragraph content.
mathjax : `bool`, optional
Allow pandoc to use MathJax math markup.
smart : `True`, optional
Allow pandoc to create "smart" unicode punctuation.
extra_args : `list`, optional
Additional command line flags to pass to Pandoc. See
`lsstprojectmeta.pandoc.convert.convert_text`.
Returns
-------
output_text : `str`
Converted content or `None` if the short title is not available in
the document.
"""
if self.short_title is None:
return None
output_text = convert_lsstdoc_tex(
self.short_title, 'html5',
deparagraph=deparagraph,
mathjax=mathjax,
smart=smart,
extra_args=extra_args)
return output_text |
def isattr(self, key):
"""
Checks to see if an attribute exists. If it does, returns True, otherwise returns False
:param key: Dictionary key to be checked for presence in the datastore
:return: True/False depending on whether an attribute exists
"""
try:
if key in self.datastore:
return True
else:
return False
except AttributeError:
return False | Checks to see if an attribute exists. If it does, returns True, otherwise returns False
:param key: Dictionary key to be checked for presence in the datastore
        :return: True/False depending on whether an attribute exists | Below is the instruction that describes the task:
### Input:
Checks to see if an attribute exists. If it does, returns True, otherwise returns False
:param key: Dictionary key to be checked for presence in the datastore
:return: True/False depending on whether an attribute exists
### Response:
def isattr(self, key):
"""
Checks to see if an attribute exists. If it does, returns True, otherwise returns False
:param key: Dictionary key to be checked for presence in the datastore
:return: True/False depending on whether an attribute exists
"""
try:
if key in self.datastore:
return True
else:
return False
except AttributeError:
return False |
def get_user_log(self, language='da'):
"""Get the controller state"""
payload = """<getUserLog1 xmlns="utcs" />
<getUserLog2 xmlns="utcs">0</getUserLog2>
<getUserLog3 xmlns="utcs">{language}</getUserLog3>
""".format(language=language)
xdoc = self.connection.soap_action('/ws/ConfigurationService',
'getUserLog', payload)
if xdoc:
base64data = xdoc.find('./SOAP-ENV:Body/ns1:getUserLog4/ns1:data',
IHCSoapClient.ihcns).text
if not base64data:
return False
return base64.b64decode(base64data).decode('UTF-8')
        return False | Get the controller state | Below is the instruction that describes the task:
### Input:
Get the controller state
### Response:
def get_user_log(self, language='da'):
"""Get the controller state"""
payload = """<getUserLog1 xmlns="utcs" />
<getUserLog2 xmlns="utcs">0</getUserLog2>
<getUserLog3 xmlns="utcs">{language}</getUserLog3>
""".format(language=language)
xdoc = self.connection.soap_action('/ws/ConfigurationService',
'getUserLog', payload)
if xdoc:
base64data = xdoc.find('./SOAP-ENV:Body/ns1:getUserLog4/ns1:data',
IHCSoapClient.ihcns).text
if not base64data:
return False
return base64.b64decode(base64data).decode('UTF-8')
return False |
def get_group_by_id(self, group_id) -> typing.Optional['Group']:
"""
Args:
group_id: group ID
Returns: Group
"""
VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id')
for group in self.groups:
if group.group_id == group_id:
return group
return None | Args:
group_id: group ID
        Returns: Group | Below is the instruction that describes the task:
### Input:
Args:
group_id: group ID
Returns: Group
### Response:
def get_group_by_id(self, group_id) -> typing.Optional['Group']:
"""
Args:
group_id: group ID
Returns: Group
"""
VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id')
for group in self.groups:
if group.group_id == group_id:
return group
return None |
def emulate_users(self, request):
"""
The list view
"""
def display_as_link(self, obj):
try:
identifier = getattr(user_model_admin, list_display_link)(obj)
except AttributeError:
identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
emulate_user_id = request.session.get('emulate_user_id')
if emulate_user_id == obj.id:
return format_html('<strong>{}</strong>', identifier)
fmtargs = {
'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
'identifier': identifier,
}
return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)
opts = self.UserModel._meta
app_label = opts.app_label
user_model_admin = self.admin_site._registry[self.UserModel]
request._lookup_model = self.UserModel
list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
# replace first entry in list_display_links by customized method display_as_link
list_display_link = list_display_links[0]
try:
list_display = list(user_model_admin.segmentation_list_display)
except AttributeError:
list_display = list(user_model_admin.list_display)
list_display.remove(list_display_link)
list_display.insert(0, 'display_as_link')
display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9
try:
display_as_link.short_description = user_model_admin.identifier.short_description
except AttributeError:
display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
self.display_as_link = six.create_bound_method(display_as_link, self)
ChangeList = self.get_changelist(request)
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self)
cl.formset = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
'is_popup': cl.is_popup,
'cl': cl,
'media': self.media,
'has_add_permission': False,
'opts': cl.opts,
'app_label': app_label,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
}
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
        ], context) | The list view | Below is the instruction that describes the task:
### Input:
The list view
### Response:
def emulate_users(self, request):
"""
The list view
"""
def display_as_link(self, obj):
try:
identifier = getattr(user_model_admin, list_display_link)(obj)
except AttributeError:
identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
emulate_user_id = request.session.get('emulate_user_id')
if emulate_user_id == obj.id:
return format_html('<strong>{}</strong>', identifier)
fmtargs = {
'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
'identifier': identifier,
}
return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)
opts = self.UserModel._meta
app_label = opts.app_label
user_model_admin = self.admin_site._registry[self.UserModel]
request._lookup_model = self.UserModel
list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
# replace first entry in list_display_links by customized method display_as_link
list_display_link = list_display_links[0]
try:
list_display = list(user_model_admin.segmentation_list_display)
except AttributeError:
list_display = list(user_model_admin.list_display)
list_display.remove(list_display_link)
list_display.insert(0, 'display_as_link')
display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9
try:
display_as_link.short_description = user_model_admin.identifier.short_description
except AttributeError:
display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
self.display_as_link = six.create_bound_method(display_as_link, self)
ChangeList = self.get_changelist(request)
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self)
cl.formset = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
'is_popup': cl.is_popup,
'cl': cl,
'media': self.media,
'has_add_permission': False,
'opts': cl.opts,
'app_label': app_label,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
}
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context) |
def clear(self, train=False, split=0):
"""Delete Features of each class from the database.
:param train: Whether or not to clear the FeatureKeys
:type train: bool
:param split: Which split of candidates to clear features from.
:type split: int
"""
# Clear Features for the candidates in the split passed in.
logger.info(f"Clearing Features (split {split})")
sub_query = (
self.session.query(Candidate.id).filter(Candidate.split == split).subquery()
)
query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query))
query.delete(synchronize_session="fetch")
# Delete all old annotation keys
if train:
logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...")
drop_all_keys(self.session, FeatureKey, self.candidate_classes) | Delete Features of each class from the database.
:param train: Whether or not to clear the FeatureKeys
:type train: bool
:param split: Which split of candidates to clear features from.
        :type split: int | Below is the instruction that describes the task:
### Input:
Delete Features of each class from the database.
:param train: Whether or not to clear the FeatureKeys
:type train: bool
:param split: Which split of candidates to clear features from.
:type split: int
### Response:
def clear(self, train=False, split=0):
"""Delete Features of each class from the database.
:param train: Whether or not to clear the FeatureKeys
:type train: bool
:param split: Which split of candidates to clear features from.
:type split: int
"""
# Clear Features for the candidates in the split passed in.
logger.info(f"Clearing Features (split {split})")
sub_query = (
self.session.query(Candidate.id).filter(Candidate.split == split).subquery()
)
query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query))
query.delete(synchronize_session="fetch")
# Delete all old annotation keys
if train:
logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...")
drop_all_keys(self.session, FeatureKey, self.candidate_classes) |
def create_token(self, creds):
'''
Create token with creds.
Token authorizes salt access if successful authentication
with the credentials in creds.
creds format is as follows:
{
'username': 'namestring',
'password': 'passwordstring',
'eauth': 'eauthtypestring',
}
examples of valid eauth type strings: 'pam' or 'ldap'
Returns dictionary of token information with the following format:
{
'token': 'tokenstring',
'start': starttimeinfractionalseconds,
'expire': expiretimeinfractionalseconds,
'name': 'usernamestring',
'user': 'usernamestring',
'username': 'usernamestring',
'eauth': 'eauthtypestring',
'perms: permslistofstrings,
}
The perms list provides those parts of salt for which the user is authorised
to execute.
example perms list:
[
"grains.*",
"status.*",
"sys.*",
"test.*"
]
'''
try:
tokenage = self.resolver.mk_token(creds)
except Exception as ex:
raise EauthAuthenticationError(
"Authentication failed with {0}.".format(repr(ex)))
if 'token' not in tokenage:
raise EauthAuthenticationError("Authentication failed with provided credentials.")
# Grab eauth config for the current backend for the current user
tokenage_eauth = self.opts['external_auth'][tokenage['eauth']]
if tokenage['name'] in tokenage_eauth:
tokenage['perms'] = tokenage_eauth[tokenage['name']]
else:
tokenage['perms'] = tokenage_eauth['*']
tokenage['user'] = tokenage['name']
tokenage['username'] = tokenage['name']
return tokenage | Create token with creds.
Token authorizes salt access if successful authentication
with the credentials in creds.
creds format is as follows:
{
'username': 'namestring',
'password': 'passwordstring',
'eauth': 'eauthtypestring',
}
examples of valid eauth type strings: 'pam' or 'ldap'
Returns dictionary of token information with the following format:
{
'token': 'tokenstring',
'start': starttimeinfractionalseconds,
'expire': expiretimeinfractionalseconds,
'name': 'usernamestring',
'user': 'usernamestring',
'username': 'usernamestring',
'eauth': 'eauthtypestring',
'perms: permslistofstrings,
}
The perms list provides those parts of salt for which the user is authorised
to execute.
example perms list:
[
"grains.*",
"status.*",
"sys.*",
"test.*"
        ] | Below is the instruction that describes the task:
### Input:
Create token with creds.
Token authorizes salt access if successful authentication
with the credentials in creds.
creds format is as follows:
{
'username': 'namestring',
'password': 'passwordstring',
'eauth': 'eauthtypestring',
}
examples of valid eauth type strings: 'pam' or 'ldap'
Returns dictionary of token information with the following format:
{
'token': 'tokenstring',
'start': starttimeinfractionalseconds,
'expire': expiretimeinfractionalseconds,
'name': 'usernamestring',
'user': 'usernamestring',
'username': 'usernamestring',
'eauth': 'eauthtypestring',
'perms: permslistofstrings,
}
The perms list provides those parts of salt for which the user is authorised
to execute.
example perms list:
[
"grains.*",
"status.*",
"sys.*",
"test.*"
]
### Response:
def create_token(self, creds):
'''
Create token with creds.
Token authorizes salt access if successful authentication
with the credentials in creds.
creds format is as follows:
{
'username': 'namestring',
'password': 'passwordstring',
'eauth': 'eauthtypestring',
}
examples of valid eauth type strings: 'pam' or 'ldap'
Returns dictionary of token information with the following format:
{
'token': 'tokenstring',
'start': starttimeinfractionalseconds,
'expire': expiretimeinfractionalseconds,
'name': 'usernamestring',
'user': 'usernamestring',
'username': 'usernamestring',
'eauth': 'eauthtypestring',
'perms: permslistofstrings,
}
The perms list provides those parts of salt for which the user is authorised
to execute.
example perms list:
[
"grains.*",
"status.*",
"sys.*",
"test.*"
]
'''
try:
tokenage = self.resolver.mk_token(creds)
except Exception as ex:
raise EauthAuthenticationError(
"Authentication failed with {0}.".format(repr(ex)))
if 'token' not in tokenage:
raise EauthAuthenticationError("Authentication failed with provided credentials.")
# Grab eauth config for the current backend for the current user
tokenage_eauth = self.opts['external_auth'][tokenage['eauth']]
if tokenage['name'] in tokenage_eauth:
tokenage['perms'] = tokenage_eauth[tokenage['name']]
else:
tokenage['perms'] = tokenage_eauth['*']
tokenage['user'] = tokenage['name']
tokenage['username'] = tokenage['name']
return tokenage |
def store_net_db(self, tenant_id, net, net_dict, result):
"""Store service network in DB. """
network_dict = {'name': net_dict.get('name'),
'config_profile': net_dict.get('config_profile'),
'segmentation_id': net_dict.get('segmentation_id'),
'tenant_id': tenant_id,
'fwd_mode': net_dict.get('fwd_mode'),
'vlan': net_dict.get('vlan_id')}
self.add_network_db(net, network_dict, fw_const.FW_CONST, result) | Store service network in DB. | Below is the the instruction that describes the task:
### Input:
Store service network in DB.
### Response:
def store_net_db(self, tenant_id, net, net_dict, result):
"""Store service network in DB. """
network_dict = {'name': net_dict.get('name'),
'config_profile': net_dict.get('config_profile'),
'segmentation_id': net_dict.get('segmentation_id'),
'tenant_id': tenant_id,
'fwd_mode': net_dict.get('fwd_mode'),
'vlan': net_dict.get('vlan_id')}
        self.add_network_db(net, network_dict, fw_const.FW_CONST, result) | Store service network in DB. | Below is the instruction that describes the task:
def view(self, buffer_time = 10.0, sample_size = 10000, name=None, description=None, start=False):
"""
Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service.
"""
if name is None:
name = ''.join(random.choice('0123456789abcdef') for x in range(16))
if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
if self._json_stream:
view_stream = self._json_stream
else:
self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
view_stream = self._json_stream
# colocate map operator with stream that is being viewed.
if self._placeable:
self._colocate(view_stream, 'view')
else:
view_stream = self
port = view_stream.oport.name
view_config = {
'name': name,
'port': port,
'description': description,
'bufferTime': buffer_time,
'sampleSize': sample_size}
if start:
view_config['activateOption'] = 'automatic'
view_stream.oport.operator.addViewConfig(view_config)
_view = View(name)
self.topology.graph._views.append(_view)
return _view | Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
            contexts including Streaming Analytics service. | Below is the instruction that describes the task:
### Input:
Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service.
### Response:
def view(self, buffer_time = 10.0, sample_size = 10000, name=None, description=None, start=False):
"""
Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service.
"""
if name is None:
name = ''.join(random.choice('0123456789abcdef') for x in range(16))
if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
if self._json_stream:
view_stream = self._json_stream
else:
self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
view_stream = self._json_stream
# colocate map operator with stream that is being viewed.
if self._placeable:
self._colocate(view_stream, 'view')
else:
view_stream = self
port = view_stream.oport.name
view_config = {
'name': name,
'port': port,
'description': description,
'bufferTime': buffer_time,
'sampleSize': sample_size}
if start:
view_config['activateOption'] = 'automatic'
view_stream.oport.operator.addViewConfig(view_config)
_view = View(name)
self.topology.graph._views.append(_view)
return _view |
def write(self, client):
"""
Write current data to db in plc
"""
assert(isinstance(self._bytearray, DB))
assert(self.row_size >= 0)
db_nr = self._bytearray.db_number
offset = self.db_offset
data = self.get_bytearray()[offset:offset+self.row_size]
db_offset = self.db_offset
# indicate start of write only area of row!
if self.row_offset:
data = data[self.row_offset:]
db_offset += self.row_offset
        client.db_write(db_nr, db_offset, data) | Write current data to db in plc | Below is the instruction that describes the task:
### Input:
Write current data to db in plc
### Response:
def write(self, client):
"""
Write current data to db in plc
"""
assert(isinstance(self._bytearray, DB))
assert(self.row_size >= 0)
db_nr = self._bytearray.db_number
offset = self.db_offset
data = self.get_bytearray()[offset:offset+self.row_size]
db_offset = self.db_offset
# indicate start of write only area of row!
if self.row_offset:
data = data[self.row_offset:]
db_offset += self.row_offset
client.db_write(db_nr, db_offset, data) |
def open(self, mode='r', name=None, **kwargs):
"""Opens this file, or a file under this directory.
``path.open(mode, name)`` is a shortcut for ``(path/name).open(mode)``.
Note that this uses :func:`io.open()` which behaves differently from
:func:`open()` on Python 2; see the appropriate documentation.
:param name: Path component to append to this path before opening the
file.
"""
if name is not None:
return io.open((self / name).path, mode=mode, **kwargs)
else:
return io.open(self.path, mode=mode, **kwargs) | Opens this file, or a file under this directory.
``path.open(mode, name)`` is a shortcut for ``(path/name).open(mode)``.
Note that this uses :func:`io.open()` which behaves differently from
:func:`open()` on Python 2; see the appropriate documentation.
:param name: Path component to append to this path before opening the
            file. | Below is the instruction that describes the task:
### Input:
Opens this file, or a file under this directory.
``path.open(mode, name)`` is a shortcut for ``(path/name).open(mode)``.
Note that this uses :func:`io.open()` which behaves differently from
:func:`open()` on Python 2; see the appropriate documentation.
:param name: Path component to append to this path before opening the
file.
### Response:
def open(self, mode='r', name=None, **kwargs):
    """Open this file, or the file ``name`` under this directory.

    ``path.open(mode, name)`` is shorthand for ``(path/name).open(mode)``.
    Uses :func:`io.open`, which behaves differently from the Python 2
    builtin :func:`open`; see the appropriate documentation.

    :param name: optional path component appended to this path before
        the file is opened.
    """
    target = self.path if name is None else (self / name).path
    return io.open(target, mode=mode, **kwargs)
def fill(self, config, section):
"""Fill data from a given configuration section.
Args:
config (configparser): the configuration file
section (str): the section to use
"""
if config.has_section(section):
default_url = self.DEFAULT_REPOSITORIES.get(self.name, '')
self.url = RepositoryURL(config_get(config, section, 'repository', default_url))
self.username = config_get(config, section, 'username', '')
self.password = config_get(config, section, 'password', '') | Fill data from a given configuration section.
Args:
config (configparser): the configuration file
section (str): the section to use | Below is the instruction that describes the task:
### Input:
Fill data from a given configuration section.
Args:
config (configparser): the configuration file
section (str): the section to use
### Response:
def fill(self, config, section):
    """Fill this repository's data from a configuration section.

    Missing options fall back to the known default URL for this
    repository name (empty string otherwise) and to empty credentials.
    If the section does not exist, the object is left untouched.

    Args:
        config (configparser): the configuration file
        section (str): the section to read from
    """
    if config.has_section(section):
        # Fall back to the well-known URL for this repository, if any.
        default_url = self.DEFAULT_REPOSITORIES.get(self.name, '')
        self.url = RepositoryURL(config_get(config, section, 'repository', default_url))
        self.username = config_get(config, section, 'username', '')
        self.password = config_get(config, section, 'password', '')
def resize_old(self, block_size, order=0, mode='constant', cval=False):
'''
geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)
Returns resized georaster
'''
if not cval:
cval = np.nan
if (self.raster.dtype.name.find('float') != -1 and
np.max(np.abs([self.max(), self.min()])) > 1):
raster2 = (self.raster-self.min())/(self.max()-self.min())
else:
raster2 = self.raster.copy()
raster2 = raster2.astype(float)
raster2[self.raster.mask] = np.nan
raster2 = resize(raster2, block_size, order=order, mode=mode, cval=cval)
raster2 = np.ma.masked_array(raster2, mask=np.isnan(raster2),
fill_value=self.raster.fill_value)
raster2 = raster2*(self.max()-self.min())+self.min()
raster2[raster2.mask] = self.nodata_value
raster2.mask = np.logical_or(np.isnan(raster2.data), raster2.data == self.nodata_value)
geot = list(self.geot)
[geot[-1], geot[1]] = np.array([geot[-1], geot[1]])*self.shape/block_size
return GeoRaster(raster2, tuple(geot), nodata_value=self.nodata_value,\
projection=self.projection, datatype=self.datatype) | geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)
Returns resized georaster | Below is the instruction that describes the task:
### Input:
geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)
Returns resized georaster
### Response:
def resize_old(self, block_size, order=0, mode='constant', cval=False):
    '''
    geo.resize_old(new_shape, order=0, mode='constant', cval=np.nan)
    Returns a resized copy of the georaster.

    Wide-range float rasters are rescaled to [0, 1], resampled to
    ``block_size`` with ``resize``, then mapped back to their original
    value range; other rasters are resampled directly.  The
    geotransform's pixel sizes are scaled by the shape ratio so the
    raster keeps covering the same extent.

    :param block_size: target shape of the resized raster.
    :param order: interpolation order passed to ``resize`` (0 = nearest).
    :param mode: edge-handling mode passed to ``resize``.
    :param cval: fill value for points outside the raster; ``np.nan``
        when left unset.
    '''
    # Only substitute NaN when cval was not supplied.  The previous
    # ``if not cval`` check wrongly discarded an explicit ``cval=0``.
    if cval is False or cval is None:
        cval = np.nan
    # Normalize wide-range float rasters to [0, 1] before resampling.
    normalized = (self.raster.dtype.name.find('float') != -1 and
                  np.max(np.abs([self.max(), self.min()])) > 1)
    if normalized:
        raster2 = (self.raster-self.min())/(self.max()-self.min())
    else:
        raster2 = self.raster.copy()
    raster2 = raster2.astype(float)
    # Masked cells become NaN so they do not bleed into the interpolation.
    raster2[self.raster.mask] = np.nan
    raster2 = resize(raster2, block_size, order=order, mode=mode, cval=cval)
    raster2 = np.ma.masked_array(raster2, mask=np.isnan(raster2),
                                 fill_value=self.raster.fill_value)
    if normalized:
        # Undo the [0, 1] normalization.  Previously this was applied
        # unconditionally, corrupting rasters that were never normalized
        # (e.g. integer rasters).
        raster2 = raster2*(self.max()-self.min())+self.min()
    raster2[raster2.mask] = self.nodata_value
    raster2.mask = np.logical_or(np.isnan(raster2.data), raster2.data == self.nodata_value)
    # Scale pixel sizes so the resized raster spans the same extent.
    geot = list(self.geot)
    [geot[-1], geot[1]] = np.array([geot[-1], geot[1]])*self.shape/block_size
    return GeoRaster(raster2, tuple(geot), nodata_value=self.nodata_value,
                     projection=self.projection, datatype=self.datatype)
def Martin_Sims(m, x, D, rhol, rhog, hl):
r'''Calculates the two-phase non-boiling heat transfer coefficient of a
liquid and gas flowing inside a tube of any inclination, as in [1]_ and
reviewed in [2]_.
.. math::
\frac{h_{TP}}{h_l} = 1 + 0.64\sqrt{\frac{V_{gs}}{V_{ls}}}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
hl : float
Liquid-phase heat transfer coefficient as described below, [W/m^2/K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
No suggestion for how to calculate the liquid-phase heat transfer
coefficient is given in [1]_; [2]_ suggests to use the same procedure as
in `Knott`, but this has not been implemented.
Examples
--------
>>> Martin_Sims(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, hl=141.2)
5563.280000000001
References
----------
.. [1] Martin, B. W, and G. E Sims. "Forced Convection Heat Transfer to
Water with Air Injection in a Rectangular Duct." International Journal
of Heat and Mass Transfer 14, no. 8 (August 1, 1971): 1115-34.
doi:10.1016/0017-9310(71)90208-0.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
'''
Vgs = m*x/(rhog*pi/4*D**2)
Vls = m*(1-x)/(rhol*pi/4*D**2)
return hl*(1 + 0.64*(Vgs/Vls)**0.5) | r'''Calculates the two-phase non-boiling heat transfer coefficient of a
liquid and gas flowing inside a tube of any inclination, as in [1]_ and
reviewed in [2]_.
.. math::
\frac{h_{TP}}{h_l} = 1 + 0.64\sqrt{\frac{V_{gs}}{V_{ls}}}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
hl : float
Liquid-phase heat transfer coefficient as described below, [W/m^2/K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
No suggestion for how to calculate the liquid-phase heat transfer
coefficient is given in [1]_; [2]_ suggests to use the same procedure as
in `Knott`, but this has not been implemented.
Examples
--------
>>> Martin_Sims(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, hl=141.2)
5563.280000000001
References
----------
.. [1] Martin, B. W, and G. E Sims. "Forced Convection Heat Transfer to
Water with Air Injection in a Rectangular Duct." International Journal
of Heat and Mass Transfer 14, no. 8 (August 1, 1971): 1115-34.
doi:10.1016/0017-9310(71)90208-0.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the two-phase non-boiling heat transfer coefficient of a
liquid and gas flowing inside a tube of any inclination, as in [1]_ and
reviewed in [2]_.
.. math::
\frac{h_{TP}}{h_l} = 1 + 0.64\sqrt{\frac{V_{gs}}{V_{ls}}}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
hl : float
Liquid-phase heat transfer coefficient as described below, [W/m^2/K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
No suggestion for how to calculate the liquid-phase heat transfer
coefficient is given in [1]_; [2]_ suggests to use the same procedure as
in `Knott`, but this has not been implemented.
Examples
--------
>>> Martin_Sims(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, hl=141.2)
5563.280000000001
References
----------
.. [1] Martin, B. W, and G. E Sims. "Forced Convection Heat Transfer to
Water with Air Injection in a Rectangular Duct." International Journal
of Heat and Mass Transfer 14, no. 8 (August 1, 1971): 1115-34.
doi:10.1016/0017-9310(71)90208-0.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
### Response:
def Martin_Sims(m, x, D, rhol, rhog, hl):
    r'''Calculate the two-phase non-boiling heat transfer coefficient of a
    liquid and gas flowing inside a tube of any inclination [1]_, as
    reviewed in [2]_.

    .. math::
        \frac{h_{TP}}{h_l} = 1 + 0.64\sqrt{\frac{V_{gs}}{V_{ls}}}

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    x : float
        Quality at the specific tube interval []
    D : float
        Diameter of the tube [m]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]
    hl : float
        Liquid-phase heat transfer coefficient as described below, [W/m^2/K]

    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]

    Notes
    -----
    [1]_ gives no suggestion for computing the liquid-phase heat transfer
    coefficient; [2]_ suggests the same procedure as in `Knott`, which has
    not been implemented here.

    Examples
    --------
    >>> round(Martin_Sims(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, hl=141.2), 6)
    5563.28

    References
    ----------
    .. [1] Martin, B. W, and G. E Sims. "Forced Convection Heat Transfer to
       Water with Air Injection in a Rectangular Duct." International Journal
       of Heat and Mass Transfer 14, no. 8 (August 1, 1971): 1115-34.
       doi:10.1016/0017-9310(71)90208-0.
    .. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
       Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
       Seven Sets of Experimental Data, Including Flow Pattern and Tube
       Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
       1999): 15-40. doi:10.1080/014576399271691.
    '''
    # Tube cross-sectional flow area, shared by both superficial velocities.
    flow_area = pi / 4. * D**2
    # Superficial gas and liquid velocities.
    superficial_gas = m * x / (rhog * flow_area)
    superficial_liquid = m * (1. - x) / (rhol * flow_area)
    return hl * (1. + 0.64 * (superficial_gas / superficial_liquid)**0.5)
def register_view(self, view):
""" register_view will create the needed structure
in order to be able to sent all data to Prometheus
"""
v_name = get_view_name(self.options.namespace, view)
if v_name not in self.registered_views:
desc = {'name': v_name,
'documentation': view.description,
'labels': list(map(sanitize, view.columns))}
self.registered_views[v_name] = desc
self.registry.register(self) | register_view will create the needed structure
in order to be able to sent all data to Prometheus | Below is the the instruction that describes the task:
### Input:
register_view will create the needed structure
in order to be able to sent all data to Prometheus
### Response:
def register_view(self, view):
    """Register a view with this Prometheus exporter.

    Builds the metric descriptor (name, documentation, sanitized label
    names) needed to export the view's data to Prometheus.  The
    descriptor is built only once per view name; on each new view this
    collector is (re-)registered with the Prometheus registry.

    :param view: the view to expose -- presumably an OpenCensus
        ``View`` with ``description`` and ``columns``; TODO confirm.
    """
    v_name = get_view_name(self.options.namespace, view)
    # Skip views we have already described.
    if v_name not in self.registered_views:
        desc = {'name': v_name,
                'documentation': view.description,
                # Prometheus label names must be sanitized (no dots etc.).
                'labels': list(map(sanitize, view.columns))}
        self.registered_views[v_name] = desc
        self.registry.register(self)
def user_remove(name, user=None, password=None, host=None, port=None,
database='admin', authdb=None):
'''
Remove a MongoDB user
CLI Example:
.. code-block:: bash
salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
'''
conn = _connect(user, password, host, port)
if not conn:
return 'Failed to connect to mongo database'
try:
log.info('Removing user %s', name)
mdb = pymongo.database.Database(conn, database)
mdb.remove_user(name)
except pymongo.errors.PyMongoError as err:
log.error('Creating database %s failed with error: %s', name, err)
return six.text_type(err)
return True | Remove a MongoDB user
CLI Example:
.. code-block:: bash
salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database> | Below is the the instruction that describes the task:
### Input:
Remove a MongoDB user
CLI Example:
.. code-block:: bash
salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
### Response:
def user_remove(name, user=None, password=None, host=None, port=None,
                database='admin', authdb=None):
    '''
    Remove a MongoDB user.

    :param name: name of the user to remove.
    :param user: connection user name.
    :param password: connection password.
    :param host: MongoDB host.
    :param port: MongoDB port.
    :param database: database the user belongs to (default ``admin``).
    :param authdb: accepted for interface compatibility; currently
        unused because :func:`_connect` does not take it.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
    '''
    conn = _connect(user, password, host, port)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        log.info('Removing user %s', name)
        mdb = pymongo.database.Database(conn, database)
        mdb.remove_user(name)
    except pymongo.errors.PyMongoError as err:
        # Fixed: the previous message wrongly said "Creating database
        # %s failed" inside this removal function.
        log.error('Removing user %s failed with error: %s', name, err)
        return six.text_type(err)
    return True
def _repr_categories_info(self):
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace to simple save space by
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]" | Returns a string representation of the footer. | Below is the the instruction that describes the task:
### Input:
Returns a string representation of the footer.
### Response:
def _repr_categories_info(self):
    """
    Return the footer line of the repr: ``Categories (n, dtype): [...]``.

    The category list wraps at the display/terminal width, continuation
    lines indented under the header; inside an IPython frontend no
    wrapping is performed.
    """
    category_strs = self._repr_categories()
    dtype = getattr(self.categories, 'dtype_str',
                    str(self.categories.dtype))
    levheader = "Categories ({length}, {dtype}): ".format(
        length=len(self.categories), dtype=dtype)
    width, height = get_terminal_size()
    max_width = get_option("display.width") or width
    if console.in_ipython_frontend():
        # 0 = no breaks
        max_width = 0
    levstring = ""
    start = True
    cur_col_len = len(levheader)  # header
    # Ordered categoricals join with " < "; unordered with ", ".
    sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
    linesep = sep.rstrip() + "\n"  # remove whitespace
    for val in category_strs:
        if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
            # Line would overflow: break and indent under the header.
            levstring += linesep + (" " * (len(levheader) + 1))
            cur_col_len = len(levheader) + 1  # header + a whitespace
        elif not start:
            levstring += sep
        cur_col_len += len(val)
        levstring += val
        start = False
    # Collapse the truncation marker (" < ... < " -> " ... ") to save space.
    return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def clone_with_updates(self, **kwargs):
"""Returns new BindingPrediction with updated fields"""
fields_dict = self.to_dict()
fields_dict.update(kwargs)
return BindingPrediction(**fields_dict) | Returns new BindingPrediction with updated fields | Below is the the instruction that describes the task:
### Input:
Returns new BindingPrediction with updated fields
### Response:
def clone_with_updates(self, **kwargs):
    """Return a new BindingPrediction whose fields are this object's
    fields overridden by ``kwargs``."""
    merged = dict(self.to_dict(), **kwargs)
    return BindingPrediction(**merged)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(SETUPTOOLS_SHIM % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None) | Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit | Below is the the instruction that describes the task:
### Input:
Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
### Response:
def _copy_dist_from_dir(link_path, location):
    """Build an sdist from the project at `link_path` and unpack it into
    `location`.

    Invoked when the user requests to install a local directory, e.g.:
        pip install .
        pip install ~/dev/git-repos/python-prompt-toolkit
    """
    # NOTE: this replaces the old shutil.copytree approach (which copied
    # everything and was VERY SLOW for large directories) by building an
    # sdist and unpacking that instead.
    # See https://github.com/pypa/pip/issues/2195
    if os.path.isdir(location):
        # Start from a clean target directory.
        rmtree(location)
    # build an sdist: run `setup.py sdist` through the setuptools shim so
    # it works even for distutils-only setup scripts.
    setup_py = 'setup.py'
    sdist_args = [sys.executable]
    sdist_args.append('-c')
    sdist_args.append(SETUPTOOLS_SHIM % setup_py)
    sdist_args.append('sdist')
    sdist_args += ['--dist-dir', location]
    logger.info('Running setup.py sdist for %s', link_path)
    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
    # unpack sdist into `location`; the dist dir contains exactly the one
    # archive just built.
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    unpack_file(sdist, location, content_type=None, link=None)
def _skip_char_around(self, string, char='\n'):
"""
Custom pseudo method for skipping a given char around a string.
The default char to be skipped is the new line (\n) one.
Example:
'\nHello\n' would call ``_base_compile`` with 'Hello' only.
"""
starts, ends = '', ''
n = len(char)
if string.startswith(char):
starts = string[:n]
string = string[n:]
if string.endswith(char):
ends = string[-n:]
string = string[:-n]
string = self._base_compile(string)
if starts:
string = starts + string
if ends:
string = string + ends
return string | Custom pseudo method for skipping a given char around a string.
The default char to be skipped is the new line (\n) one.
Example:
'\nHello\n' would call ``_base_compile`` with 'Hello' only. | Below is the the instruction that describes the task:
### Input:
Custom pseudo method for skipping a given char around a string.
The default char to be skipped is the new line (\n) one.
Example:
'\nHello\n' would call ``_base_compile`` with 'Hello' only.
### Response:
def _skip_char_around(self, string, char='\n'):
    """
    Compile ``string`` while preserving a leading/trailing ``char``.

    The given char (newline by default) is stripped from either end,
    the remainder is passed through ``_base_compile``, and the stripped
    pieces are re-attached.

    Example:
        '\nHello\n' calls ``_base_compile`` with 'Hello' only.
    """
    size = len(char)
    prefix = suffix = ''
    if string.startswith(char):
        prefix, string = string[:size], string[size:]
    if string.endswith(char):
        suffix, string = string[-size:], string[:-size]
    compiled = self._base_compile(string)
    return prefix + compiled + suffix
def nx_dag_node_rank(graph, nodes=None):
"""
Returns rank of nodes that define the "level" each node is on in a
topological sort. This is the same as the Graphviz dot rank.
Ignore:
simple_graph = ut.simplify_graph(exi_graph)
adj_dict = ut.nx_to_adj_dict(simple_graph)
import plottool as pt
pt.qt4ensure()
pt.show_nx(graph)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
>>> nodes = [2, 1, 5]
>>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
>>> graph = f_graph.reverse()
>>> #ranks = ut.nx_dag_node_rank(graph, nodes)
>>> ranks = ut.nx_dag_node_rank(graph, nodes)
>>> result = ('ranks = %r' % (ranks,))
>>> print(result)
ranks = [3, 2, 1]
"""
import utool as ut
source = list(ut.nx_source_nodes(graph))[0]
longest_paths = dict([(target, dag_longest_path(graph, source, target))
for target in graph.nodes()])
node_to_rank = ut.map_dict_vals(len, longest_paths)
if nodes is None:
return node_to_rank
else:
ranks = ut.dict_take(node_to_rank, nodes)
return ranks | Returns rank of nodes that define the "level" each node is on in a
topological sort. This is the same as the Graphviz dot rank.
Ignore:
simple_graph = ut.simplify_graph(exi_graph)
adj_dict = ut.nx_to_adj_dict(simple_graph)
import plottool as pt
pt.qt4ensure()
pt.show_nx(graph)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
>>> nodes = [2, 1, 5]
>>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
>>> graph = f_graph.reverse()
>>> #ranks = ut.nx_dag_node_rank(graph, nodes)
>>> ranks = ut.nx_dag_node_rank(graph, nodes)
>>> result = ('ranks = %r' % (ranks,))
>>> print(result)
ranks = [3, 2, 1] | Below is the the instruction that describes the task:
### Input:
Returns rank of nodes that define the "level" each node is on in a
topological sort. This is the same as the Graphviz dot rank.
Ignore:
simple_graph = ut.simplify_graph(exi_graph)
adj_dict = ut.nx_to_adj_dict(simple_graph)
import plottool as pt
pt.qt4ensure()
pt.show_nx(graph)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
>>> nodes = [2, 1, 5]
>>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
>>> graph = f_graph.reverse()
>>> #ranks = ut.nx_dag_node_rank(graph, nodes)
>>> ranks = ut.nx_dag_node_rank(graph, nodes)
>>> result = ('ranks = %r' % (ranks,))
>>> print(result)
ranks = [3, 2, 1]
### Response:
def nx_dag_node_rank(graph, nodes=None):
    """
    Returns rank of nodes that define the "level" each node is on in a
    topological sort. This is the same as the Graphviz dot rank.

    If `nodes` is None a mapping from every node to its rank is
    returned; otherwise a list of ranks for the given nodes.

    Ignore:
        simple_graph = ut.simplify_graph(exi_graph)
        adj_dict = ut.nx_to_adj_dict(simple_graph)
        import plottool as pt
        pt.qt4ensure()
        pt.show_nx(graph)
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import * # NOQA
        >>> import utool as ut
        >>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]}
        >>> nodes = [2, 1, 5]
        >>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph)
        >>> graph = f_graph.reverse()
        >>> #ranks = ut.nx_dag_node_rank(graph, nodes)
        >>> ranks = ut.nx_dag_node_rank(graph, nodes)
        >>> result = ('ranks = %r' % (ranks,))
        >>> print(result)
        ranks = [3, 2, 1]
    """
    import utool as ut
    # NOTE(review): assumes the DAG has exactly one source node -- the
    # first yielded source is taken as the root; confirm for multi-root
    # graphs.
    source = list(ut.nx_source_nodes(graph))[0]
    # A node's rank is the length of the longest source-to-node path,
    # which matches the level Graphviz dot would assign.
    longest_paths = dict([(target, dag_longest_path(graph, source, target))
                          for target in graph.nodes()])
    node_to_rank = ut.map_dict_vals(len, longest_paths)
    if nodes is None:
        return node_to_rank
    else:
        ranks = ut.dict_take(node_to_rank, nodes)
        return ranks
def simulated_binary_crossover(random, mom, dad, args):
"""Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions.
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
di = args.setdefault('sbx_distribution_index', 10)
bounder = args['_ec'].bounder
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
try:
if m > d:
m, d = d, m
beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
alpha = 2.0 - 1.0 / beta**(di + 1.0)
u = random.random()
if u <= (1.0 / alpha):
beta_q = (u * alpha)**(1.0 / float(di + 1.0))
else:
beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
bro_val = 0.5 * ((m + d) - beta_q * (d - m))
bro_val = max(min(bro_val, ub), lb)
sis_val = 0.5 * ((m + d) + beta_q * (d - m))
sis_val = max(min(sis_val, ub), lb)
if random.random() > 0.5:
bro_val, sis_val = sis_val, bro_val
bro[i] = bro_val
sis[i] = sis_val
except ZeroDivisionError:
# The offspring already have legitimate values for every element,
# so no need to take any special action here.
pass
return [bro, sis]
else:
return [mom, dad] | Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions. | Below is the the instruction that describes the task:
### Input:
Return the offspring of simulated binary crossover on the candidates.
This function performs simulated binary crossover (SBX), following the
implementation in NSGA-II
`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *sbx_distribution_index* -- the non-negative distribution index
(default 10)
A small value of the `sbx_distribution_index` optional argument allows
solutions far away from parents to be created as child solutions,
while a large value restricts only near-parent solutions to be created as
child solutions.
### Response:
def simulated_binary_crossover(random, mom, dad, args):
    """Return the offspring of simulated binary crossover on the candidates.
    This function performs simulated binary crossover (SBX), following the
    implementation in NSGA-II
    `(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments
    Optional keyword arguments in args:
    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *sbx_distribution_index* -- the non-negative distribution index
      (default 10)
    A small value of the `sbx_distribution_index` optional argument allows
    solutions far away from parents to be created as child solutions,
    while a large value restricts only near-parent solutions to be created as
    child solutions.
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        di = args.setdefault('sbx_distribution_index', 10)
        bounder = args['_ec'].bounder
        # Children start as copies of the parents; genes are overwritten below.
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
            try:
                # Order the parent genes so m <= d; the beta formula assumes it.
                if m > d:
                    m, d = d, m
                # Spread limited by the distance to the nearer bound.
                beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
                alpha = 2.0 - 1.0 / beta**(di + 1.0)
                u = random.random()
                # Sample the spread factor beta_q from the SBX polynomial
                # distribution via inverse transform sampling.
                if u <= (1.0 / alpha):
                    beta_q = (u * alpha)**(1.0 / float(di + 1.0))
                else:
                    beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
                # Children symmetric about the parents' midpoint, clipped
                # to the bounds.
                bro_val = 0.5 * ((m + d) - beta_q * (d - m))
                bro_val = max(min(bro_val, ub), lb)
                sis_val = 0.5 * ((m + d) + beta_q * (d - m))
                sis_val = max(min(sis_val, ub), lb)
                # Randomly swap which child receives the lower value.
                if random.random() > 0.5:
                    bro_val, sis_val = sis_val, bro_val
                bro[i] = bro_val
                sis[i] = sis_val
            except ZeroDivisionError:
                # d == m: the parents agree on this gene, so the copied
                # values are already legitimate; no special action needed.
                pass
        return [bro, sis]
    else:
        return [mom, dad]
def string_found(self, ypos, xpos, string):
"""
Return True if `string` is found at screen co-ordinates
`ypos`/`xpos`, False otherwise.
Co-ordinates are 1 based, as listed in the status area of the
terminal.
"""
found = self.string_get(ypos, xpos, len(string))
log.debug('string_found() saw "{0}"'.format(found))
return found == string | Return True if `string` is found at screen co-ordinates
`ypos`/`xpos`, False otherwise.
Co-ordinates are 1 based, as listed in the status area of the
terminal. | Below is the the instruction that describes the task:
### Input:
Return True if `string` is found at screen co-ordinates
`ypos`/`xpos`, False otherwise.
Co-ordinates are 1 based, as listed in the status area of the
terminal.
### Response:
def string_found(self, ypos, xpos, string):
    """
    Check whether `string` appears exactly at screen co-ordinates
    `ypos`/`xpos`.

    Co-ordinates are 1 based, as listed in the status area of the
    terminal.  Returns True on a match, False otherwise.
    """
    seen = self.string_get(ypos, xpos, len(string))
    log.debug('string_found() saw "{0}"'.format(seen))
    return seen == string
def _handle_non_existant_index(cls):
"""
Handle and check that some configuration index exists.
"""
try:
# We try to call the http code.
PyFunceble.INTERN["http_code"]
except KeyError:
# If it is not found.
# We initiate an empty http code.
PyFunceble.INTERN["http_code"] = "*" * 3
try:
# We try to call the referer.
PyFunceble.INTERN["referer"]
except KeyError:
# If it is not found.
# We initiate an `Unknown` referer.
PyFunceble.INTERN["referer"] = "Unknown" | Handle and check that some configuration index exists. | Below is the the instruction that describes the task:
### Input:
Handle and check that some configuration index exists.
### Response:
def _handle_non_existant_index(cls):
    """
    Ensure the configuration indexes we rely on exist, creating
    placeholder values for any that are missing.
    """
    defaults = (
        # Placeholder HTTP status code: three stars.
        ("http_code", "*" * 3),
        # Referer is unknown until a lookup fills it in.
        ("referer", "Unknown"),
    )
    for index, placeholder in defaults:
        try:
            # Probe the index; a KeyError means it was never set.
            PyFunceble.INTERN[index]
        except KeyError:
            # Initiate the placeholder value.
            PyFunceble.INTERN[index] = placeholder
def do_contactplaceholder(parser, token):
"""
Method that parse the contactplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return ContactPlaceholderNode(name, **params) | Method that parse the contactplaceholder template tag. | Below is the the instruction that describes the task:
### Input:
Method that parse the contactplaceholder template tag.
### Response:
def do_contactplaceholder(parser, token):
"""
Method that parse the contactplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return ContactPlaceholderNode(name, **params) |
def setHint( self, hint ):
"""
Sets the hint for this widget.
:param hint | <str>
"""
self._hint = hint
self.detailWidget().setHint(hint) | Sets the hint for this widget.
:param hint | <str> | Below is the instruction that describes the task:
### Input:
Sets the hint for this widget.
:param hint | <str>
### Response:
def setHint( self, hint ):
"""
Sets the hint for this widget.
:param hint | <str>
"""
self._hint = hint
self.detailWidget().setHint(hint) |
def read_sampling_params_from_config(cp, section_group=None,
section='sampling_params'):
"""Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
sampling. If a multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler.
"""
if section_group is not None:
section_prefix = '{}_'.format(section_group)
else:
section_prefix = ''
section = section_prefix + section
replaced_params = set()
sampling_params = set()
for args in cp.options(section):
map_args = cp.get(section, args)
sampling_params.update(set(map(str.strip, map_args.split(','))))
replaced_params.update(set(map(str.strip, args.split(','))))
return list(sampling_params), list(replaced_params) | Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
sampling. If a multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler. | Below is the instruction that describes the task:
### Input:
Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
sampling. If a multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler.
### Response:
def read_sampling_params_from_config(cp, section_group=None,
section='sampling_params'):
"""Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
sampling. If a multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler.
"""
if section_group is not None:
section_prefix = '{}_'.format(section_group)
else:
section_prefix = ''
section = section_prefix + section
replaced_params = set()
sampling_params = set()
for args in cp.options(section):
map_args = cp.get(section, args)
sampling_params.update(set(map(str.strip, map_args.split(','))))
replaced_params.update(set(map(str.strip, args.split(','))))
return list(sampling_params), list(replaced_params) |
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = abs(hash(name))
return '%s-%s' % (h, x) | Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name. | Below is the instruction that describes the task:
### Input:
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
### Response:
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = abs(hash(name))
return '%s-%s' % (h, x) |
def calculate_acl(data, m=5, dtype=int):
r"""Calculates the autocorrelation length (ACL).
Given a normalized autocorrelation function :math:`\rho[i]` (by normalized,
we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:
.. math::
\tau = 1 + 2 \sum_{i=1}^{K} \rho[i].
The number of samples used :math:`K` is found by using the first point
such that:
.. math::
m \tau[K] \leq K,
where :math:`m` is a tuneable parameter (default = 5). If no such point
exists, then the given data set it too short to estimate the ACL; in this
case ``inf`` is returned.
This algorithm for computing the ACL is taken from:
N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988).
Parameters
-----------
data : TimeSeries or array
A TimeSeries of data.
m : int
The number of autocorrelation lengths to use for determining the window
size :math:`K` (see above).
dtype : int or float
The datatype of the output. If the dtype was set to int, then the
ceiling is returned.
Returns
-------
acl : int or float
The autocorrelation length. If the ACL cannot be estimated, returns
``numpy.inf``.
"""
# sanity check output data type
if dtype not in [int, float]:
raise ValueError("The dtype must be either int or float.")
# if we have only a single point, just return 1
if len(data) < 2:
return 1
# calculate ACF that is normalized by the zero-lag value
acf = calculate_acf(data)
cacf = 2 * acf.numpy().cumsum() - 1
win = m * cacf <= numpy.arange(len(cacf))
if win.any():
acl = cacf[numpy.where(win)[0][0]]
if dtype == int:
acl = int(numpy.ceil(acl))
else:
acl = numpy.inf
return acl | r"""Calculates the autocorrelation length (ACL).
Given a normalized autocorrelation function :math:`\rho[i]` (by normalized,
we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:
.. math::
\tau = 1 + 2 \sum_{i=1}^{K} \rho[i].
The number of samples used :math:`K` is found by using the first point
such that:
.. math::
m \tau[K] \leq K,
where :math:`m` is a tuneable parameter (default = 5). If no such point
exists, then the given data set it too short to estimate the ACL; in this
case ``inf`` is returned.
This algorithm for computing the ACL is taken from:
N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988).
Parameters
-----------
data : TimeSeries or array
A TimeSeries of data.
m : int
The number of autocorrelation lengths to use for determining the window
size :math:`K` (see above).
dtype : int or float
The datatype of the output. If the dtype was set to int, then the
ceiling is returned.
Returns
-------
acl : int or float
The autocorrelation length. If the ACL cannot be estimated, returns
``numpy.inf``. | Below is the instruction that describes the task:
### Input:
r"""Calculates the autocorrelation length (ACL).
Given a normalized autocorrelation function :math:`\rho[i]` (by normalized,
we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:
.. math::
\tau = 1 + 2 \sum_{i=1}^{K} \rho[i].
The number of samples used :math:`K` is found by using the first point
such that:
.. math::
m \tau[K] \leq K,
where :math:`m` is a tuneable parameter (default = 5). If no such point
exists, then the given data set it too short to estimate the ACL; in this
case ``inf`` is returned.
This algorithm for computing the ACL is taken from:
N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988).
Parameters
-----------
data : TimeSeries or array
A TimeSeries of data.
m : int
The number of autocorrelation lengths to use for determining the window
size :math:`K` (see above).
dtype : int or float
The datatype of the output. If the dtype was set to int, then the
ceiling is returned.
Returns
-------
acl : int or float
The autocorrelation length. If the ACL cannot be estimated, returns
``numpy.inf``.
### Response:
def calculate_acl(data, m=5, dtype=int):
r"""Calculates the autocorrelation length (ACL).
Given a normalized autocorrelation function :math:`\rho[i]` (by normalized,
we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:
.. math::
\tau = 1 + 2 \sum_{i=1}^{K} \rho[i].
The number of samples used :math:`K` is found by using the first point
such that:
.. math::
m \tau[K] \leq K,
where :math:`m` is a tuneable parameter (default = 5). If no such point
exists, then the given data set it too short to estimate the ACL; in this
case ``inf`` is returned.
This algorithm for computing the ACL is taken from:
N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988).
Parameters
-----------
data : TimeSeries or array
A TimeSeries of data.
m : int
The number of autocorrelation lengths to use for determining the window
size :math:`K` (see above).
dtype : int or float
The datatype of the output. If the dtype was set to int, then the
ceiling is returned.
Returns
-------
acl : int or float
The autocorrelation length. If the ACL cannot be estimated, returns
``numpy.inf``.
"""
# sanity check output data type
if dtype not in [int, float]:
raise ValueError("The dtype must be either int or float.")
# if we have only a single point, just return 1
if len(data) < 2:
return 1
# calculate ACF that is normalized by the zero-lag value
acf = calculate_acf(data)
cacf = 2 * acf.numpy().cumsum() - 1
win = m * cacf <= numpy.arange(len(cacf))
if win.any():
acl = cacf[numpy.where(win)[0][0]]
if dtype == int:
acl = int(numpy.ceil(acl))
else:
acl = numpy.inf
return acl |
def new(cls, arg):
"""
Creates a new Parameter object from the given ParameterArgument.
"""
content = None
if arg.kind == 'file':
if os.path.exists(arg.value):
with open(arg.value, 'r') as f:
content = f.read()
else:
raise Exception('File does not exist: {}'.format(arg.value))
elif arg.kind == 'cli':
content = arg.value
for source_cls in cls.sources:
if source_cls.supports_source(arg):
return source_cls(content)
msg = 'Unsupported Parameter Source "{}"'
raise Execption(msg.format(arg.value)) | Creates a new Parameter object from the given ParameterArgument. | Below is the instruction that describes the task:
### Input:
Creates a new Parameter object from the given ParameterArgument.
### Response:
def new(cls, arg):
"""
Creates a new Parameter object from the given ParameterArgument.
"""
content = None
if arg.kind == 'file':
if os.path.exists(arg.value):
with open(arg.value, 'r') as f:
content = f.read()
else:
raise Exception('File does not exist: {}'.format(arg.value))
elif arg.kind == 'cli':
content = arg.value
for source_cls in cls.sources:
if source_cls.supports_source(arg):
return source_cls(content)
msg = 'Unsupported Parameter Source "{}"'
raise Execption(msg.format(arg.value)) |
def get_event_hub(self, hub_name):
'''
Retrieves an existing event hub.
hub_name:
Name of the event hub.
'''
_validate_not_none('hub_name', hub_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(hub_name) + ''
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) | Retrieves an existing event hub.
hub_name:
Name of the event hub. | Below is the instruction that describes the task:
### Input:
Retrieves an existing event hub.
hub_name:
Name of the event hub.
### Response:
def get_event_hub(self, hub_name):
'''
Retrieves an existing event hub.
hub_name:
Name of the event hub.
'''
_validate_not_none('hub_name', hub_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(hub_name) + ''
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) |
def send_email(subsystem, sender, recipients, subject, html_body=None, text_body=None, message_uuid=None):
"""Send an email to a list of recipients using the system configured email method (SES or SMTP)
Args:
subsystem (str): Name of the subsystem where the email originated from
sender (str): From email address
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
html_body (str): HTML body of the email
text_body (str): Text body of the email
message_uuid (str): Optional UUID message identifier. If not provided one will be generated automatically
Returns:
`None`
"""
if type(recipients) == str:
recipients = [recipients]
recipients = list(set(recipients))
send_notification(
subsystem=subsystem,
recipients=[NotificationContact('email', x) for x in recipients],
subject=subject,
body_html=html_body,
body_text=text_body
) | Send an email to a list of recipients using the system configured email method (SES or SMTP)
Args:
subsystem (str): Name of the subsystem where the email originated from
sender (str): From email address
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
html_body (str): HTML body of the email
text_body (str): Text body of the email
message_uuid (str): Optional UUID message identifier. If not provided one will be generated automatically
Returns:
`None` | Below is the instruction that describes the task:
### Input:
Send an email to a list of recipients using the system configured email method (SES or SMTP)
Args:
subsystem (str): Name of the subsystem where the email originated from
sender (str): From email address
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
html_body (str): HTML body of the email
text_body (str): Text body of the email
message_uuid (str): Optional UUID message identifier. If not provided one will be generated automatically
Returns:
`None`
### Response:
def send_email(subsystem, sender, recipients, subject, html_body=None, text_body=None, message_uuid=None):
"""Send an email to a list of recipients using the system configured email method (SES or SMTP)
Args:
subsystem (str): Name of the subsystem where the email originated from
sender (str): From email address
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
html_body (str): HTML body of the email
text_body (str): Text body of the email
message_uuid (str): Optional UUID message identifier. If not provided one will be generated automatically
Returns:
`None`
"""
if type(recipients) == str:
recipients = [recipients]
recipients = list(set(recipients))
send_notification(
subsystem=subsystem,
recipients=[NotificationContact('email', x) for x in recipients],
subject=subject,
body_html=html_body,
body_text=text_body
) |
def push_current(self, project):
"""Temporary changes the current project to 'project'. Should
be followed by 'pop-current'."""
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
self.saved_current_project.append(self.current_project)
self.current_project = project | Temporary changes the current project to 'project'. Should
be followed by 'pop-current'. | Below is the instruction that describes the task:
### Input:
Temporary changes the current project to 'project'. Should
be followed by 'pop-current'.
### Response:
def push_current(self, project):
"""Temporary changes the current project to 'project'. Should
be followed by 'pop-current'."""
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
self.saved_current_project.append(self.current_project)
self.current_project = project |
def search_all(self, collection, job_id=None):
'''a "show all" search that doesn't require a query
the user is shown URLs to
'''
results = [['job_id', 'browser']]
url = "%s/projects/%s/jobs" %(self.api_base,
quote_plus(collection.strip('/')))
response = requests.get(url, headers=self.headers)
if response.status_code == 200:
jobs = response.json()
# We can't get a listing of artifacts
# https://gitlab.com/gitlab-org/gitlab-ce/issues/51515
# Parse through jobs (each can have different tags for a collection):
for job in jobs:
# Only show jobs that are successful
if job['status'] == 'success':
name = job['name']
for artifact in job['artifacts']:
if artifact['filename'].endswith('zip'):
# The user must browse to see the names
artifact_url = ("%s/%s/-/jobs/%s/artifacts/browse/%s"
%(self.base ,
collection,
job['id'],
name))
results.append([str(job['id']), artifact_url])
if len(results) == 1:
bot.info("No potential archives found in artifacts.")
sys.exit(0)
bot.info("Artifact Browsers (you will need path and job id for pull)")
bot.table(results)
return results | a "show all" search that doesn't require a query
the user is shown URLs to | Below is the instruction that describes the task:
### Input:
a "show all" search that doesn't require a query
the user is shown URLs to
### Response:
def search_all(self, collection, job_id=None):
'''a "show all" search that doesn't require a query
the user is shown URLs to
'''
results = [['job_id', 'browser']]
url = "%s/projects/%s/jobs" %(self.api_base,
quote_plus(collection.strip('/')))
response = requests.get(url, headers=self.headers)
if response.status_code == 200:
jobs = response.json()
# We can't get a listing of artifacts
# https://gitlab.com/gitlab-org/gitlab-ce/issues/51515
# Parse through jobs (each can have different tags for a collection):
for job in jobs:
# Only show jobs that are successful
if job['status'] == 'success':
name = job['name']
for artifact in job['artifacts']:
if artifact['filename'].endswith('zip'):
# The user must browse to see the names
artifact_url = ("%s/%s/-/jobs/%s/artifacts/browse/%s"
%(self.base ,
collection,
job['id'],
name))
results.append([str(job['id']), artifact_url])
if len(results) == 1:
bot.info("No potential archives found in artifacts.")
sys.exit(0)
bot.info("Artifact Browsers (you will need path and job id for pull)")
bot.table(results)
return results |
def register_plugin(cls, model, plugin):
'''
Reguster a plugin for the model.
The only one plugin can be registered. If you want to combine plugins, use CompoundPlugin.
'''
logger.info("Plugin registered for %s: %s", model, plugin)
cls.plugins[model] = plugin | Reguster a plugin for the model.
The only one plugin can be registered. If you want to combine plugins, use CompoundPlugin. | Below is the instruction that describes the task:
### Input:
Reguster a plugin for the model.
The only one plugin can be registered. If you want to combine plugins, use CompoundPlugin.
### Response:
def register_plugin(cls, model, plugin):
'''
Reguster a plugin for the model.
The only one plugin can be registered. If you want to combine plugins, use CompoundPlugin.
'''
logger.info("Plugin registered for %s: %s", model, plugin)
cls.plugins[model] = plugin |
def preprocess_record(self, pid, record, links_factory=None, **kwargs):
"""Prepare a record and persistent identifier for serialization."""
links_factory = links_factory or (lambda x, record=None, **k: dict())
metadata = copy.deepcopy(record.replace_refs()) if self.replace_refs \
else record.dumps()
return dict(
pid=pid,
metadata=metadata,
links=links_factory(pid, record=record, **kwargs),
revision=record.revision_id,
created=(pytz.utc.localize(record.created).isoformat()
if record.created else None),
updated=(pytz.utc.localize(record.updated).isoformat()
if record.updated else None),
) | Prepare a record and persistent identifier for serialization. | Below is the instruction that describes the task:
### Input:
Prepare a record and persistent identifier for serialization.
### Response:
def preprocess_record(self, pid, record, links_factory=None, **kwargs):
"""Prepare a record and persistent identifier for serialization."""
links_factory = links_factory or (lambda x, record=None, **k: dict())
metadata = copy.deepcopy(record.replace_refs()) if self.replace_refs \
else record.dumps()
return dict(
pid=pid,
metadata=metadata,
links=links_factory(pid, record=record, **kwargs),
revision=record.revision_id,
created=(pytz.utc.localize(record.created).isoformat()
if record.created else None),
updated=(pytz.utc.localize(record.updated).isoformat()
if record.updated else None),
) |
def filter_entries(entries, filters, exclude):
"""
Filters a list of host entries according to the given filters.
:param entries: A list of host entries.
:type entries: [:py:class:`HostEntry`]
:param filters: Regexes that must match a `HostEntry`.
:type filters: [``str``]
:param exclude: Regexes that must NOT match a `HostEntry`.
:type exclude: [``str``]
:return: The filtered list of host entries.
:rtype: [:py:class:`HostEntry`]
"""
filtered = [entry
for entry in entries
if all(entry.matches(f) for f in filters)
and not any(entry.matches(e) for e in exclude)]
return filtered | Filters a list of host entries according to the given filters.
:param entries: A list of host entries.
:type entries: [:py:class:`HostEntry`]
:param filters: Regexes that must match a `HostEntry`.
:type filters: [``str``]
:param exclude: Regexes that must NOT match a `HostEntry`.
:type exclude: [``str``]
:return: The filtered list of host entries.
:rtype: [:py:class:`HostEntry`] | Below is the instruction that describes the task:
### Input:
Filters a list of host entries according to the given filters.
:param entries: A list of host entries.
:type entries: [:py:class:`HostEntry`]
:param filters: Regexes that must match a `HostEntry`.
:type filters: [``str``]
:param exclude: Regexes that must NOT match a `HostEntry`.
:type exclude: [``str``]
:return: The filtered list of host entries.
:rtype: [:py:class:`HostEntry`]
### Response:
def filter_entries(entries, filters, exclude):
"""
Filters a list of host entries according to the given filters.
:param entries: A list of host entries.
:type entries: [:py:class:`HostEntry`]
:param filters: Regexes that must match a `HostEntry`.
:type filters: [``str``]
:param exclude: Regexes that must NOT match a `HostEntry`.
:type exclude: [``str``]
:return: The filtered list of host entries.
:rtype: [:py:class:`HostEntry`]
"""
filtered = [entry
for entry in entries
if all(entry.matches(f) for f in filters)
and not any(entry.matches(e) for e in exclude)]
return filtered |
def exists(self, pattern, seconds=None):
""" Searches for an image pattern in the given region
Returns Match if pattern exists, None otherwise (does not throw exception)
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
find_time = time.time()
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
return None
if seconds is None:
seconds = self.autoWaitTimeout
if isinstance(pattern, int):
# Actually just a "wait" statement
time.sleep(pattern)
return
if not pattern:
time.sleep(seconds)
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
if needle is None:
raise ValueError("Unable to load image '{}'".format(pattern.path))
needle_height, needle_width, needle_channels = needle.shape
match = None
timeout = time.time() + seconds
# Consult TemplateMatcher to find needle
while not match:
matcher = TemplateMatcher(r.getBitmap())
match = matcher.findBestMatch(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if time.time() > timeout:
break
if match is None:
Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
return None
# Translate local position into global screen position
position, confidence = match
position = (position[0] + self.x, position[1] + self.y)
self._lastMatch = Match(
confidence,
pattern.offset,
(position, (needle_width, needle_height)))
#self._lastMatch.debug_preview()
Debug.info("Found match for pattern '{}' at ({},{}) with confidence ({}). Target at ({},{})".format(
pattern.path,
self._lastMatch.getX(),
self._lastMatch.getY(),
self._lastMatch.getScore(),
self._lastMatch.getTarget().x,
self._lastMatch.getTarget().y))
self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
return self._lastMatch | Searches for an image pattern in the given region
Returns Match if pattern exists, None otherwise (does not throw exception)
Sikuli supports OCR search with a text parameter. This does not (yet). | Below is the instruction that describes the task:
### Input:
Searches for an image pattern in the given region
Returns Match if pattern exists, None otherwise (does not throw exception)
Sikuli supports OCR search with a text parameter. This does not (yet).
### Response:
def exists(self, pattern, seconds=None):
""" Searches for an image pattern in the given region
Returns Match if pattern exists, None otherwise (does not throw exception)
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
find_time = time.time()
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
return None
if seconds is None:
seconds = self.autoWaitTimeout
if isinstance(pattern, int):
# Actually just a "wait" statement
time.sleep(pattern)
return
if not pattern:
time.sleep(seconds)
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
if needle is None:
raise ValueError("Unable to load image '{}'".format(pattern.path))
needle_height, needle_width, needle_channels = needle.shape
match = None
timeout = time.time() + seconds
# Consult TemplateMatcher to find needle
while not match:
matcher = TemplateMatcher(r.getBitmap())
match = matcher.findBestMatch(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if time.time() > timeout:
break
if match is None:
Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
return None
# Translate local position into global screen position
position, confidence = match
position = (position[0] + self.x, position[1] + self.y)
self._lastMatch = Match(
confidence,
pattern.offset,
(position, (needle_width, needle_height)))
#self._lastMatch.debug_preview()
Debug.info("Found match for pattern '{}' at ({},{}) with confidence ({}). Target at ({},{})".format(
pattern.path,
self._lastMatch.getX(),
self._lastMatch.getY(),
self._lastMatch.getScore(),
self._lastMatch.getTarget().x,
self._lastMatch.getTarget().y))
self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
return self._lastMatch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.