code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def ggnn_fast_dense(node_states,
                    adjacency_matrix,
                    num_edge_types,
                    total_value_depth,
                    name=None):
  """ggnn version of the MPNN from Gilmer et al.

  Let B be the number of batches.
  Let N be the number of nodes.
  Let D be the size of the node hidden states.
  Let V be the size of the output of the ggnn.
  Let T be the number of transforms / edge types.

  Args:
    node_states: The value Tensor of shape [B, N, D].
      (NOTE(review): an earlier doc said [B, T, N, D], but the reshape below
      uses shape[1] as N, so the input appears to be [B, N, D] — confirm.)
    adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
      indices b, i, j, k is the indicator of the edge from node j to node i in
      batch b. A standard adjacency matrix will only have values of one, while a
      multigraph may have larger integer values.
    num_edge_types: An integer specifying number of edge types (T).
    total_value_depth: An integer (V).
    name: A string used as the variable-scope name.

  Returns:
    A Tensor of shape [B, N, V] combining the per-edge-type value
    transforms according to the adjacency structure.

  Raises:
    ValueError: if num_transforms doesn't equal num_edge_types and not using
      weighted sum.
  """
  # adjacency_matrix arrives as [B, N, N, T] and is transposed below to
  # [B, T, N, N] to line up with the per-edge-type values.
  with tf.variable_scope(
      name,
      default_name="ggnn_fast_dense",
      values=[node_states, adjacency_matrix, num_edge_types]):
    nodes_shape = common_layers.shape_list(node_states)
    # Project each node state into one value vector per edge type.
    v = _compute_edge_transforms(node_states,
                                 total_value_depth,
                                 num_edge_types,
                                 name="v_mpnn")
    v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
                       total_value_depth
                      ])  # Shape [B, N, T, V].
    v = tf.transpose(v, [0, 2, 1, 3])  # Shape [B, T, N, V].
    # Rearranging the dimensions to match the shape of all_edge_logits.
    edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
    output = compute_values(edge_vectors, v)
    return output
|
ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
multigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
|
def shift_right_3d(x, pad_value=None):
  """Shift the second dimension of x right by one.

  The last position along axis 1 is dropped; the first position is filled
  with zeros (default) or with `pad_value`.
  """
  if pad_value is not None:
    padded = tf.concat([pad_value, x], axis=1)
  else:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
  return padded[:, :-1, :]
|
Shift the second dimension of x right by one.
|
def compute(self):
    """Compute and return the signature according to the given data.

    Uses the explicit ``signature_version`` / ``signature_method`` attributes
    when set, otherwise falls back to the request parameters.

    Raises:
        RuntimeError: if a signature already exists in the parameters, or if
            the signature version is not "1" or "2".
    """
    if "Signature" in self.params:
        raise RuntimeError("Existing signature in parameters")
    if self.signature_version is not None:
        version = self.signature_version
    else:
        version = self.params["SignatureVersion"]
    if str(version) == "1":
        # `signing_data` renamed from `bytes`, which shadowed the builtin.
        signing_data = self.old_signing_text()
        hash_type = "sha1"
    elif str(version) == "2":
        signing_data = self.signing_text()
        if self.signature_method is not None:
            signature_method = self.signature_method
        else:
            signature_method = self.params["SignatureMethod"]
        # e.g. "HmacSHA256" -> "sha256"
        hash_type = signature_method[len("Hmac"):].lower()
    else:
        raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
    return self.creds.sign(signing_data, hash_type)
|
Compute and return the signature according to the given data.
|
def setTau(self, vehID, tau):
    """setTau(string, double) -> None
    Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle.
    """
    command, variable = tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_TAU
    self._connection._sendDoubleCmd(command, variable, vehID, tau)
|
setTau(string, double) -> None
Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle.
|
def split_filename(pathname):
    """
    @type  pathname: str
    @param pathname: Absolute path.
    @rtype:  tuple( str, str )
    @return: Tuple containing the path to the file and the base filename.
    """
    base_name = win32.PathFindFileName(pathname)
    dir_name = win32.PathRemoveFileSpec(pathname)
    return (dir_name, base_name)
|
@type pathname: str
@param pathname: Absolute path.
@rtype: tuple( str, str )
@return: Tuple containing the path to the file and the base filename.
|
def get_source_from_contracts_list(self, contracts):
    """
    Get the source data from the contracts list.

    Populates source_type, source_format, source_list and _source_hash from
    the first contract's concrete type.

    :param contracts: the list of contracts (all SolidityContract or all
        EVMContract instances)
    :raises ValueError: if the contracts are of an unsupported type
    :return: None
    """
    if contracts is None or len(contracts) == 0:
        return
    if isinstance(contracts[0], SolidityContract):
        self.source_type = "solidity-file"
        self.source_format = "text"
        for contract in contracts:
            self.source_list += [file.filename for file in contract.solidity_files]
            self._source_hash.append(contract.bytecode_hash)
            self._source_hash.append(contract.creation_bytecode_hash)
    elif isinstance(contracts[0], EVMContract):
        self.source_format = "evm-byzantium-bytecode"
        self.source_type = (
            "ethereum-address"
            if len(contracts[0].name) == 42 and contracts[0].name[0:2] == "0x"
            else "raw-bytecode"
        )
        for contract in contracts:
            if contract.creation_code:
                self.source_list.append(contract.creation_bytecode_hash)
            if contract.code:
                self.source_list.append(contract.bytecode_hash)
        self._source_hash = self.source_list
    else:
        # Was `assert False`, which is silently stripped under `python -O`
        # and would let unknown types fall through without any error.
        raise ValueError(
            "Unsupported contract type: %r" % type(contracts[0]).__name__
        )
|
get the source data from the contracts list
:param contracts: the list of contracts
:return:
|
def get_relationship_query_session_for_family(self, family_id=None, proxy=None):
    """Gets the ``OsidSession`` associated with the relationship query service for the given family.
    arg:    family_id (osid.id.Id): the ``Id`` of the family
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.relationship.RelationshipQuerySession) - a
            ``RelationshipQuerySession``
    raise:  NotFound - no ``Family`` found by the given ``Id``
    raise:  NullArgument - ``family_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_relationship_query()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if ``supports_relationship_query()``
    and ``supports_visible_federation()`` are ``true``*
    """
    # Guard clauses: argument presence first, then capability support.
    if not family_id:
        raise NullArgument
    if not self.supports_relationship_query():
        raise Unimplemented()
    ##
    # Need to include check to see if the familyId is found otherwise raise NotFound
    ##
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.RelationshipQuerySession(
            family_id, proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
|
Gets the ``OsidSession`` associated with the relationship query service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the family
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.RelationshipQuerySession) - a
``RelationshipQuerySession``
raise: NotFound - no ``Family`` found by the given ``Id``
raise: NullArgument - ``family_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_relationship_query()``
and ``supports_visible_federation()`` are ``true``*
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See documentation for method `GroundShakingIntensityModel` in
    :class:~`openquake.hazardlib.gsim.base.GSIM`
    """
    # The wrapped GSIM's stddevs are discarded and replaced with a single
    # constant-sigma vector over the epicentral distances.
    mean, _ = self._get_mean_and_stddevs(sites, rup, dists, imt,
                                         stddev_types)
    sigma = get_sigma(imt)
    stddevs = [sigma * np.ones(len(dists.repi))]
    return mean, stddevs
|
See documentation for method `GroundShakingIntensityModel` in
:class:~`openquake.hazardlib.gsim.base.GSIM`
|
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every tracked attribute is unset.
    tracked_fields = (
        self._uuid,
        self._avatar,
        self._public_nick_name,
        self._display_name,
        self._country,
    )
    return all(field is None for field in tracked_fields)
|
:rtype: bool
|
def _countOverlap(rep1, rep2):
"""
Return the overlap between two representations. rep1 and rep2 are lists of
non-zero indices.
"""
overlap = 0
for e in rep1:
if e in rep2:
overlap += 1
return overlap
|
Return the overlap between two representations. rep1 and rep2 are lists of
non-zero indices.
|
def list_campaigns(self, **kwargs):
    """List all update campaigns.

    :param int limit: number of campaigns to retrieve
    :param str order: sort direction of campaigns when ordered by creation time (desc|asc)
    :param str after: get campaigns after given campaign ID
    :param dict filters: Dictionary of filters to apply
    :return: List of :py:class:`Campaign` objects
    :rtype: PaginatedResponse
    """
    options = self._verify_sort_options(kwargs)
    options = self._verify_filters(options, Campaign, True)
    api = self._get_api(update_service.DefaultApi)
    return PaginatedResponse(api.update_campaign_list,
                             lwrap_type=Campaign, **options)
|
List all update campaigns.
:param int limit: number of campaigns to retrieve
:param str order: sort direction of campaigns when ordered by creation time (desc|asc)
:param str after: get campaigns after given campaign ID
:param dict filters: Dictionary of filters to apply
:return: List of :py:class:`Campaign` objects
:rtype: PaginatedResponse
|
def _iterbfs(self, start, end=None, forward=True):
"""
The forward parameter specifies whether it is a forward or backward
traversal. Returns a list of tuples where the first value is the hop
value the second value is the node id.
"""
queue, visited = deque([(start, 0)]), set([start])
# the direction of the bfs depends on the edges that are sampled
if forward:
get_edges = self.out_edges
get_next = self.tail
else:
get_edges = self.inc_edges
get_next = self.head
while queue:
curr_node, curr_step = queue.popleft()
yield (curr_node, curr_step)
if curr_node == end:
break
for edge in get_edges(curr_node):
tail = get_next(edge)
if tail not in visited:
visited.add(tail)
queue.append((tail, curr_step + 1))
|
The forward parameter specifies whether it is a forward or backward
traversal. Returns a list of tuples where the first value is the hop
value the second value is the node id.
|
def moreData(ra,dec,box):
    """Search the CFHT archive for more images of this location"""
    # NOTE(review): `box` is accepted but never used; the search radius is
    # hard-coded to 0.2 deg below — confirm whether `box` was meant to
    # supply it.
    import cfhtCutout
    cdata={'ra_deg': ra, 'dec_deg': dec, 'radius_deg': 0.2}
    # NOTE(review): the result is assigned but not returned or used, so
    # callers currently get None; presumably find_images has side effects
    # (e.g. printing) — verify.
    inter=cfhtCutout.find_images(cdata,0.2)
|
Search the CFHT archive for more images of this location
|
def get(self, key):
    '''
    Get a value for a given key
    :param key: entry's key
    :return: corresponding value
    '''
    # Data fields take precedence over sub-reports when a key exists in both.
    for mapping in (self._data_fields, self._sub_reports):
        if key in mapping:
            return mapping[key]
    return None
|
Get a value for a given key
:param key: entry's key
:return: corresponding value
|
def _update_secrets(self):
    '''Reload client secrets and refresh the API base.

    Reads the secrets credential file (either at .sregistry or named by the
    SREGISTRY_CLIENT_SECRETS environment variable) and, when a registry
    base is configured, updates the client's base URL accordingly.
    '''
    self.secrets = read_client_secrets()
    if self.secrets is None:
        return
    if "registry" not in self.secrets:
        return
    registry = self.secrets["registry"]
    if "base" in registry:
        self.base = registry["base"]
        self._update_base()
|
update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base.
|
def averageValues(self):
    """
    return the averaged values in the grid
    """
    assert self.opts['record_density'] and self.opts['method'] == 'sum'
    density = self.density
    averaged = self.values.copy()
    # Cells visited more than once: divide the accumulated sum by the count.
    multi = density > 1
    averaged[multi] /= density[multi]
    # Partly filled cells (density 0..1): scale down rather than up.
    # ONLY AS OPTION??:
    averaged[~multi] *= density[~multi]
    return averaged
|
return the averaged values in the grid
|
def pull_tar(url, name, verify=False):
    '''
    Execute a ``machinectl pull-tar`` to download a .tar container image,
    and add it to /var/lib/machines as a new container.

    .. note::
        **Requires systemd >= 219**

    url
        URL from which to download the container

    name
        Name for the new container

    verify : False
        Perform signature or checksum verification on the container. See the
        ``machinectl(1)`` man page (section titled "Image Transfer Commands")
        for more information on requirements for image verification. To perform
        signature verification, use ``verify=signature``. For checksum
        verification, use ``verify=checksum``. By default, no verification will
        be performed.

    CLI Examples:

    .. code-block:: bash

        salt myminion nspawn.pull_tar http://foo.domain.tld/containers/archlinux-2015.02.01.tar.gz arch2
    '''
    # Delegates to the shared image-pull helper with the 'tar' transfer type
    # (the docstring previously said ``pull-raw``, but this is the tar path).
    return _pull_image('tar', url, name, verify=verify)
|
Execute a ``machinectl pull-raw`` to download a .tar container image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_tar http://foo.domain.tld/containers/archlinux-2015.02.01.tar.gz arch2
|
def _(f, x):
"""
fmap for dict like, not `f` should have signature `f::key->value->(key, value)`
"""
result = {}
for k, v in x.items():
k_, v_ = f(k, v)
result[k_] = v_
return result
|
fmap for dict-likes; note `f` should have signature `f :: key -> value -> (key, value)`
|
def _get_driver(self):
    """Get authenticated GCE driver."""
    gce_driver_cls = get_driver(Provider.GCE)
    return gce_driver_cls(
        self.service_account_email,
        self.service_account_file,
        project=self.service_account_project,
    )
|
Get authenticated GCE driver.
|
def create_weapon_layer(weapon, hashcode, isSecond=False):
    """Creates the layer for weapons."""
    # Weapon sprites live under <package>/pgn/<weapon>.pgn; the second
    # weapon of a pair is drawn mirrored (invert=True).
    pgn_dir = '%s%spgn%s' % (PACKAGE_DIR, os.sep, os.sep)
    pgn_path = pgn_dir + weapon + '.pgn'
    return pgnreader.parse_pagan_file(pgn_path, hashcode, sym=False, invert=isSecond)
|
Creates the layer for weapons.
|
def decimal(self, prompt, default=None, lower=None, upper=None):
    """Prompts user to input decimal, with optional default and bounds."""
    if prompt is None:
        prompt = "Enter a decimal number"
    if default is not None:
        prompt += " [{0}]: ".format(default)
    else:
        prompt += ': '
    validator = curry(filter_decimal, default=default, lower=lower, upper=upper)
    return self.input(validator, prompt)
|
Prompts user to input decimal, with optional default and bounds.
|
def exit_config_mode(self, exit_config="exit configuration-mode"):
    """Exit configuration mode."""
    if not self.check_config_mode():
        return ""
    output = self.send_command_timing(
        exit_config, strip_prompt=False, strip_command=False
    )
    # Device may prompt 'Exit with uncommitted changes?' -- answer yes.
    if "uncommitted changes" in output:
        output += self.send_command_timing(
            "yes", strip_prompt=False, strip_command=False
        )
    if self.check_config_mode():
        raise ValueError("Failed to exit configuration mode")
    return output
|
Exit configuration mode.
|
def insert(self, context, plan):
    """
    Include an insert operation to the given plan.
    :param execution.Context context:
        Current execution context.
    :param list plan:
        List of :class:`execution.Operation` instances.
    """
    op = execution.Insert(self.__comp_name, self.__comp())
    # Only insert when the op is not already planned and the component is
    # not available.  NOTE(review): `!= True` (rather than `not ...`) may be
    # deliberate if available() can return truthy non-True sentinels —
    # confirm before simplifying.
    if op not in plan and self.available(context) != True:
        # Dependencies are inserted depth-first before this component.
        for dep_stub in self.dependencies():
            dep_stub.insert(context, plan)
        plan.append(op)
|
Include an insert operation to the given plan.
:param execution.Context context:
Current execution context.
:param list plan:
List of :class:`execution.Operation` instances.
|
def run(**kwargs):
    '''
    Run a single module function or a range of module functions in a batch.
    Supersedes ``module.run`` function, which requires ``m_`` prefix to
    function-specific parameters.

    :param returner:
        Specify a common returner for the whole batch to send the return data

    :param kwargs:
        Pass any arguments needed to execute the function(s)

    .. code-block:: yaml

        some_id_of_state:
          module.run:
            - network.ip_addrs:
              - interface: eth0
            - cloud.create:
              - names:
                - test-isbm-1
                - test-isbm-2
              - ssh_username: sles
              - image: sles12sp2
              - securitygroup: default
              - size: 'c3.large'
              - location: ap-northeast-1
              - delvol_on_destroy: True

    :return: A standard state return dict (name/changes/comment/result).
    '''
    # The state's own `name` kwarg is not a function to call; drop it.
    if 'name' in kwargs:
        kwargs.pop('name')
    ret = {
        'name': list(kwargs),
        'changes': {},
        'comment': '',
        'result': None,
    }
    # Function entries are the kwargs containing a dot (e.g. 'test.ping').
    functions = [func for func in kwargs if '.' in func]
    missing = []
    tests = []
    for func in functions:
        # A ':suffix' allows the same function to appear more than once.
        func = func.split(':')[0]
        if func not in __salt__:
            missing.append(func)
        elif __opts__['test']:
            tests.append(func)
    if tests or missing:
        # Build a combined comment; the `x and "..." or ''` idiom keeps the
        # fragment empty when its list is empty.
        ret['comment'] = ' '.join([
            missing and "Unavailable function{plr}: "
            "{func}.".format(plr=(len(missing) > 1 or ''),
                             func=(', '.join(missing) or '')) or '',
            tests and "Function{plr} {func} to be "
            "executed.".format(plr=(len(tests) > 1 or ''),
                               func=(', '.join(tests)) or '') or '',
        ]).strip()
        # False if anything is missing or nothing would be test-run.
        ret['result'] = not (missing or not tests)
    if ret['result'] is None:
        ret['result'] = True
        failures = []
        success = []
        for func in functions:
            _func = func.split(':')[0]
            try:
                func_ret = _call_function(_func, returner=kwargs.get('returner'),
                                          func_args=kwargs.get(func))
                if not _get_result(func_ret, ret['changes'].get('ret', {})):
                    if isinstance(func_ret, dict):
                        failures.append("'{0}' failed: {1}".format(
                            func, func_ret.get('comment', '(error message N/A)')))
                else:
                    success.append('{0}: {1}'.format(
                        func, func_ret.get('comment', 'Success') if isinstance(func_ret, dict) else func_ret))
                    ret['changes'][func] = func_ret
            except (SaltInvocationError, TypeError) as ex:
                failures.append("'{0}' failed: {1}".format(func, ex))
        ret['comment'] = ', '.join(failures + success)
        ret['result'] = not bool(failures)
    return ret
|
Run a single module function or a range of module functions in a batch.
Supersedes ``module.run`` function, which requires ``m_`` prefix to
function-specific parameters.
:param returner:
Specify a common returner for the whole batch to send the return data
:param kwargs:
Pass any arguments needed to execute the function(s)
.. code-block:: yaml
some_id_of_state:
module.run:
- network.ip_addrs:
- interface: eth0
- cloud.create:
- names:
- test-isbm-1
- test-isbm-2
- ssh_username: sles
- image: sles12sp2
- securitygroup: default
- size: 'c3.large'
- location: ap-northeast-1
- delvol_on_destroy: True
:return:
|
def rollback(self):
    """
    It will rollback all changes and go to the *original_config*
    """
    logger.info('Rolling back changes')
    diff = self.compare_config(other=self.original_config)
    if len(diff) == 0:
        # Nothing differs from the original config; nothing to commit.
        return None
    return self._commit(diff, force=True, reload_original_config=False)
|
It will rollback all changes and go to the *original_config*
|
def set_default(self):
    """Write the default settings to the config file.

    Creates the config directory if needed, then writes every key/value in
    ``self.DEFAULTS`` into a fresh ``[Settings]`` section of the file at
    ``self._configfile``.
    """
    # Best-effort directory creation; the bare `except:` it replaces also
    # hid real errors, so only swallow OS-level failures (e.g. the directory
    # already exists) and let open() below surface anything fatal.
    try:
        os.makedirs(os.path.dirname(self._configfile))
    except OSError:
        pass
    self._config = configparser.RawConfigParser()
    self._config.add_section('Settings')
    for key, val in self.DEFAULTS.items():
        self._config.set('Settings', key, val)
    with open(self._configfile, 'w') as f:
        self._config.write(f)
|
Set config to default.
|
def smooth(x0, rho, gamma, axis=0):
"""
Proximal operator for a smoothing function enforced via the discrete laplacian operator
Notes
-----
Currently only works with matrices (2-D arrays) as input
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
"""
# Apply Laplacian smoothing
n = x0.shape[axis]
lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)], [0, -1, 1], n, n, format='csc')
x_out = np.rollaxis(spsolve(gamma * lap_op, rho * np.rollaxis(x0, axis, 0)), axis, 0)
return x_out
|
Proximal operator for a smoothing function enforced via the discrete laplacian operator
Notes
-----
Currently only works with matrices (2-D arrays) as input
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
|
def add_sign(xml, key, cert, debug=False, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1):
    """
    Adds signature key and senders certificate to an element (Message or
    Assertion).

    :param xml: The element we should sign
    :type: string | Document
    :param key: The private key
    :type: string
    :param cert: The public
    :type: string
    :param debug: Activate the xmlsec debug
    :type: bool
    :param sign_algorithm: Signature algorithm method
    :type sign_algorithm: string
    :param digest_algorithm: Digest algorithm method
    :type digest_algorithm: string
    :returns: Signed XML
    :rtype: string
    """
    if xml is None or xml == '':
        raise Exception('Empty string supplied as input')
    elem = OneLogin_Saml2_XML.to_etree(xml)
    # Map constants to xmlsec transforms; unknown values fall back to RSA-SHA1.
    sign_algorithm_transform_map = {
        OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
        OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
        OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
        OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
        OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
    }
    sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.Transform.RSA_SHA1)
    signature = xmlsec.template.create(elem, xmlsec.Transform.EXCL_C14N, sign_algorithm_transform, ns='ds')
    # Place the Signature right after the saml:Issuer when present, else at
    # the top of the EntityDescriptor / first child element.
    issuer = OneLogin_Saml2_XML.query(elem, '//saml:Issuer')
    if len(issuer) > 0:
        issuer = issuer[0]
        issuer.addnext(signature)
        elem_to_sign = issuer.getparent()
    else:
        entity_descriptor = OneLogin_Saml2_XML.query(elem, '//md:EntityDescriptor')
        if len(entity_descriptor) > 0:
            elem.insert(0, signature)
        else:
            elem[0].insert(0, signature)
        elem_to_sign = elem
    # Reference the element by its ID, generating one if it has none.
    elem_id = elem_to_sign.get('ID', None)
    if elem_id is not None:
        if elem_id:
            elem_id = '#' + elem_id
    else:
        # Fixed: was a duplicated assignment (`generated_id = generated_id = ...`).
        generated_id = OneLogin_Saml2_Utils.generate_unique_id()
        elem_id = '#' + generated_id
        elem_to_sign.attrib['ID'] = generated_id
    xmlsec.enable_debug_trace(debug)
    xmlsec.tree.add_ids(elem_to_sign, ["ID"])
    digest_algorithm_transform_map = {
        OneLogin_Saml2_Constants.SHA1: xmlsec.Transform.SHA1,
        OneLogin_Saml2_Constants.SHA256: xmlsec.Transform.SHA256,
        OneLogin_Saml2_Constants.SHA384: xmlsec.Transform.SHA384,
        OneLogin_Saml2_Constants.SHA512: xmlsec.Transform.SHA512
    }
    digest_algorithm_transform = digest_algorithm_transform_map.get(digest_algorithm, xmlsec.Transform.SHA1)
    ref = xmlsec.template.add_reference(signature, digest_algorithm_transform, uri=elem_id)
    xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)
    xmlsec.template.add_transform(ref, xmlsec.Transform.EXCL_C14N)
    key_info = xmlsec.template.ensure_key_info(signature)
    xmlsec.template.add_x509_data(key_info)
    # Load the private key, attach the certificate, and sign in place.
    dsig_ctx = xmlsec.SignatureContext()
    sign_key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None)
    sign_key.load_cert_from_memory(cert, xmlsec.KeyFormat.PEM)
    dsig_ctx.key = sign_key
    dsig_ctx.sign(signature)
    return OneLogin_Saml2_XML.to_string(elem)
|
Adds signature key and senders certificate to an element (Message or
Assertion).
:param xml: The element we should sign
:type: string | Document
:param key: The private key
:type: string
:param cert: The public
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
:returns: Signed XML
:rtype: string
|
def remove_functions(source, all_inline=False):
    """removes functions and returns new source, and 2 dicts.
    first dict with removed hoisted(global) functions and second with replaced inline functions"""
    global INLINE_COUNT
    inline = {}
    hoisted = {}
    n = 0
    # 'function' is 8 chars; we also need at least one char after it.
    limit = len(source) - 9  # 8 is length of 'function'
    res = ''
    last = 0
    while n < limit:
        # Skip positions inside an identifier (e.g. 'myfunction').
        if n and source[n - 1] in IDENTIFIER_PART:
            n += 1
            continue
        if source[n:n + 8] == 'function' and source[n +
                                                    8] not in IDENTIFIER_PART:
            if source[:n].rstrip().endswith(
                    '.'):  # allow function as a property name :)
                n += 1
                continue
            if source[n + 8:].lstrip().startswith(
                    ':'):  # allow functions inside objects...
                n += 1
                continue
            entered = n
            # Flush everything up to the function keyword into the result.
            res += source[last:n]
            name = ''
            n = pass_white(source, n + 8)
            if source[n] in IDENTIFIER_START:  # hoisted function
                name, n = parse_identifier(source, n)
            args, n = pass_bracket(source, n, '()')
            if not args:
                raise SyntaxError('Function misses bracket with argnames ()')
            args = args.strip('() \n')
            args = tuple(parse_identifier(e, 0)[0]
                         for e in argsplit(args)) if args else ()
            if len(args) - len(set(args)):
                # I know its legal in JS but python does not allow duplicate argnames
                # I will not work around it
                raise SyntaxError(
                    'Function has duplicate argument names. Its not legal in this implementation. Sorry.'
                )
            block, n = pass_bracket(source, n, '{}')
            if not block:
                raise SyntaxError(
                    'Function does not have any code block to execute')
            mixed = False  # named function expression flag
            if name and not all_inline:
                # Distinguish a named function expression (mixed) from a
                # function statement by looking at what precedes 'function'.
                before = source[:entered].rstrip()
                if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
                    # Preceded by a keyword like 'return' -> expression.
                    mixed = True
                elif before and before[-1] not in PRE_ALLOWED and not before[
                        -2:] in INCREMENTS:
                    mixed = True
                else:
                    # It's a function statement: hoist it.
                    # TODO: strip a leading label if present.
                    hoisted[name] = block, args
            if not name or mixed or all_inline:  # its a function expression (can be both named and not named)
                INLINE_COUNT += 1
                iname = INLINE_NAME % INLINE_COUNT  # inline name
                res += ' ' + iname
                inline['%s@%s' % (
                    iname, name
                )] = block, args  # real name appended so it can be added to the func scope
            last = n
        else:
            n += 1
    res += source[last:]
    return res, hoisted, inline
|
removes functions and returns new source, and 2 dicts.
first dict with removed hoisted(global) functions and second with replaced inline functions
|
def _baseattrs(self):
    """A dict of members expressed in literals"""
    attrs = super()._baseattrs
    attrs["params"] = ", ".join(self.parameters)
    return attrs
|
A dict of members expressed in literals
|
def loggers(self):
    """Return all the loggers that should be activated"""
    name = self.logger_name
    if not name:
        # No explicit logger configured: activate every known logger
        # plus the root logger.
        all_loggers = list(logging.Logger.manager.loggerDict.items())
        all_loggers.append(("root", logging.getLogger()))
        return all_loggers
    if isinstance(name, logging.Logger):
        return [(name.name, name)]
    return [(name, logging.getLogger(name))]
|
Return all the loggers that should be activated
|
def _detach(cls, disk_id):
    """ Detach a disk from a vm. """
    disk = cls._info(disk_id)
    operations = []
    for vm_id in disk.get('vms_id') or []:
        cls.echo('The disk is still attached to the vm %s.' % vm_id)
        cls.echo('Will detach it.')
        operations.append(cls.call('hosting.vm.disk_detach',
                                   vm_id, disk_id))
    return operations
|
Detach a disk from a vm.
|
def sponsored(self, **kwargs):
    """Search containing any sponsored pieces of Content."""
    results = self.search(**kwargs).filter(AllSponsored())
    # Optionally restrict to content published within the last N hours.
    offset_hours = getattr(settings, "RECENT_SPONSORED_OFFSET_HOURS", None)
    if offset_hours:
        now = timezone.now()
        window = Published(
            after=now - timezone.timedelta(hours=offset_hours),
            before=now
        )
        results = results.filter(window)
    return results
|
Search containing any sponsored pieces of Content.
|
def downgrade():
    """Downgrade database.

    Reverts the upgrade by dropping the named (op.f) foreign keys and
    indexes and recreating the original unnamed foreign keys on the
    oauth2server token and client tables.
    """
    # oauth2server_token.user_id -> accounts_user.id
    op.drop_constraint(op.f('fk_oauth2server_token_user_id_accounts_user'),
                       'oauth2server_token', type_='foreignkey')
    op.drop_index(op.f('ix_oauth2server_token_user_id'),
                  table_name='oauth2server_token')
    op.create_foreign_key('fk_oauth2server_token_user_id_accounts_user',
                          'oauth2server_token', 'accounts_user', ['user_id'],
                          ['id'])
    # oauth2server_token.client_id -> oauth2server_client.client_id
    op.drop_constraint(
        op.f('fk_oauth2server_token_client_id_oauth2server_client'),
        'oauth2server_token', type_='foreignkey')
    op.drop_index(op.f('ix_oauth2server_token_client_id'),
                  table_name='oauth2server_token')
    op.create_foreign_key(
        'fk_oauth2server_token_client_id_oauth2server_client',
        'oauth2server_token', 'oauth2server_client', ['client_id'],
        ['client_id'])
    # oauth2server_client.user_id -> accounts_user.id
    op.drop_constraint(op.f('fk_oauth2server_client_user_id_accounts_user'),
                       'oauth2server_client', type_='foreignkey')
    op.drop_index(op.f('ix_oauth2server_client_user_id'),
                  table_name='oauth2server_client')
    op.create_foreign_key('fk_oauth2server_client_user_id_accounts_user',
                          'oauth2server_client', 'accounts_user', ['user_id'],
                          ['id'])
|
Downgrade database.
|
def sub_symbols(pattern, code, symbol):
    """Substitutes symbols in CLDR number pattern."""
    # Replace the double currency sign first so the single-sign pass
    # does not clobber it.
    with_code = pattern.replace('¤¤', code)
    return with_code.replace('¤', symbol)
|
Substitutes symbols in CLDR number pattern.
|
def albedo(self, value=999.0):
    """Corresponds to IDD Field `albedo`

    Args:
        value (float): value for IDD Field `albedo`
            Missing value: 999.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None means "missing"; stored as-is without validation.
        self._albedo = value
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `albedo`'.format(value))
    self._albedo = coerced
|
Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
def get(self, request, uri):
    """
    List uri revisions.
    JSON Response:
        [[uri, state], ...]
    """
    decoded_uri = self.decode_uri(uri)
    # Convert the (uri, state) tuples to lists for JSON serialization.
    revision_rows = [list(revision) for revision in cio.revisions(decoded_uri)]
    return self.render_to_json(revision_rows)
|
List uri revisions.
JSON Response:
[[uri, state], ...]
|
def grant(self, lock, unit):
    '''Maybe grant the lock to a unit.

    The decision to grant the lock or not is made for $lock
    by a corresponding method grant_$lock, which you may define
    in a subclass. If no such method is defined, the default_grant
    method is used. See Serial.default_grant() for details.
    '''
    if not hookenv.is_leader():
        return False  # Not the leader, so we cannot grant.
    # Units that already hold this lock.
    granted = set()
    for holder in self.grants:
        if lock in self.grants[holder]:
            granted.add(holder)
    if unit in granted:
        return True  # Already granted.
    # Build the (timestamp, unit) wait list, excluding current holders.
    pending = set()
    for requester in self.requests:
        if requester in granted:
            continue
        for requested_lock, ts in self.requests[requester].items():
            if requested_lock == lock:
                pending.add((ts, requester))
    queue = [entry[1] for entry in sorted(pending)]
    if unit not in queue:
        return False  # Unit has not requested the lock.
    # Locate custom logic, or fallback to the default.
    grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
    if not grant_func(lock, unit, granted, queue):
        return False
    # Grant the lock.
    self.msg('Leader grants {} to {}'.format(lock, unit))
    self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
    return True
|
Maybe grant the lock to a unit.
The decision to grant the lock or not is made for $lock
by a corresponding method grant_$lock, which you may define
in a subclass. If no such method is defined, the default_grant
method is used. See Serial.default_grant() for details.
|
def write_summary(self, global_step, delta_train_start, lesson_num=0):
    """
    Saves training statistics to Tensorboard.

    :param global_step: The number of steps the simulation has been going for
    :param delta_train_start: Time elapsed since training started.
    :param lesson_num: Current lesson number in curriculum.
    """
    # Only emit a summary every `summary_freq` steps, and never at step 0.
    if global_step % self.trainer_parameters['summary_freq'] != 0 or global_step == 0:
        return
    is_training = ("Training."
                   if self.is_training and self.get_step <= self.get_max_steps
                   else "Not Training.")
    rewards = self.stats['Environment/Cumulative Reward']
    if len(rewards) > 0:
        mean_reward = np.mean(rewards)
        LOGGER.info(" {}: {}: Step: {}. "
                    "Time Elapsed: {:0.3f} s "
                    "Mean Reward: {:0.3f}. Std of Reward: {:0.3f}. {}"
                    .format(self.run_id, self.brain_name,
                            min(self.get_step, self.get_max_steps),
                            delta_train_start, mean_reward,
                            np.std(rewards), is_training))
    else:
        LOGGER.info(" {}: {}: Step: {}. No episode was completed since last summary. {}"
                    .format(self.run_id, self.brain_name, self.get_step, is_training))
    # Fold every accumulated statistic into the Tensorboard summary and
    # reset its buffer for the next reporting window.
    summary = tf.Summary()
    for key in self.stats:
        if len(self.stats[key]) > 0:
            summary.value.add(tag='{}'.format(key),
                              simple_value=float(np.mean(self.stats[key])))
            self.stats[key] = []
    summary.value.add(tag='Environment/Lesson', simple_value=lesson_num)
    self.summary_writer.add_summary(summary, self.get_step)
    self.summary_writer.flush()
|
Saves training statistics to Tensorboard.
:param delta_train_start: Time elapsed since training started.
:param lesson_num: Current lesson number in curriculum.
:param global_step: The number of steps the simulation has been going for
|
def dump_service(self, sc):
    """Read all data blocks of a given service.

    :meth:`dump_service` reads all data blocks from the service
    with service code *sc* and returns a list of strings suitable
    for printing. The number of strings returned does not
    necessarily reflect the number of data blocks because a range
    of data blocks with equal content is reduced to fewer lines of
    output.
    """
    def lprint(fmt, data, index):
        # Render one data block as "<index>: <hex bytes> |<printable chars>|".
        ispchr = lambda x: x >= 32 and x <= 126  # noqa: E731
        def print_bytes(octets):
            return ' '.join(['%02x' % x for x in octets])
        def print_chars(octets):
            # Non-printable bytes are shown as '.' (hexdump style).
            return ''.join([chr(x) if ispchr(x) else '.' for x in octets])
        return fmt.format(index, print_bytes(data), print_chars(data))
    data_line_fmt = "{0:04X}: {1} |{2}|"  # numbered block line
    same_line_fmt = "{0:<4s} {1} |{2}|"   # '*' marker for a run of equal blocks
    lines = list()
    last_data = None
    same_data = 0  # length of the current run of blocks equal to last_data
    for i in itertools.count():  # pragma: no branch
        assert i < 0x10000  # block numbers are 16-bit
        try:
            this_data = self.read_without_encryption([sc], [BlockCode(i)])
        except Type3TagCommandError:
            # A read error marks the end of the service's block range.
            i = i - 1
            break
        if this_data == last_data:
            same_data += 1
        else:
            if same_data > 1:
                # Collapse the preceding run of identical blocks into one line.
                lines.append(lprint(same_line_fmt, last_data, "*"))
            lines.append(lprint(data_line_fmt, this_data, i))
            last_data = this_data
            same_data = 0
    # Flush a trailing run of identical blocks, then print the final block
    # with its real index.
    # NOTE(review): a mid-stream run of exactly one repeated block
    # (same_data == 1) is never emitted as its own line — confirm that this
    # is the intended collapsing behaviour.
    if same_data > 1:
        lines.append(lprint(same_line_fmt, last_data, "*"))
    if same_data > 0:
        lines.append(lprint(data_line_fmt, this_data, i))
    return lines
|
Read all data blocks of a given service.
:meth:`dump_service` reads all data blocks from the service
with service code *sc* and returns a list of strings suitable
for printing. The number of strings returned does not
necessarily reflect the number of data blocks because a range
of data blocks with equal content is reduced to fewer lines of
output.
|
def get_historical_klines(symbol, interval, start_str, end_str=None):
    """Get Historical Klines from Binance

    See dateparse docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/

    If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format
    :type start_str: str
    :param end_str: optional - end date string in UTC format
    :type end_str: str

    :return: list of OHLCV values
    """
    # create the Binance client, no need for api key (public endpoint)
    client = Client("", "")

    # init our list
    output_data = []

    # setup the max limit (maximum klines per request allowed by the API)
    limit = 500

    # convert interval to useful value in seconds
    timeframe = interval_to_milliseconds(interval)

    # convert our date strings to milliseconds
    start_ts = date_to_milliseconds(start_str)

    # if an end time was passed convert it
    end_ts = None
    if end_str:
        end_ts = date_to_milliseconds(end_str)

    idx = 0
    # it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
    symbol_existed = False
    while True:
        # fetch the klines from start_ts up to max 500 entries or the end_ts if set
        temp_data = client.get_klines(
            symbol=symbol,
            interval=interval,
            limit=limit,
            startTime=start_ts,
            endTime=end_ts
        )

        # handle the case where our start date is before the symbol pair listed on Binance
        if not symbol_existed and len(temp_data):
            symbol_existed = True

        if symbol_existed:
            # append this loops data to our output data
            output_data += temp_data

            # update our start timestamp using the last value in the array and add the interval timeframe
            start_ts = temp_data[len(temp_data) - 1][0] + timeframe
        else:
            # it wasn't listed yet, increment our start date
            # NOTE(review): when the symbol is not yet listed the response is
            # empty, so len(temp_data) < limit breaks out of the loop below on
            # the very first iteration — this start-date increment can only run
            # once; confirm whether pre-listing start dates actually work.
            start_ts += timeframe

        idx += 1
        # check if we received less than the required limit and exit the loop
        if len(temp_data) < limit:
            # exit the while loop
            break

        # sleep after every 3rd call to be kind to the API
        if idx % 3 == 0:
            time.sleep(1)

    return output_data
|
Get Historical Klines from Binance
See dateparse docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values
|
def get_profile_info(self, raw_token, profile_info_params=None):
    """Fetch user profile information.

    :param raw_token: OAuth token used to authorize the profile request.
    :param profile_info_params: optional dict of extra query parameters
        for the provider's profile endpoint.
    :returns: the parsed JSON profile (or raw response text), or ``None``
        when the request fails.
    """
    # A mutable default ({}) is shared across all calls and can leak state;
    # use None as the sentinel and build a fresh dict per call instead.
    params = profile_info_params if profile_info_params is not None else {}
    try:
        response = self.request('get', self.provider.profile_url,
                                token=raw_token, params=params)
        response.raise_for_status()
    except RequestException as e:
        logger.error('Unable to fetch user profile: {0}'.format(e))
        return None
    else:
        # Fall back to the raw body when the JSON payload is empty/falsy.
        return response.json() or response.text
|
Fetch user profile information.
|
def add_cookie_header(self, request, referrer_host=None):
    '''Wrapped ``add_cookie_header``.

    Args:
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): An hostname or IP address of the referrer
            URL.
    '''
    shim_request = convert_http_request(request, referrer_host)
    self._cookie_jar.add_cookie_header(shim_request)
    # Mirror the headers (now carrying cookies) back onto the original request.
    request.fields.clear()
    for header_name, header_value in shim_request.header_items():
        request.fields.add(header_name, header_value)
|
Wrapped ``add_cookie_header``.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): An hostname or IP address of the referrer
URL.
|
def get_issues_in_queue(self, service_desk_id, queue_id, start=0, limit=50):
    """
    Returns a page of issues inside a queue for a given queue ID.
    Only fields that the queue is configured to show are returned.
    For example, if a queue is configured to show only Description and Due Date,
    then only those two fields are returned for each issue in the queue.
    Permissions: The calling user must have permission to view the requested queue,
    i.e. they must be an agent of the service desk that the queue belongs to.

    :param service_desk_id: str
    :param queue_id: str
    :param start: int
    :param limit: int
    :return: a page of issues
    """
    # Pagination arguments are optional; skip any that are None.
    raw_params = {'start': start, 'limit': limit}
    params = {key: int(value) for key, value in raw_params.items()
              if value is not None}
    url = 'rest/servicedeskapi/servicedesk/{0}/queue/{1}/issue'.format(
        service_desk_id, queue_id)
    return self.get(url, headers=self.experimental_headers, params=params)
|
Returns a page of issues inside a queue for a given queue ID.
Only fields that the queue is configured to show are returned.
For example, if a queue is configured to show only Description and Due Date,
then only those two fields are returned for each issue in the queue.
Permissions: The calling user must have permission to view the requested queue,
i.e. they must be an agent of the service desk that the queue belongs to.
:param service_desk_id: str
:param queue_id: str
:param start: int
:param limit: int
:return: a page of issues
|
def check_auth(self, username, password):
    """Check if a username/password combination is valid."""
    # Reject immediately on a username mismatch; only hash/compare the
    # password for the configured user.
    if username != self.args.username:
        return False
    from glances.password import GlancesPassword
    pwd = GlancesPassword()
    return pwd.check_password(self.args.password, pwd.sha256_hash(password))
|
Check if a username/password combination is valid.
|
def list_load_areas(self, session, mv_districts):
    """list load_areas (load areas) peak load from database for a single MV grid_district

    Parameters
    ----------
    session : sqlalchemy.orm.session.Session
        Database session
    mv_districts:
        List of MV districts

    Returns
    -------
    pandas.DataFrame
        Sector peak loads (kW) per load area, indexed by load-area id.
    """
    # threshold: load area peak load, if peak load < threshold => disregard
    # load area
    lv_loads_threshold = cfg_ding0.get('mv_routing', 'load_area_threshold')
    #lv_loads_threshold = 0

    gw2kw = 10 ** 6  # load in database is in GW -> scale to kW

    # filter list for only desired MV districts
    stations_list = [d.mv_grid._station.id_db for d in mv_districts]

    # build SQL query: select the four sector peak loads (scaled to kW)
    # for all load areas that belong to the requested substations and
    # whose total peak load exceeds the configured threshold.
    lv_load_areas_sqla = session.query(
        self.orm['orm_lv_load_areas'].id.label('id_db'),
        (self.orm['orm_lv_load_areas'].sector_peakload_residential * gw2kw).\
            label('peak_load_residential'),
        (self.orm['orm_lv_load_areas'].sector_peakload_retail * gw2kw).\
            label('peak_load_retail'),
        (self.orm['orm_lv_load_areas'].sector_peakload_industrial * gw2kw).\
            label('peak_load_industrial'),
        (self.orm['orm_lv_load_areas'].sector_peakload_agricultural * gw2kw).\
            label('peak_load_agricultural'),
        #self.orm['orm_lv_load_areas'].subst_id
        ). \
        filter(self.orm['orm_lv_load_areas'].subst_id.in_(stations_list)).\
        filter(((self.orm['orm_lv_load_areas'].sector_peakload_residential  # only pick load areas with peak load > lv_loads_threshold
                 + self.orm['orm_lv_load_areas'].sector_peakload_retail
                 + self.orm['orm_lv_load_areas'].sector_peakload_industrial
                 + self.orm['orm_lv_load_areas'].sector_peakload_agricultural)
                * gw2kw) > lv_loads_threshold). \
        filter(self.orm['version_condition_la'])

    # read data from db into a DataFrame keyed by the load-area id
    lv_load_areas = pd.read_sql_query(lv_load_areas_sqla.statement,
                                      session.bind,
                                      index_col='id_db')

    return lv_load_areas
|
list load_areas (load areas) peak load from database for a single MV grid_district
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_districts:
List of MV districts
|
def json_files_serializer(objs, status=None):
    """JSON Files Serializer.

    :param objs: A list of :class:`invenio_files_rest.models.ObjectVersion`
        instances.
    :param status: A HTTP Status. (Default: ``None``)
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    serialized = list(map(file_serializer, objs))
    return make_response(json.dumps(serialized), status)
|
JSON Files Serializer.
:param objs: A list of :class:`invenio_files_rest.models.ObjectVersion`
instances.
:param status: A HTTP Status. (Default: ``None``)
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
|
def _validate_schema(self, schema, field, value):
    """ {'type': ['dict', 'string'],
        'anyof': [{'validator': 'schema'},
                  {'validator': 'bulk_schema'}]} """
    # NOTE(review): the docstring above appears to be a machine-read rule
    # definition (Cerberus parses rule-method docstrings as the validation
    # schema of the rule itself) — do not reword it; confirm before editing.
    # No schema constraint means there is nothing to validate.
    if schema is None:
        return
    # Strings are Sequences too; exclude them so only real sequences are
    # validated element-wise. Mappings get per-key validation instead.
    if isinstance(value, Sequence) and not isinstance(value, _str_type):
        self.__validate_schema_sequence(field, schema, value)
    elif isinstance(value, Mapping):
        self.__validate_schema_mapping(field, schema, value)
|
{'type': ['dict', 'string'],
'anyof': [{'validator': 'schema'},
{'validator': 'bulk_schema'}]}
|
def sun_ra_dec(utc_time):
    """Right ascension and declination of the sun at *utc_time*."""
    # Julian centuries since the J2000 epoch.
    jdate = jdays2000(utc_time) / 36525.0
    # Obliquity of the ecliptic: 23°26'21.448" base term minus a polynomial
    # drift in arcseconds, converted to radians.
    eps = np.deg2rad(23.0 + 26.0 / 60.0 + 21.448 / 3600.0 -
                     (46.8150 * jdate + 0.00059 * jdate * jdate -
                      0.001813 * jdate * jdate * jdate) / 3600)
    eclon = sun_ecliptic_longitude(utc_time)
    # Unit vector of the sun in equatorial coordinates.
    x__ = np.cos(eclon)
    y__ = np.cos(eps) * np.sin(eclon)
    z__ = np.sin(eps) * np.sin(eclon)
    r__ = np.sqrt(1.0 - z__ * z__)
    declination = np.arctan2(z__, r__)
    # Half-angle form of atan2 avoids quadrant ambiguity near +/- pi.
    right_ascension = 2 * np.arctan2(y__, (x__ + r__))
    return right_ascension, declination
|
Right ascension and declination of the sun at *utc_time*.
|
def sort(self, col: str):
    """
    Sorts the main dataframe according to the given column

    :param col: column name
    :type col: str

    :example: ``ds.sort("Col 1")``
    """
    try:
        # sort_values already returns a new dataframe, so the previous
        # defensive .copy() only doubled memory for no behavioural gain.
        self.df = self.df.sort_values(col)
    except Exception as e:
        self.err(e, "Can not sort the dataframe from column " +
                 str(col))
|
Sorts the main dataframe according to the given column
:param col: column name
:type col: str
:example: ``ds.sort("Col 1")``
|
def safe_dump(d, fname, *args, **kwargs):
    """
    Savely dump `d` to `fname` using yaml

    This method creates a copy of `fname` called ``fname + '~'`` before saving
    `d` to `fname` using :func:`ordered_yaml_dump`

    Parameters
    ----------
    d: object
        The object to dump
    fname: str
        The path where to dump `d`

    Other Parameters
    ----------------
    ``*args, **kwargs``
        Will be forwarded to the :func:`ordered_yaml_dump` function
    """
    if osp.exists(fname):
        # Keep the previous contents around as a backup.
        os.rename(fname, fname + '~')
    # NOTE(review): the backup rename above happens outside the inter-process
    # lock, so two concurrent writers could race on it — confirm whether the
    # rename should be moved inside the locked section.
    lock = fasteners.InterProcessLock(fname + '.lck')
    lock.acquire()
    # The previous `except: raise` clause was a no-op bare-except; a plain
    # try/finally releases the lock on both success and failure.
    try:
        with open(fname, 'w') as f:
            ordered_yaml_dump(d, f, *args, **kwargs)
    finally:
        lock.release()
|
Safely dump `d` to `fname` using yaml
This method creates a copy of `fname` called ``fname + '~'`` before saving
`d` to `fname` using :func:`ordered_yaml_dump`
Parameters
----------
d: object
The object to dump
fname: str
The path where to dump `d`
Other Parameters
----------------
``*args, **kwargs``
Will be forwarded to the :func:`ordered_yaml_dump` function
|
def _getReader(self, filename, scoreClass):
"""
Obtain a JSON record reader for DIAMOND records.
@param filename: The C{str} file name holding the JSON.
@param scoreClass: A class to hold and compare scores (see scores.py).
"""
if filename.endswith('.json') or filename.endswith('.json.bz2'):
return JSONRecordsReader(filename, scoreClass)
else:
raise ValueError(
'Unknown DIAMOND record file suffix for file %r.' % filename)
|
Obtain a JSON record reader for DIAMOND records.
@param filename: The C{str} file name holding the JSON.
@param scoreClass: A class to hold and compare scores (see scores.py).
|
def _sentiment(self, distance=True):
"""Calculates the sentiment of an entity as it appears in text."""
sum_pos = 0
sum_neg = 0
text = self.parent
entity_positions = range(self.start, self.end)
non_entity_positions = set(range(len(text.words))).difference(entity_positions)
if not distance:
non_entity_polarities = np.array([text.words[i].polarity for i in non_entity_positions])
sum_pos = sum(non_entity_polarities == 1)
sum_neg = sum(non_entity_polarities == -1)
else:
polarities = np.array([w.polarity for w in text.words])
polarized_positions = np.argwhere(polarities != 0)[0]
polarized_non_entity_positions = non_entity_positions.intersection(polarized_positions)
sentence_len = len(text.words)
for i in polarized_non_entity_positions:
min_dist = min(abs(self.start - i), abs(self.end - i))
if text.words[i].polarity == 1:
sum_pos += 1.0 - (min_dist - 1.0) / (2.0 * sentence_len)
else:
sum_neg += 1.0 - (min_dist - 1.0) / (2.0 *sentence_len)
return (sum_pos, sum_neg)
|
Calculates the sentiment of an entity as it appears in text.
|
def filter_renderers(self, renderers, format):
    """
    If there is a '.json' style format suffix, filter the renderers
    so that we only negotiate against those that accept that format.
    Raises Http404 when no renderer matches.
    """
    matching = [candidate for candidate in renderers
                if candidate.format == format]
    if not matching:
        raise Http404
    return matching
|
If there is a '.json' style format suffix, filter the renderers
so that we only negotiate against those that accept that format.
|
def _adaptSegment(self, segUpdate):
    """
    This function applies segment update information to a segment in a
    cell.

    Synapses on the active list get their permanence counts incremented by
    permanenceInc. All other synapses get their permanence counts decremented
    by permanenceDec.

    We also increment the positiveActivations count of the segment.

    :param segUpdate SegmentUpdate instance
    :returns: True if some synapses were decremented to 0 and the segment is a
              candidate for trimming
    """
    # NOTE: this is Python 2 code (print statements, xrange).
    # This will be set to True if detect that any syapses were decremented to
    # 0
    trimSegment = False

    # segUpdate.segment is None when creating a new segment
    c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment

    # update.activeSynapses can be empty.
    # If not, it can contain either or both integers and tuples.
    # The integers are indices of synapses to update.
    # The tuples represent new synapses to create (src col, src cell in col).
    # We pre-process to separate these various element types.
    # synToCreate is not empty only if positiveReinforcement is True.
    # NOTE: the synapse indices start at *1* to skip the segment flags.
    activeSynapses = segUpdate.activeSynapses
    synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])

    # Modify an existing segment
    if segment is not None:
        if self.verbosity >= 4:
            print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
            print "  before:",
            segment.debugPrint()

        # Mark it as recently useful
        segment.lastActiveIteration = self.lrnIterationIdx

        # Update frequency and positiveActivations
        segment.positiveActivations += 1  # positiveActivations += 1
        segment.dutyCycle(active=True)

        # First, decrement synapses that are not active
        # s is a synapse *index*, with index 0 in the segment being the tuple
        # (segId, sequence segment flag). See below, creation of segments.
        lastSynIndex = len(segment.syns) - 1
        inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                              if s not in synToUpdate]
        trimSegment = segment.updateSynapses(inactiveSynIndices,
                                             -self.permanenceDec)

        # Now, increment active synapses
        # (indices beyond the current synapse list are new synapses, handled
        # below, so they are filtered out here)
        activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
        segment.updateSynapses(activeSynIndices, self.permanenceInc)

        # Finally, create new synapses if needed
        # syn is now a tuple (src col, src cell)
        synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
        # If we have fixed resources, get rid of some old syns if necessary
        if self.maxSynapsesPerSegment > 0 \
                and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
            numToFree = (len(segment.syns) + len(synsToAdd) -
                         self.maxSynapsesPerSegment)
            segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
        for newSyn in synsToAdd:
            segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)

        if self.verbosity >= 4:
            print "   after:",
            segment.debugPrint()

    # Create a new segment
    else:
        # (segID, sequenceSegment flag, frequency, positiveActivations,
        #  totalActivations, lastActiveIteration)
        newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)

        # numpy.float32 important so that we can match with C++
        for synapse in activeSynapses:
            newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)

        if self.verbosity >= 3:
            print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
            newSegment.debugPrint()

        self.cells[c][i].append(newSegment)

    return trimSegment
|
This function applies segment update information to a segment in a
cell.
Synapses on the active list get their permanence counts incremented by
permanenceInc. All other synapses get their permanence counts decremented
by permanenceDec.
We also increment the positiveActivations count of the segment.
:param segUpdate SegmentUpdate instance
:returns: True if some synapses were decremented to 0 and the segment is a
candidate for trimming
|
def fit_transform(self, Z):
    """Learn the vocabulary dictionary and return term-document matrix.

    This is equivalent to fit followed by transform, but more efficiently
    implemented.

    Parameters
    ----------
    Z : iterable or DictRDD with column 'X'
        An iterable of raw_documents which yields either str, unicode or
        file objects; or a DictRDD with column 'X' containing such
        iterables.

    Returns
    -------
    X : array, [n_samples, n_features] or DictRDD
        Document-term matrix.
    """
    self._validate_vocabulary()

    # map analyzer and cache result (the analyzed RDD is reused twice:
    # once to build the vocabulary, once to count terms)
    analyze = self.build_analyzer()
    A = Z.transform(lambda X: list(map(analyze, X)), column='X').persist()

    # create vocabulary from the analyzed documents
    X = A[:, 'X'] if isinstance(A, DictRDD) else A
    self.vocabulary_ = self._init_vocab(X)

    # transform according to vocabulary (produces sparse count matrices)
    mapper = self.broadcast(self._count_vocab, A.context)
    Z = A.transform(mapper, column='X', dtype=sp.spmatrix)

    if not self.fixed_vocabulary_:
        X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features

        # limit features according to min_df, max_df parameters
        # (fractional values are interpreted as a proportion of documents)
        n_doc = X.shape[0]
        max_doc_count = (max_df
                         if isinstance(max_df, numbers.Integral)
                         else max_df * n_doc)
        min_doc_count = (min_df
                         if isinstance(min_df, numbers.Integral)
                         else min_df * n_doc)
        if max_doc_count < min_doc_count:
            raise ValueError(
                "max_df corresponds to < documents than min_df")
        kept_indices, self.stop_words_ = self._limit_features(
            X, self.vocabulary_, max_doc_count, min_doc_count, max_features)

        # sort features alphabetically (stable column order)
        map_index = self._sort_features(self.vocabulary_)

        # combined mask: document-frequency filter re-ordered by the sort
        mask = kept_indices[map_index]

        Z = Z.transform(lambda x: x[:, mask], column='X', dtype=sp.spmatrix)

    # release the cached analyzed RDD
    A.unpersist()

    return Z
|
Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
Z : iterable or DictRDD with column 'X'
An iterable of raw_documents which yields either str, unicode or
file objects; or a DictRDD with column 'X' containing such
iterables.
Returns
-------
X : array, [n_samples, n_features] or DictRDD
Document-term matrix.
|
def generate_description(dataset_name, local_cache_dir=None):
    """Generates description for a given dataset in its README.md file in a dataset local_cache_dir file.

    :param dataset_name: str
        The name of the data set to load from PMLB.
    :param local_cache_dir: str (required)
        The directory on your local machine to store the data files.
    :raises ValueError: if `local_cache_dir` is None.
    """
    # `assert` is stripped under `python -O`; validate explicitly instead.
    if local_cache_dir is None:
        raise ValueError('local_cache_dir is required')
    readme_path = os.path.join(local_cache_dir, 'datasets', dataset_name, 'README.md')
    # `with` guarantees the file is closed (replaces the manual try/finally).
    with open(readme_path, 'wt') as readme_file:
        try:
            df = fetch_data(dataset_name)
            fnames = [col for col in df.columns if col != 'class']
            # determine all required values
            # (df.ix was deprecated and then removed from pandas; use .loc)
            feature_df = df.loc[:, df.columns != 'class']
            types = get_types(feature_df)
            feat = count_features_type(feature_df)
            endpoint = determine_endpoint_type(df.loc[:, df.columns == 'class'])
            mse = imbalance_metrics(df['class'].tolist())
            # proceed with writing
            readme_file.write('# %s\n\n' % dataset_name)
            readme_file.write('## Summary Stats\n\n')
            readme_file.write('#instances: %s\n\n' % str(len(df.axes[0])))
            readme_file.write("#features: %s\n\n" % str(len(df.axes[1])-1))
            readme_file.write("  #binary_features: %s\n\n" % feat[0])
            readme_file.write("  #integer_features: %s\n\n" % feat[1])
            readme_file.write("  #float_features: %s\n\n" % feat[2])
            readme_file.write("Endpoint type: %s\n\n" % endpoint)
            readme_file.write("#Classes: %s\n\n" % int(mse[0]))
            readme_file.write("Imbalance metric: %s\n\n" % mse[1])
            readme_file.write('## Feature Types\n\n %s\n\n' % '\n\n'.join([f + ':' + t for f, t in
                                                                           zip(fnames, types)]))
        except IOError as err:
            print(err)
|
Generates a description for a given dataset in its README.md file in the dataset's local_cache_dir directory.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used.
|
def predict(self, Xnew, full_cov=False, kern=None, **kwargs):
    """
    Predict the function(s) at the new point(s) Xnew. For Student-t processes, this method is equivalent to
    predict_noiseless as no likelihood is included in the model.

    :param Xnew: the new points to predict at.
    :param full_cov: whether to return the full covariance (forwarded).
    :param kern: optional kernel to use for the prediction (forwarded).
    """
    # Extra **kwargs are accepted for API compatibility but intentionally
    # ignored; the call is delegated unchanged to predict_noiseless.
    return self.predict_noiseless(Xnew, full_cov=full_cov, kern=kern)
|
Predict the function(s) at the new point(s) Xnew. For Student-t processes, this method is equivalent to
predict_noiseless as no likelihood is included in the model.
|
def _get_encoding(encoding_or_label):
"""
Accept either an encoding object or label.
:param encoding: An :class:`Encoding` object or a label string.
:returns: An :class:`Encoding` object.
:raises: :exc:`~exceptions.LookupError` for an unknown label.
"""
if hasattr(encoding_or_label, 'codec_info'):
return encoding_or_label
encoding = lookup(encoding_or_label)
if encoding is None:
raise LookupError('Unknown encoding label: %r' % encoding_or_label)
return encoding
|
Accept either an encoding object or label.
:param encoding: An :class:`Encoding` object or a label string.
:returns: An :class:`Encoding` object.
:raises: :exc:`~exceptions.LookupError` for an unknown label.
|
def _piecewise(x, condlist, funclist, *args, **kw):
    '''Fixed version of numpy.piecewise for 0-d arrays

    Evaluates `funclist[k]` (a callable or a constant) wherever
    `condlist[k]` is True; an extra function acts as the "otherwise"
    branch. Extra ``*args, **kw`` are forwarded to the callables.
    '''
    x = asanyarray(x)
    n2 = len(funclist)
    # Normalise a single condition (scalar or 0-d array) to a one-element list.
    if isscalar(condlist) or \
            (isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
            (x.ndim > 0 and condlist[0].ndim == 0):
        condlist = [condlist]
    condlist = [asarray(c, dtype=bool) for c in condlist]
    n = len(condlist)

    zerod = False
    # This is a hack to work around problems with NumPy's
    # handling of 0-d arrays and boolean indexing with
    # numpy.bool_ scalars
    if x.ndim == 0:
        x = x[None]
        zerod = True
        # Promote 0-d conditions to 1-d so they can index the promoted x.
        newcondlist = []
        for k in range(n):
            if condlist[k].ndim == 0:
                condition = condlist[k][None]
            else:
                condition = condlist[k]
            newcondlist.append(condition)
        condlist = newcondlist

    if n == n2-1:  # compute the "otherwise" condition.
        totlist = condlist[0]
        for k in range(1, n):
            totlist |= condlist[k]
        condlist.append(~totlist)
        n += 1
    if (n != n2):
        raise ValueError(
            "function list and condition list must be the same")

    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            # Constant branch: assign the value directly.
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)

    if zerod:
        # Undo the 0-d -> 1-d promotion performed above.
        y = y.squeeze()
    return y
|
Fixed version of numpy.piecewise for 0-d arrays
|
def uinit(self, ushape):
    """Return initialiser for working variable U."""
    if self.opt['Y0'] is not None:
        # If initial Y is non-zero, initial U is chosen so that
        # the relevant dual optimality criterion (see (3.10) in
        # boyd-2010-distributed) is satisfied.
        u0 = np.sign(self.block_sep0(self.Y)) / self.rho
        u1 = self.block_sep1(self.Y) - self.S
        return self.block_cat(u0, u1)
    return np.zeros(ushape, dtype=self.dtype)
|
Return initialiser for working variable U.
|
def get_query_error(self, i):
    """Just get a single error characterization based on the index

    :param i: list index
    :type i: int
    :returns: base-wise error
    :rtype: HPA group description
    """
    x = self._query_hpas[i]
    h = x['hpa']
    pos = x['pos']
    # NOTE(review): `prob` is assigned but never used — confirm whether it
    # was meant to feed into the BaseError construction below.
    prob = 0
    be = BaseError('query')
    be.set_observable(h.get_target(), h.get_query())
    if i != 0 and pos == 0:  # check for a total deletion before
        prev = x['prev-hpa']
        if len(prev.get_query()) == 0:  # total deletion
            be.set_unobserved_before(len(prev.get_target()), 0, prev.get_target()[0], 0.5)
    # check for a total deletion after this group (last position only)
    if i != len(self._query_hpas)-1 and pos == len(h.get_query())-1:
        if x['next-hpa']:
            foll = x['next-hpa']
            if len(foll.get_query()) == 0:  # total deletion
                be.set_unobserved_after(len(foll.get_target()), 0, foll.get_target()[0], 0.5)
    return be
|
Just get a single error characterization based on the index
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description
|
def executed_without_callbacks(self):
    """
    Called when the task has been successfully executed
    and the Taskmaster instance doesn't want to call
    the Node's callback methods.
    """
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.executed_without_callbacks()',
                                   self.node))

    for target in self.targets:
        if target.get_state() != NODE_EXECUTING:
            continue
        # Reset side effects first, then mark the target itself executed.
        for side_effect in target.side_effects:
            side_effect.set_state(NODE_NO_STATE)
        target.set_state(NODE_EXECUTED)
|
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
|
def keys(self):
    """
    Returns a sorted list of keys
    """
    # NOTE(review): the loop index `n` is unused and `get_value()` is called
    # with no argument, so every iteration fetches the same value — this
    # looks like it should be `self.get_value(n)`; confirm against the
    # get_value() API. Also, despite the docstring, no sorting is performed
    # here — presumably the keys come back in order from get_value; verify.
    keys = list()
    for n in range(len(self)):
        # only append the valid keys (skip empty strings and None)
        key = self.get_value()
        if not key in ['', None]: keys.append(key)
    return keys
|
Returns a sorted list of keys
|
def filter_trends(self, pattern=''):
    """
    Filter available trends

    Returns a dict mapping variable index -> matching line, scanning the
    file until the 'TIME SERIES' section begins.
    """
    matches = {}
    with open(self.abspath) as fobj:
        for idx, line in enumerate(fobj):
            # Stop scanning once the time-series section starts.
            if 'TIME SERIES' in line:
                break
            # Variable indices start after the catalog header lines.
            variable_idx = idx - self._attributes['CATALOG'] - 1
            if variable_idx > 0 and pattern in line:
                matches[variable_idx] = line
    return matches
|
Filter available trends
|
def check_output(self, cmd):
    """Wrapper for subprocess.check_output."""
    ret, output = self._exec(cmd)
    # Any non-zero exit status is treated as a failure.
    if ret != 0:
        raise CommandError(self)
    return output
|
Wrapper for subprocess.check_output.
|
def setdbo(self, bond1, bond2, dboval):
    """Set the double bond orientation for bond1 and bond2
    based on this bond"""
    # Orientation only makes sense relative to a double bond (bondtype == 2).
    if self.bondtype != 2:
        raise FrownsError("To set double bond order, center bond must be double!")
    assert dboval in (DX_CHI_CIS, DX_CHI_TRANS, DX_CHI_NO_DBO), "bad dboval value"
    self.dbo.append(bond1, bond2, dboval)
|
Set the double bond orientation for bond1 and bond2
based on this bond
|
def update_tab_label(self, state_m):
    """Update all tab labels

    :param rafcon.state_machine.states.state.State state_m: State model who's tab label is to be updated
    """
    state_identifier = self.get_state_identifier(state_m)
    # The tab may live either in the open or in the closed collection.
    if state_identifier in self.tabs:
        tab_info = self.tabs[state_identifier]
    elif state_identifier in self.closed_tabs:
        tab_info = self.closed_tabs[state_identifier]
    else:
        return
    set_tab_label_texts(tab_info['page'].title_label, state_m,
                        tab_info['source_code_view_is_dirty'])
|
Update all tab labels
:param rafcon.state_machine.states.state.State state_m: State model who's tab label is to be updated
|
def num_samples(input_filepath):
    '''
    Show number of samples (0 if unavailable).

    Parameters
    ----------
    input_filepath : str
        Path to audio file.

    Returns
    -------
    n_samples : int
        total number of samples in audio file.
        Returns 0 if empty or unavailable
    '''
    validate_input_file(input_filepath)
    sample_count = soxi(input_filepath, 's')
    # soxi reports '0' when it cannot determine the sample count.
    if sample_count == '0':
        logger.warning("Number of samples unavailable for %s", input_filepath)
    return int(sample_count)
|
Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable
|
def reduce_multiline(string):
    """
    reduces a multiline string to a single line of text.

    args:
        string: the text to reduce
    """
    # Strip every line, drop the empty ones, and re-join with single spaces.
    stripped_lines = (line.strip() for line in str(string).split("\n"))
    return " ".join(line for line in stripped_lines if line)
|
reduces a multiline string to a single line of text.
args:
string: the text to reduce
|
def _register_template(cls, template_bytes):
    '''Registers the template for the widget and hooks init_template

    :param cls: the Gtk widget class being decorated.
    :param template_bytes: the GtkBuilder UI definition as GLib bytes.
    :raises TypeError: when the running PyGObject lacks template support.
    '''
    # This implementation won't work if there are nested templates, but
    # we can't do that anyways due to PyGObject limitations so it's ok

    if not hasattr(cls, 'set_template'):
        raise TypeError("Requires PyGObject 3.13.2 or greater")

    cls.set_template(template_bytes)

    bound_methods = set()
    bound_widgets = set()

    # Walk the class, find marked callbacks and child attributes
    for name in dir(cls):

        o = getattr(cls, name, None)

        if inspect.ismethod(o):
            if hasattr(o, '_gtk_callback'):
                # Methods tagged by the callback decorator get connected later
                # via the connect_func; only record their names here.
                bound_methods.add(name)
                # Don't need to call this, as connect_func always gets called
                #cls.bind_template_callback_full(name, o)
        elif isinstance(o, _Child):
            # Template children are bound by name to builder-defined widgets.
            cls.bind_template_child_full(name, True, 0)
            bound_widgets.add(name)

    # Have to setup a special connect function to connect at template init
    # because the methods are not bound yet
    cls.set_connect_func(_connect_func, cls)

    cls.__gtemplate_methods__ = bound_methods
    cls.__gtemplate_widgets__ = bound_widgets

    # Wrap init_template so our own template initialisation runs as well.
    base_init_template = cls.init_template
    cls.init_template = lambda s: _init_template(s, cls, base_init_template)
|
Registers the template for the widget and hooks init_template
|
def sanitize_for_archive(url, headers, payload):
    """Sanitize URL of a HTTP request by removing the token information
    before storing/retrieving archived items

    :param: url: HTTP url request
    :param: headers: HTTP headers request
    :param: payload: HTTP payload request

    :returns the sanitized url, plus the headers and payload
    """
    # Mask the bot token segment of the URL (e.g. Telegram API tokens).
    sanitized_url = re.sub('bot.*/', 'botXXXXX/', url)
    return sanitized_url, headers, payload
|
Sanitize URL of a HTTP request by removing the token information
before storing/retrieving archived items
:param: url: HTTP url request
:param: headers: HTTP headers request
:param: payload: HTTP payload request
:returns the sanitized url, plus the headers and payload
|
def _scope_and_enforce_robots(self, site, parent_page, outlinks):
    '''
    Partition a page's outlinks by crawl scope and robots policy.

    :param site: site whose scope rules and robots policy apply
    :param parent_page: page the outlinks were discovered on
    :param outlinks: iterable of outlink urls (may be None)

    Returns tuple (
        dict of {page_id: Page} of fresh `brozzler.Page` representing in
        scope links accepted by robots policy,
        set of in scope urls (canonicalized) blocked by robots policy,
        set of out-of-scope urls (canonicalized)).
    '''
    pages = {}  # {page_id: Page, ...}
    blocked = set()
    out_of_scope = set()
    for url in outlinks or []:
        # Two canonicalizations: semantic form for scope decisions,
        # whatwg form for the url actually crawled/reported.
        url_for_scoping = urlcanon.semantic(url)
        url_for_crawling = urlcanon.whatwg(url)
        decision = site.accept_reject_or_neither(
            url_for_scoping, parent_page=parent_page)
        if decision is True:
            hops_off = 0
        elif decision is None:
            # Neither explicitly accepted nor rejected: allow it only if
            # we are still within the configured max hops outside scope.
            decision = parent_page.hops_off < site.scope.get(
                'max_hops_off', 0)
            hops_off = parent_page.hops_off + 1
        if decision is True:
            if brozzler.is_permitted_by_robots(site, str(url_for_crawling)):
                fresh_page = self._build_fresh_page(
                    site, parent_page, url, hops_off)
                # Same page id may appear more than once among the
                # outlinks; merge instead of overwriting.
                if fresh_page.id in pages:
                    self._merge_page(pages[fresh_page.id], fresh_page)
                else:
                    pages[fresh_page.id] = fresh_page
            else:
                blocked.add(str(url_for_crawling))
        else:
            out_of_scope.add(str(url_for_crawling))
    return pages, blocked, out_of_scope
|
Returns tuple (
dict of {page_id: Page} of fresh `brozzler.Page` representing in
scope links accepted by robots policy,
set of in scope urls (canonicalized) blocked by robots policy,
set of out-of-scope urls (canonicalized)).
|
def pre_fork(self, process_manager):
    '''
    Pre-fork we need to create the zmq router device

    :param func process_manager: An instance of salt.utils.process.ProcessManager
    '''
    # Run the shared AES request-server pre-fork setup from the mixin first.
    salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
    # Then register the zmq router device as a managed subprocess.
    process_manager.add_process(self.zmq_device)
|
Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager
|
def get_reduced_assignment(
    self,
    original_assignment,
    cluster_topology,
    max_partition_movements,
    max_leader_only_changes,
    max_movement_size=DEFAULT_MAX_MOVEMENT_SIZE,
    force_progress=False,
):
    """Reduce the assignment based on the total actions.

    Actions represent actual partition movements
    and/or changes in preferred leader.
    Get the difference of original and proposed assignment
    and take the subset of this plan for given limit.

    Argument(s):
        original_assignment: Current assignment of cluster in zookeeper
        cluster_topology: Cluster topology containing the new proposed-assignment of cluster
        max_partition_movements:Maximum number of partition-movements in
                                final set of actions
        max_leader_only_changes:Maximum number of actions with leader only changes
        max_movement_size:      Maximum size, in bytes, to move in final set of actions
        force_progress:         Whether to force progress if max_movement_size is too small
    :return:
    :reduced_assignment: Final reduced assignment
    """
    new_assignment = cluster_topology.assignment
    # Nothing to do (or nonsensical limits): return an empty plan.
    if (not original_assignment or not new_assignment or
            max_partition_movements < 0 or max_leader_only_changes < 0 or
            max_movement_size < 0):
        return {}
    # The replica set stays the same for leaders only changes
    leaders_changes = [
        (t_p, new_assignment[t_p])
        for t_p, replica in six.iteritems(original_assignment)
        if replica != new_assignment[t_p] and
        set(replica) == set(new_assignment[t_p])
    ]
    # The replica set is different for partitions changes
    # Here we create a list of tuple ((topic, partion), # replica movements)
    partition_change_count = [
        (
            t_p,
            len(set(replica) - set(new_assignment[t_p])),
        )
        for t_p, replica in six.iteritems(original_assignment)
        if set(replica) != set(new_assignment[t_p])
    ]
    self.log.info(
        "Total number of actions before reduction: %s.",
        len(partition_change_count) + len(leaders_changes),
    )
    # Extract reduced plan maximizing uniqueness of topics and ensuring we do not
    # go over the max_movement_size
    reduced_actions = self._extract_actions_unique_topics(
        partition_change_count,
        max_partition_movements,
        cluster_topology,
        max_movement_size,
    )
    # Ensure progress is made if force_progress=True.
    # BUG FIX: also require a non-empty partition_change_count, otherwise
    # min() over an empty sequence raises ValueError when there are no
    # partition movements at all (leader-only plans).
    if len(reduced_actions) == 0 and force_progress and partition_change_count:
        smallest_size = min([cluster_topology.partitions[t_p[0]].size for t_p in partition_change_count])
        self.log.warning(
            '--max-movement-size={max_movement_size} is too small, using smallest size'
            ' in set of partitions to move, {smallest_size} instead to force progress'.format(
                max_movement_size=max_movement_size,
                smallest_size=smallest_size,
            )
        )
        max_movement_size = smallest_size
        # Retry the extraction with the relaxed size limit.
        reduced_actions = self._extract_actions_unique_topics(
            partition_change_count,
            max_partition_movements,
            cluster_topology,
            max_movement_size,
        )
    reduced_partition_changes = [
        (t_p, new_assignment[t_p]) for t_p in reduced_actions
    ]
    self.log.info(
        "Number of partition changes: %s."
        " Number of leader-only changes: %s",
        len(reduced_partition_changes),
        min(max_leader_only_changes, len(leaders_changes)),
    )
    # Merge leaders and partition changes and generate the assignment
    reduced_assignment = {
        t_p: replicas
        for t_p, replicas in (
            reduced_partition_changes + leaders_changes[:max_leader_only_changes]
        )
    }
    return reduced_assignment
|
Reduce the assignment based on the total actions.
Actions represent actual partition movements
and/or changes in preferred leader.
Get the difference of original and proposed assignment
and take the subset of this plan for given limit.
Argument(s):
original_assignment: Current assignment of cluster in zookeeper
cluster_topology: Cluster topology containing the new proposed-assignment of cluster
max_partition_movements:Maximum number of partition-movements in
final set of actions
max_leader_only_changes:Maximum number of actions with leader only changes
max_movement_size: Maximum size, in bytes, to move in final set of actions
force_progress: Whether to force progress if max_movement_size is too small
:return:
:reduced_assignment: Final reduced assignment
|
def request_token(self):
    """Return (authorize_url, request_token, request_secret)."""
    logging.debug("Getting request token from %s:%d",
                  self.server, self.port)
    # Obtain an OAuth request token/secret pair from the server.
    token, secret = self._token("/oauth/requestToken")
    authorize_url = "{}/oauth/authorize?oauth_token={}".format(self.host, token)
    return authorize_url, token, secret
|
Returns url, request_token, request_secret
|
def referencegenomefinder(self):
    """
    Finds the closest reference genome to the profile of interest.

    Compares each sample's rMLST allele profile against the reference
    genome profiles, records the best match (including any allele
    mismatches) on the sample object, and writes a summary report to
    referencegenomes.csv in the report path.
    """
    # Initialise dictionaries
    referencematch = defaultdict(make_dict)
    referencehits = defaultdict(make_dict)
    # Set the name of the reference profile file
    referencegenomeprofile = '{}rMLST_referenceprofile.json'.format(self.referenceprofilepath)
    # Open the reference profile and load the profile into memory
    with open(referencegenomeprofile) as referencefile:
        referencetypes = json.load(referencefile)
    # Iterate through the samples
    for sample in self.metadata:
        if sample[self.analysistype].reportdir != 'NA':
            # Iterate through the reference genomes in the profile
            for genome in referencetypes:
                # Initialise the number of identical alleles between the assembly of interest and the
                # reference genome to 0
                referencehits[sample.name][genome] = 0
                # Iterate through all the genes in the analysis
                for gene in self.bestdict[sample.name]:
                    # If the alleles match between the assembly of interest and the reference genome, increment
                    # the number of matches by 1
                    if list(self.bestdict[sample.name][gene].keys())[0] == referencetypes[genome][gene]:
                        referencematch[sample.name][genome][gene] = 1
                        referencehits[sample.name][genome] += 1
                    else:
                        referencematch[sample.name][genome][gene] = 0
    # Determine and record the best reference genome for each sample
    for sample in self.metadata:
        if sample[self.analysistype].reportdir != 'NA':
            # Get the best number of matches
            # From: https://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
            try:
                matches = sorted(referencehits[sample.name].items(),
                                 key=operator.itemgetter(1), reverse=True)
                most_matches = matches[0][1]
                i = 0
                match_list = list()
                # Collect every genome tied for the best number of matches.
                # BUG FIX: bound the index so that a tie across *all*
                # genomes does not raise IndexError (which was previously
                # swallowed by the except clause below, misclassifying the
                # sample as having no matches).
                while i < len(matches) and matches[i][1] == most_matches:
                    match_list.append(matches[i])
                    i += 1
                # Break ties deterministically by genome name
                sorted_list = sorted(match_list)
                sortedmatches = sorted_list[0]
            except IndexError:
                # No reference hits recorded for this sample at all
                sortedmatches = (0, 0)
            # If there are fewer matches than the total number of genes in the typing scheme
            if 0 < int(sortedmatches[1]) < len(sample[self.analysistype].allelenames):
                mismatches = []
                # Iterate through the gene in the analysis
                for gene, allele in referencetypes[sortedmatches[0]].items():
                    # Populate :self.referencegenome with the genome name, best reference match, number of matches,
                    # gene, query allele(s), and percent identity
                    percentidentity = '{:.2f}'.format(list(self.bestdict[sample.name][gene].values())[0])
                    self.referencegenome[sample.name][sortedmatches[0]][sortedmatches[1]][gene][list(self.bestdict[
                        sample.name][gene].keys())[0]] = percentidentity
                    if list(self.bestdict[sample.name][gene].keys())[0] != allele:
                        sample[self.analysistype].referencegenome = sortedmatches[0]
                        sample.general.referencegenus = sortedmatches[0].split('_')[0]
                        sample[self.analysistype].referencegenomepath = '{}{}.fa' \
                            .format(self.referenceprofilepath, sortedmatches[0])
                        sample[self.analysistype].matchestoreferencegenome = sortedmatches[1]
                        mismatches.append(({gene: ('{} ({})'.format(list(self.bestdict[sample.name][gene]
                                                                         .keys())[0], allele))}))
                sample[self.analysistype].mismatchestoreferencegenome = mismatches
            # BUG FIX: ``sortedmatches`` is a tuple, so the original
            # ``sortedmatches == 0`` comparison was always False and the
            # 'no matches' branch was unreachable; compare the match count.
            elif sortedmatches[1] == 0:
                for gene in sample[self.analysistype].allelenames:
                    # Populate the profile of results with 'negative' values for sequence type and sorted matches
                    self.referencegenome[sample.name][sortedmatches[0]][0][gene]['NA'] = 0
                sample[self.analysistype].referencegenome = 'NA'
                sample.general.referencegenus = 'NA'
                sample[self.analysistype].referencegenomepath = 'NA'
                sample[self.analysistype].matchestoreferencegenome = 0
                sample[self.analysistype].mismatchestoreferencegenome = [0]
            # Otherwise, the query profile matches the reference profile
            else:
                for gene in referencetypes[sortedmatches[0]]:
                    # Populate self.referencegenome as above
                    self.referencegenome[sample.name][sortedmatches[0]][sortedmatches[1]][gene][list(self.bestdict[
                        sample.name][gene].keys())[0]] = '{:.2f}'.format(list(self.bestdict[
                            sample.name][gene].values())[0])
                sample[self.analysistype].referencegenome = sortedmatches[0]
                sample[self.analysistype].referencegenomepath = '{}{}.fa' \
                    .format(self.referenceprofilepath, sortedmatches[0])
                sample.general.referencegenus = sortedmatches[0].split('_')[0]
                sample[self.analysistype].matchestoreferencegenome = sortedmatches[1]
                sample[self.analysistype].mismatchestoreferencegenome = [0]
    # Print the results to file
    make_path(self.reportpath)
    with open('{}referencegenomes.csv'.format(self.reportpath), 'w') as referencegenomereport:
        row = 'Strain,referencegenome,numberofmatches\n'
        for sample in self.metadata:
            if sample[self.analysistype].reportdir != 'NA':
                row += '{},{},{}\n'.format(sample.name, sample[self.analysistype].referencegenome,
                                           sample[self.analysistype].matchestoreferencegenome)
        referencegenomereport.write(row)
    dotter()
|
Finds the closest reference genome to the profile of interest
|
def identical_dataset_and_algorithm_tuner(self, additional_parents=None):
    """Create a warm-start ``HyperparameterTuner`` of type "IdenticalDataAndAlgorithm".

    The new tuner copies this tuner's request fields and warm starts from the
    union of ``additional_parents`` and this tuner's own tuning job.

    Args:
        additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
            the identical dataset and algorithm tuner.

    Returns:
        sagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance which can be used to launch identical
            dataset and algorithm tuning job.

    Examples:
        >>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
        >>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner(
        >>>     additional_parents={"parent-job-2"})
        Later On:
        >>> identical_dataset_algo_tuner.fit(inputs={})
    """
    warm_start_type = WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM
    return self._create_warm_start_tuner(
        additional_parents=additional_parents,
        warm_start_type=warm_start_type,
    )
|
Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"IdenticalDataAndAlgorithm" and parents as the union of provided list of ``additional_parents`` and the ``self``
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the identical dataset and algorithm tuner.
Returns:
sagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance which can be used to launch identical
dataset and algorithm tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner(
>>> additional_parents={"parent-job-2"})
Later On:
>>> identical_dataset_algo_tuner.fit(inputs={})
|
def sens_mppt_send(self, mppt_timestamp, mppt1_volt, mppt1_amp, mppt1_pwm, mppt1_status, mppt2_volt, mppt2_amp, mppt2_pwm, mppt2_status, mppt3_volt, mppt3_amp, mppt3_pwm, mppt3_status, force_mavlink1=False):
    '''
    Encode and send a SENS_MPPT message: Maximum Power Point Tracker (MPPT)
    sensor data for solar module power performance tracking.

                mppt_timestamp            : MPPT last timestamp  (uint64_t)
                mppt1_volt                : MPPT1 voltage  (float)
                mppt1_amp                 : MPPT1 current  (float)
                mppt1_pwm                 : MPPT1 pwm  (uint16_t)
                mppt1_status              : MPPT1 status  (uint8_t)
                mppt2_volt                : MPPT2 voltage  (float)
                mppt2_amp                 : MPPT2 current  (float)
                mppt2_pwm                 : MPPT2 pwm  (uint16_t)
                mppt2_status              : MPPT2 status  (uint8_t)
                mppt3_volt                : MPPT3 voltage  (float)
                mppt3_amp                 : MPPT3 current  (float)
                mppt3_pwm                 : MPPT3 pwm  (uint16_t)
                mppt3_status              : MPPT3 status  (uint8_t)
                force_mavlink1            : forwarded to send() to force MAVLink v1 framing
    '''
    # Encode the payload first, then hand it to the transport layer.
    return self.send(self.sens_mppt_encode(mppt_timestamp, mppt1_volt, mppt1_amp, mppt1_pwm, mppt1_status, mppt2_volt, mppt2_amp, mppt2_pwm, mppt2_status, mppt3_volt, mppt3_amp, mppt3_pwm, mppt3_status), force_mavlink1=force_mavlink1)
|
Maximum Power Point Tracker (MPPT) sensor data for solar module power
performance tracking
mppt_timestamp : MPPT last timestamp (uint64_t)
mppt1_volt : MPPT1 voltage (float)
mppt1_amp : MPPT1 current (float)
mppt1_pwm : MPPT1 pwm (uint16_t)
mppt1_status : MPPT1 status (uint8_t)
mppt2_volt : MPPT2 voltage (float)
mppt2_amp : MPPT2 current (float)
mppt2_pwm : MPPT2 pwm (uint16_t)
mppt2_status : MPPT2 status (uint8_t)
mppt3_volt : MPPT3 voltage (float)
mppt3_amp : MPPT3 current (float)
mppt3_pwm : MPPT3 pwm (uint16_t)
mppt3_status : MPPT3 status (uint8_t)
|
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    '''
    Get ngrams from a text

    Sourced from:
    https://gist.github.com/dannguyen/93c2c43f4e65328b85af
    '''
    words = []
    if use_transcript:
        # Word tokens come from the audiogrep transcript timestamps.
        for sentence in audiogrep.convert_timestamps(inputfile):
            for w in sentence['words']:
                words.append(w[0])
    elif use_vtt:
        # Word tokens come from auto-generated VTT subtitle files.
        for vtt in get_vtt_files(inputfile):
            with open(vtt['vtt'], 'r') as infile:
                sentences = parse_auto_sub(infile.read())
            for sentence in sentences:
                for w in sentence['words']:
                    words.append(w['word'])
    else:
        # Fall back to SRT subtitles: concatenate all lines, then split
        # on punctuation/whitespace.
        text = ''
        for srt in get_subtitle_files(inputfile):
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    text += lines[timespan].strip() + ' '
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)
    return zip(*[words[i:] for i in range(n)])
|
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
|
def get_compositions_by_repository(self, repository_id):
    """Gets the list of ``Compositions`` associated with a ``Repository``.

    arg:    repository_id (osid.id.Id): ``Id`` of the ``Repository``
    return: (osid.repository.CompositionList) - list of related
            compositions
    raise:  NotFound - ``repository_id`` is not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bin
    mgr = self._get_provider_manager('REPOSITORY', local=True)
    # Delegate to a lookup session scoped to the given repository.
    lookup_session = mgr.get_composition_lookup_session_for_repository(repository_id, proxy=self._proxy)
    # Switch the session to its isolated repository view before listing.
    lookup_session.use_isolated_repository_view()
    return lookup_session.get_compositions()
|
Gets the list of ``Compositions`` associated with a ``Repository``.
arg: repository_id (osid.id.Id): ``Id`` of the ``Repository``
return: (osid.repository.CompositionList) - list of related
compositions
raise: NotFound - ``repository_id`` is not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def make_tarfile(output_filename, source_dir):
    '''
    Create a gzip-compressed tar archive of a directory.

    The directory appears in the archive under its own base name.
    '''
    arc_root = os.path.basename(source_dir)
    with tarfile.open(output_filename, "w:gz") as archive:
        archive.add(source_dir, arcname=arc_root)
|
Tar a directory
|
def status_favourite(self, id):
    """
    Favourite a status.

    Returns a `toot dict`_ with the favourited status.
    """
    status_id = self.__unpack_id(id)
    endpoint = '/api/v1/statuses/{0}/favourite'.format(str(status_id))
    return self.__api_request('POST', endpoint)
|
Favourite a status.
Returns a `toot dict`_ with the favourited status.
|
def _open_ftp(self):
    # type: () -> FTP
    """Open a new ftp object.

    Connects and logs in, probes server capabilities via FEAT to choose
    the control-channel encoding (utf-8 when the server advertises UTF8,
    latin-1 otherwise), and records the server's welcome message.
    """
    _ftp = FTP()
    _ftp.set_debuglevel(0)
    with ftp_errors(self):
        _ftp.connect(self.host, self.port, self.timeout)
        _ftp.login(self.user, self.passwd, self.acct)
        self._features = {}
        try:
            # Ask the server which (RFC 2389) features it supports.
            feat_response = _decode(_ftp.sendcmd("FEAT"), "latin-1")
        except error_perm:  # pragma: no cover
            # Server rejected FEAT; fall back to the conservative default.
            self.encoding = "latin-1"
        else:
            self._features = self._parse_features(feat_response)
            self.encoding = "utf-8" if "UTF8" in self._features else "latin-1"
        if not PY2:
            # Python 3: rebuild ftplib's command-channel file object with
            # the negotiated encoding (it was created before we knew it).
            _ftp.file = _ftp.sock.makefile(  # type: ignore
                "r", encoding=self.encoding
            )
    _ftp.encoding = self.encoding
    self._welcome = _ftp.welcome
    return _ftp
|
Open a new ftp object.
|
def process(source, target, rdfsonly, base=None, logger=logging):
    '''
    Prepare a statement into a triple ready for rdflib graph

    :param source: Versa model whose links are converted
    :param target: rdflib graph receiving the resulting triples
    :param rdfsonly: if true, only emit triples whose predicate is in the
        RDF or RDFS namespace
    :param base: optional base IRI used to absolutize relative links
    :param logger: logger instance (currently unused in this function)
    '''
    for link in source.match():
        s, p, o = link[:3]
        # Skip docheader statements
        if s == (base or '') + '@docheader': continue
        # Map Versa-specific resource names onto their RDF equivalents
        if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p]
        if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o]
        if p == VERSA_BASEIRI + 'refines':
            # 'refines' translates to subClassOf or subPropertyOf
            # depending on the declared type of the subject
            tlinks = list(source.match(s, TYPE_REL))
            if tlinks:
                if tlinks[0][TARGET] == VERSA_BASEIRI + 'Resource':
                    p = I(RDFS_NAMESPACE + 'subClassOf')
                elif tlinks[0][TARGET] == VERSA_BASEIRI + 'Property':
                    p = I(RDFS_NAMESPACE + 'subPropertyOf')
        if p == VERSA_BASEIRI + 'properties':
            # 'properties' becomes an rdfs:domain triple (note the
            # subject/object roles are swapped)
            suri = I(iri.absolutize(s, base)) if base else s
            target.add((URIRef(o), URIRef(RDFS_NAMESPACE + 'domain'), URIRef(suri)))
            continue
        if p == VERSA_BASEIRI + 'value':
            # 'value' becomes an rdfs:range triple, except for the
            # special literal markers
            if o not in ['Literal', 'IRI']:
                ouri = I(iri.absolutize(o, base)) if base else o
                target.add((URIRef(s), URIRef(RDFS_NAMESPACE + 'range'), URIRef(ouri)))
                continue
        s = URIRef(s)
        # Translate v:type to rdf:type
        p = RDF.type if p == TYPE_REL else URIRef(p)
        o = URIRef(o) if isinstance(o, I) else Literal(o)
        if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE):
            target.add((s, p, o))
    return
|
Prepare a statement into a triple ready for rdflib graph
|
def mutate(self, row):
    """ Add a row to the batch. If the current batch meets one of the size
    limits, the batch is sent synchronously.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_batcher_mutate]
        :end-before: [END bigtable_batcher_mutate]

    :type row: class
    :param row: class:`~google.cloud.bigtable.row.DirectRow`.

    :raises: One of the following:
             * :exc:`~.table._BigtableRetryableError` if any
               row returned a transient error.
             * :exc:`RuntimeError` if the number of responses doesn't
               match the number of rows that were retried
             * :exc:`.batcher.MaxMutationsError` if any row exceeds max
               mutations count.
    """
    mutation_count = len(row._get_mutations())
    # A single row that alone exceeds the limit can never be accepted.
    if mutation_count > MAX_MUTATIONS:
        raise MaxMutationsError(
            "The row key {} exceeds the number of mutations {}.".format(
                row.row_key, mutation_count
            )
        )
    # Flush the pending batch first if adding this row would reach the
    # batch-wide mutation limit.
    if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS:
        self.flush()
    self.rows.append(row)
    self.total_mutation_count += mutation_count
    self.total_size += row.get_mutations_size()
    # Flush when either the byte-size or the row-count threshold is hit.
    if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count:
        self.flush()
|
Add a row to the batch. If the current batch meets one of the size
limits, the batch is sent synchronously.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_batcher_mutate]
:end-before: [END bigtable_batcher_mutate]
:type row: class
:param row: class:`~google.cloud.bigtable.row.DirectRow`.
:raises: One of the following:
* :exc:`~.table._BigtableRetryableError` if any
row returned a transient error.
* :exc:`RuntimeError` if the number of responses doesn't
match the number of rows that were retried
* :exc:`.batcher.MaxMutationsError` if any row exceeds max
mutations count.
|
def query(self, coords, **kwargs):
    """
    Returns E(B-V), in mags, at the specified location(s) on the sky.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
        **kwargs: Additional keyword arguments forwarded unchanged to the
            base-class query implementation.

    Returns:
        A float array of the reddening, in magnitudes of E(B-V), at the
        selected coordinates.
    """
    # Pure delegation; this override exists to document the units and
    # semantics specific to the Lenz et al. (2017) map.
    return super(Lenz2017Query, self).query(coords, **kwargs)
|
Returns E(B-V), in mags, at the specified location(s) on the sky.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
Returns:
A float array of the reddening, in magnitudes of E(B-V), at the
selected coordinates.
|
def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):
    """Write the given settings to output files.

    Args:
        parsed_namespace: namespace object created within parse_known_args()
        output_file_paths: any number of file paths to write the config to
        exit_after: whether to exit the program after writing the config files
    """
    # Fail early if any destination can't be opened for writing.
    for path in output_file_paths:
        try:
            with open(path, "w"):
                pass
        except IOError as e:
            raise ValueError("Couldn't open %s for writing: %s" % (
                path, e))
    if output_file_paths:
        # Serialize the parsed settings once, then write to every target.
        config_items = self.get_items_for_config_file_output(
            self._source_to_settings, parsed_namespace)
        file_contents = self._config_file_parser.serialize(config_items)
        for path in output_file_paths:
            with open(path, "w") as output_file:
                output_file.write(file_contents)
    message = "Wrote config file to " + ", ".join(output_file_paths)
    if exit_after:
        self.exit(0, message)
    else:
        print(message)
|
Write the given settings to output files.
Args:
parsed_namespace: namespace object created within parse_known_args()
output_file_paths: any number of file paths to write the config to
exit_after: whether to exit the program after writing the config files
|
def find_threads_by_name(self, name, bExactMatch = True):
    """
    Find threads by name, using different search methods.

    @type  name: str, None
    @param name: Name to look for. Use C{None} to find nameless threads.

    @type  bExactMatch: bool
    @param bExactMatch: C{True} requires the name to match exactly,
        C{False} accepts any thread whose name contains the given
        substring. Ignored when C{name} is C{None}.

    @rtype:  list( L{Thread} )
    @return: All threads matching the given name.
    """
    # Build the match predicate once, then filter all threads with it.
    if name is None:
        # Nameless threads only.
        matches = lambda thread_name: thread_name is None
    elif bExactMatch:
        # Exact name match.
        matches = lambda thread_name: thread_name == name
    else:
        # Loose match: the given name is a substring of the thread name.
        matches = lambda thread_name: thread_name is not None and name in thread_name
    return [aThread for aThread in self.iter_threads()
            if matches(aThread.get_name())]
|
Find threads by name, using different search methods.
@type name: str, None
@param name: Name to look for. Use C{None} to find nameless threads.
@type bExactMatch: bool
@param bExactMatch: C{True} if the name must be
B{exactly} as given, C{False} if the name can be
loosely matched.
This parameter is ignored when C{name} is C{None}.
@rtype: list( L{Thread} )
@return: All threads matching the given name.
|
def main(self, c):
    """
    Process one complex input sample and return the scaled angle output.

    :type c: Complex
    :rtype: Sfix
    """
    # Multiply the input by its (presumably delayed) conjugate -- this
    # looks like an FM-demodulator structure where the product's angle is
    # the phase difference between samples. TODO(review): confirm the
    # conjugate block's delay semantics.
    conj = self.conjugate.main(c)
    mult = self.complex_mult.main(c, conj)
    angle = self.angle.main(mult)
    # Scale the angle by the fixed-point gain constant.
    self.y = self.GAIN_SFIX * angle
    return self.y
|
:type c: Complex
:rtype: Sfix
|
def get_object(self):
    """
    Return the object to be published.

    Raises Http404 when the resolved object does not support publishing
    (i.e. it has no ``publish`` attribute).
    """
    obj = super(PublishActionView, self).get_object()
    # Guard clause: only objects exposing ``publish`` may be published.
    if obj and not hasattr(obj, 'publish'):
        raise http.Http404
    return obj
|
Get the object for publishing
Raises a http404 error if the object is not found.
|
def pset_field(item_type, optional=False, initial=()):
    """
    Create checked ``PSet`` field.

    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory if no value is given
        for the field.

    :return: A ``field`` containing a ``CheckedPSet`` of the given type.
    """
    # Delegate to the shared factory used by all checked-sequence fields,
    # parameterized with the PSet container type.
    return _sequence_field(CheckedPSet, item_type, optional,
                           initial)
|
Create checked ``PSet`` field.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPSet`` of the given type.
|
def redirect_to():
    """302/3XX Redirects to the given URL.
    ---
    tags:
      - Redirects
    produces:
      - text/html
    get:
      parameters:
        - in: query
          name: url
          type: string
          required: true
        - in: query
          name: status_code
          type: int
    post:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    patch:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    put:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    responses:
      302:
        description: A redirection.
    """
    # NOTE(review): only query-string args are read here even though the
    # spec above documents formData parameters for POST/PUT/PATCH --
    # confirm whether form data should also be consulted.
    args_dict = request.args.items()
    args = CaseInsensitiveDict(args_dict)
    # We need to build the response manually and convert to UTF-8 to prevent
    # werkzeug from "fixing" the URL. This endpoint should set the Location
    # header to the exact string supplied.
    response = app.make_response("")
    response.status_code = 302
    if "status_code" in args:
        status_code = int(args["status_code"])
        # Only honor redirect-class (3XX) codes; otherwise keep the
        # default 302.
        if status_code >= 300 and status_code < 400:
            response.status_code = status_code
    response.headers["Location"] = args["url"].encode("utf-8")
    return response
|
302/3XX Redirects to the given URL.
---
tags:
- Redirects
produces:
- text/html
get:
parameters:
- in: query
name: url
type: string
required: true
- in: query
name: status_code
type: int
post:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
patch:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
put:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
responses:
302:
description: A redirection.
|
def inspect_built_image(self):
    """
    inspect built image

    :return: dict of image metadata as returned by docker inspect
    """
    logger.info("inspecting built image '%s'", self.image_id)
    # Guard: ensure_is_built() is expected to fail when no image has been
    # produced yet, so we never inspect a nonexistent image.
    self.ensure_is_built()
    # dict with lots of data, see man docker-inspect
    inspect_data = self.tasker.inspect_image(self.image_id)
    return inspect_data
|
inspect built image
:return: dict
|
def get(self):
    """
    Get a connection from the pool, to make and receive traffic.

    If the connection fails for any reason (socket.error), it is dropped
    and a new one is scheduled. Please use @retry as a way to automatically
    retry whatever operation you were performing.

    This is a context-manager-style generator: the connection is yielded
    to the caller and returned to the pool (or discarded) afterwards.
    """
    self.lock.acquire()
    try:
        c = self.conn.popleft()
        yield c
    except self.exc_classes:
        # The current connection has failed, drop it and create a new one
        gevent.spawn_later(1, self._addOne)
        # NOTE(review): the lock is not released on this path -- confirm
        # whether that is intentional or a leak.
        raise
    except:
        # Any other error means the connection itself is still fine, so
        # return it to the pool before propagating the exception.
        self.conn.append(c)
        self.lock.release()
        raise
    else:
        # NOTE: cannot use finally because MUST NOT reuse the connection
        # if it failed (socket.error)
        self.conn.append(c)
        self.lock.release()
|
Get a connection from the pool, to make and receive traffic.
If the connection fails for any reason (socket.error), it is dropped
and a new one is scheduled. Please use @retry as a way to automatically
retry whatever operation you were performing.
|
def task(self, task_name):
    """
    Return an ENVI Py Engine Task object. See ENVI Py Engine Task for examples.

    :param task_name: The name of the task to retrieve.
    :return: An ENVI Py Engine Task object.
    """
    # The task URI is the engine name and task name joined with a colon.
    task_uri = ':'.join((self._engine_name, task_name))
    return Task(uri=task_uri, cwd=self._cwd)
|
Returns an ENVI Py Engine Task object. See ENVI Py Engine Task for examples.
:param task_name: The name of the task to retrieve.
:return: An ENVI Py Engine Task object.
|
def apply(self, func, *args, **kwargs):
    """Apply the provided function and combine the results together in the
    same way as apply from groupby in pandas.

    This returns a DataFrame.
    """
    self._prep_pandas_groupby()
    def key_by_index(data):
        """Key each row by its index.
        """
        # TODO: Is there a better way to do this?
        for key, row in data.iterrows():
            yield (key, pd.DataFrame.from_dict(
                dict([(key, row)]), orient='index'))
    # Capture groupby args locally so the lambdas don't close over self.
    myargs = self._myargs
    mykwargs = self._mykwargs
    # Re-create the pandas groupby within each distributed partition.
    regroupedRDD = self._distributedRDD.mapValues(
        lambda data: data.groupby(*myargs, **mykwargs))
    # Apply the user function to each per-partition group.
    appliedRDD = regroupedRDD.map(
        lambda key_data: key_data[1].apply(func, *args, **kwargs))
    # Flatten back into (index, single-row DataFrame) pairs.
    reKeyedRDD = appliedRDD.flatMap(key_by_index)
    dataframe = self._sortIfNeeded(reKeyedRDD).values()
    return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
|
Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame.
|
def __bindings(self):
    """Binds events to handlers."""
    # Text edits in the text control
    self.textctrl.Bind(wx.EVT_TEXT, self.OnText)
    # Font chooser button clicks
    self.fontbutton.Bind(wx.EVT_BUTTON, self.OnFont)
    # Colour-select events are bound on the widget itself
    self.Bind(csel.EVT_COLOURSELECT, self.OnColor)
|
Binds events to handlers
|
def get_grouped_translations(instances, **kwargs):
    """
    Takes instances and returns grouped translations ready to
    be set in cache.

    Args:
        instances: a single model instance, an iterable of instances of a
            single model, or a QuerySet.
        **kwargs: optional ``field_names``, ``languages``, ``chunks_length``
            and ``populate_missing`` options.

    Returns:
        dict mapping each instance pk to the list of its translation rows.

    Raises:
        Exception: if instances come from different models, or if the model
            has no Linguist ``identifier`` configured.
    """
    grouped_translations = collections.defaultdict(list)
    if not instances:
        return grouped_translations
    # BUG FIX: ``collections.Iterable`` was removed in Python 3.10; import
    # from ``collections.abc`` when available, falling back to the old
    # location for Python 2 compatibility.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable
    if not isinstance(instances, Iterable):
        instances = [instances]
    if isinstance(instances, QuerySet):
        model = instances.model
    else:
        model = instances[0]._meta.model
    instances_ids = []
    for instance in instances:
        instances_ids.append(instance.pk)
        # All instances must share one model so a single lookup works.
        if instance._meta.model != model:
            raise Exception(
                "You cannot use different model instances, only one authorized."
            )
    from .models import Translation
    from .mixins import ModelMixin
    decider = model._meta.linguist.get("decider", Translation)
    identifier = model._meta.linguist.get("identifier", None)
    chunks_length = kwargs.get("chunks_length", None)
    # Accepted for API compatibility; not used in this function.
    populate_missing = kwargs.get("populate_missing", True)
    if identifier is None:
        raise Exception('You must define Linguist "identifier" meta option')
    lookup = dict(identifier=identifier)
    # Narrow the queryset by field names / languages when provided.
    for kwarg in ("field_names", "languages"):
        value = kwargs.get(kwarg, None)
        if value is not None:
            if not isinstance(value, (list, tuple)):
                value = [value]
            lookup["%s__in" % kwarg[:-1]] = value
    if chunks_length is not None:
        # Query in chunks to keep the SQL IN clause bounded.
        translations_qs = []
        for ids in utils.chunks(instances_ids, chunks_length):
            ids_lookup = copy.copy(lookup)
            ids_lookup["object_id__in"] = ids
            translations_qs.append(decider.objects.filter(**ids_lookup))
        translations = itertools.chain.from_iterable(translations_qs)
    else:
        lookup["object_id__in"] = instances_ids
        translations = decider.objects.filter(**lookup)
    for translation in translations:
        grouped_translations[translation.object_id].append(translation)
    return grouped_translations
|
Takes instances and returns grouped translations ready to
be set in cache.
|
def _scan_for_tokens(contents):
    """Scan a string for tokens and return immediate form tokens.

    Splits ``contents`` into lines, runs a prioritized ``re.Scanner``
    over each line and returns a flat list of ``Token`` objects carrying
    the token type, its text, and its 1-indexed line/column position.

    Raises:
        RuntimeError: if a line contains text no pattern can consume.
    """
    # Regexes are in priority order. Changing the order may alter the
    # behavior of the lexer
    scanner = re.Scanner([
        # Things inside quotes
        (r"(?<![^\s\(])([\"\'])(?:(?=(\\?))\2.)*?\1(?![^\s\)])",
         lambda s, t: (TokenType.QuotedLiteral, t)),
        # Numbers on their own
        (r"(?<![^\s\(])-?[0-9]+(?![^\s\)\(])", lambda s, t: (TokenType.Number,
                                                             t)),
        # Left Paren
        (r"\(", lambda s, t: (TokenType.LeftParen, t)),
        # Right Paren
        (r"\)", lambda s, t: (TokenType.RightParen, t)),
        # Either a valid function name or variable name.
        # (fixed: the original class [a-zA-z_] also matched the ASCII
        # characters '[', '\', ']', '^' and '`' that sit between 'Z'
        # and 'a' — clearly a typo, given the [a-zA-Z0-9_] continuation)
        (r"(?<![^\s\(])[a-zA-Z_][a-zA-Z0-9_]*(?![^\s\)\(])",
         lambda s, t: (TokenType.Word, t)),
        # Variable dereference, e.g. ${name}.
        # (same [a-zA-z_] -> [a-zA-Z_] fix as the Word pattern above)
        (r"(?<![^\s\(])\${[a-zA-Z_][a-zA-Z0-9_]*}(?![^\s\)])",
         lambda s, t: (TokenType.Deref, t)),
        # Newline
        (r"\n", lambda s, t: (TokenType.Newline, t)),
        # Whitespace
        (r"\s+", lambda s, t: (TokenType.Whitespace, t)),
        # The beginning of a double-quoted string, terminating at end of line
        (r"(?<![^\s\(\\])[\"]([^\"]|\\[\"])*$",
         lambda s, t: (TokenType.BeginDoubleQuotedLiteral, t)),
        # The end of a double-quoted string
        (r"[^\s]*(?<!\\)[\"](?![^\s\)])",
         lambda s, t: (TokenType.EndDoubleQuotedLiteral, t)),
        # The beginning of a single-quoted string, terminating at end of line
        (r"(?<![^\s\(\\])[\']([^\']|\\[\'])*$",
         lambda s, t: (TokenType.BeginSingleQuotedLiteral, t)),
        # The end of a single-quoted string
        (r"[^\s]*(?<!\\)[\'](?![^\s\)])",
         lambda s, t: (TokenType.EndSingleQuotedLiteral, t)),
        # Begin-RST Comment Block
        (r"#.rst:$", lambda s, t: (TokenType.BeginRSTComment, t)),
        # Begin Inline RST
        (r"#\[=*\[.rst:$", lambda s, t: (TokenType.BeginInlineRST, t)),
        # End Inline RST
        (r"#\]=*\]$", lambda s, t: (TokenType.EndInlineRST, t)),
        # Comment
        (r"#", lambda s, t: (TokenType.Comment, t)),
        # Catch-all for literals which are compound statements.
        (r"([^\s\(\)]+|[^\s\(]*[^\)]|[^\(][^\s\)]*)",
         lambda s, t: (TokenType.UnquotedLiteral, t))
    ])
    tokens_return = []
    # splitlines(True) keeps the trailing newline on each line so the
    # Newline pattern above can consume it.
    lines = contents.splitlines(True)
    lineno = 0
    for line in lines:
        lineno += 1
        col = 1
        tokens, remaining = scanner.scan(line)
        if remaining != "":
            msg = "Unknown tokens found on line {0}: {1}".format(lineno,
                                                                 remaining)
            raise RuntimeError(msg)
        for token_type, token_contents in tokens:
            tokens_return.append(Token(type=token_type,
                                       content=token_contents,
                                       line=lineno,
                                       col=col))
            # Columns are 1-indexed; advance by the consumed text length.
            col += len(token_contents)
    return tokens_return
|
Scan a string for tokens and return immediate form tokens.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.