code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def make_list(cls, item_converter=None, listsep=','):
    """Build a type converter for a list of items (cardinality many := 1..*).

    The generated parser matches anything; the per-item converter is
    responsible for failing on invalid input.

    :param item_converter: Converter applied to each item
        (defaults to ``parse_anything``).
    :param listsep: Separator string between list items.
    :return: Type converter function object for the list.
    """
    converter = item_converter or parse_anything
    return cls.with_cardinality(
        Cardinality.many,
        converter,
        pattern=cls.anything_pattern,
        listsep=listsep,
    )
|
Create a type converter for a list of items (many := 1..*).
The parser accepts anything and the converter needs to fail on errors.
:param item_converter: Type converter for an item.
:param listsep: List separator to use (as string).
:return: Type converter function object for the list.
|
def register_path(self, path, modified_time=None):
    """Register *path* for tracking.

    :param path: Path name.
    :type path: unicode
    :param modified_time: Custom modified time (defaults to the path's
        current modified time).
    :type modified_time: int or float
    :return: Method success.
    :rtype: bool
    :raises foundations.exceptions.PathExistsError: If *path* does not exist.
    :raises umbra.exceptions.PathRegistrationError: If *path* is already
        registered.
    """
    if not foundations.common.path_exists(path):
        raise foundations.exceptions.PathExistsError("{0} | '{1}' path doesn't exists!".format(
            self.__class__.__name__, path))
    if path in self:
        raise umbra.exceptions.PathRegistrationError("{0} | '{1}' path is already registered!".format(
            self.__class__.__name__, path))
    stored_time = modified_time if modified_time is not None else self.get_path_modified_time(path)
    self.__paths[path] = (stored_time, os.path.isfile(path))
    return True
|
Registers given path.
:param path: Path name.
:type path: unicode
:param modified_time: Custom modified time.
:type modified_time: int or float
:return: Method success.
:rtype: bool
|
def __File_Command_lineEdit_set_ui(self):
    """
    Fills **File_Command_lineEdit** Widget.

    Reads the "file_command" settings key (creating it with the current
    default when missing), caches it on the instance and displays it.
    """
    # Adding settings key if it doesn't exists.
    # NOTE: relies on `and` short-circuiting — set_key only runs when the
    # stored value is null (key not yet present).
    self.__settings.get_key(self.__settings_section, "file_command").isNull() and \
        self.__settings.set_key(self.__settings_section, "file_command", self.__file_command)
    # Settings return a QVariant-like object; convert to a string.
    file_command = self.__settings.get_key(self.__settings_section, "file_command").toString()
    LOGGER.debug("> Setting '{0}' with value '{1}'.".format("File_Command_lineEdit",
                                                            file_command))
    self.__file_command = file_command
    self.File_Command_lineEdit.setText(file_command)
|
Fills **File_Command_lineEdit** Widget.
|
def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
    """Find the correct metadata expression for the expression.
    Parameters
    ----------
    field : {'deltas', 'checkpoints'}
        The kind of metadata expr to lookup.
    expr : Expr
        The baseline expression.
    metadata_expr : Expr, 'auto', or None
        The metadata argument. If this is 'auto', then the metadata table will
        be searched for by walking up the expression tree. If this cannot be
        reflected, then an action will be taken based on the
        ``no_metadata_rule``.
    no_metadata_rule : {'warn', 'raise', 'ignore'}
        How to handle the case where the metadata_expr='auto' but no expr
        could be found.
    Returns
    -------
    metadata : Expr or None
        The deltas or metadata table to use.
    """
    # Explicit table (or explicit opt-out with None): use it as-is.
    if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
        return metadata_expr
    # metadata_expr == 'auto': look for a sibling table named
    # "<expr name>_<field>" (e.g. "prices_deltas") on the parent expression.
    try:
        return expr._child['_'.join(((expr._name or ''), field))]
    except (ValueError, AttributeError):
        # No such table could be reflected; honor no_metadata_rule
        # ('ignore' falls through silently).
        if no_metadata_rule == 'raise':
            raise ValueError(
                "no %s table could be reflected for %s" % (field, expr)
            )
        elif no_metadata_rule == 'warn':
            warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
    return None
|
Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
|
def drop(self, format_p, action):
    """Informs the source that a drop event occurred for a pending
    drag and drop operation.

    in format_p of type str
        The mime type the data must be in.
    in action of type :class:`DnDAction`
        The action to use.
    return progress of type :class:`IProgress`
        Progress object to track the operation completion.
    raises :class:`VBoxErrorVmError`
        VMM device is not available.
    """
    if not isinstance(format_p, basestring):
        raise TypeError("format_p can only be an instance of type basestring")
    if not isinstance(action, DnDAction):
        raise TypeError("action can only be an instance of type DnDAction")
    raw_progress = self._call("drop", in_p=[format_p, action])
    return IProgress(raw_progress)
|
Informs the source that a drop event occurred for a pending
drag and drop operation.
in format_p of type str
The mime type the data must be in.
in action of type :class:`DnDAction`
The action to use.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorVmError`
VMM device is not available.
|
def compare_modules(file_, imports):
    """Compare modules in a file to imported modules in a project.

    Args:
        file_ (str): File to parse for modules to be compared.
        imports (tuple): Modules being imported in the project; each entry
            is a mapping with a "name" key.
    Returns:
        set: Names of modules that exist in the specified file but are not
            imported by the project.
    """
    file_module_names = {entry["name"] for entry in parse_requirements(file_)}
    imported_names = {entry["name"] for entry in imports}
    return file_module_names - imported_names
|
Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
tuple: The modules not imported in the project, but do exist in the
specified file.
|
def _status_message_0x01_received(self, msg):
"""Handle status received messages.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
"""
if msg.cmd2 == 0x00 or msg.cmd2 == 0x02:
self._update_subscribers(0x00)
elif msg.cmd2 == 0x01 or msg.cmd2 == 0x03:
self._update_subscribers(0xff)
else:
raise ValueError
|
Handle status received messages.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
|
def dump(self, f):
    """
    Validate, serialize and write this object's data to a file.

    :param f: file-like object or path to file
    :type f: file or str
    """
    self.validate()
    with _open_file_obj(f, "w") as stream:
        parser = self._get_parser()
        self.serialize(parser)
        self.build_file(parser, stream)
|
Dump data to a file.
:param f: file-like object or path to file
:type f: file or str
|
def determine_actions(self, request, view):
    """Allow all allowed methods.

    Returns serializer metadata for every allowed method (except the
    excluded ones) that the requesting user has permission to use.
    """
    from rest_framework.generics import GenericAPIView
    actions = {}
    # Only advertise "data" methods; HEAD/OPTIONS/PATCH/DELETE get no
    # serializer metadata.
    excluded_methods = {'HEAD', 'OPTIONS', 'PATCH', 'DELETE'}
    for method in set(view.allowed_methods) - excluded_methods:
        # Probe the view with a clone of the request for this method.
        view.request = clone_request(request, method)
        try:
            if isinstance(view, GenericAPIView):
                # Object-level checks apply when a lookup kwarg is
                # configured or present in the URL kwargs.
                has_object = view.lookup_url_kwarg or view.lookup_field in view.kwargs
            elif method in {'PUT', 'POST'}:
                # Non-generic view: treat PUT as object-level, POST as not.
                has_object = method in {'PUT'}
            else:
                continue
            # Test global permissions
            if hasattr(view, 'check_permissions'):
                view.check_permissions(view.request)
            # Test object permissions
            if has_object and hasattr(view, 'get_object'):
                view.get_object()
        except (exceptions.APIException, PermissionDenied, Http404):
            # Not permitted / not found: omit this method from the result.
            pass
        else:
            # If user has appropriate permissions for the view, include
            # appropriate metadata about the fields that should be supplied.
            serializer = view.get_serializer()
            actions[method] = self.get_serializer_info(serializer)
        finally:
            # Always restore the original request on the view.
            view.request = request
    return actions
|
Allow all allowed methods
|
def draw(obj, plane='3d', inline=False, **kwargs):
    '''Draw the morphology in the given plane.

    plane (str): a string representing the 2D plane (example: 'xy')
        or '3d', '3D' for a 3D view
    inline (bool): must be set to True for interactive ipython notebook plotting
    '''
    wants_3d = plane.lower() == '3d'
    if wants_3d:
        return _plot_neuron3d(obj, inline, **kwargs)
    return _plot_neuron(obj, plane, inline, **kwargs)
|
Draw the morphology using in the given plane
plane (str): a string representing the 2D plane (example: 'xy')
or '3d', '3D' for a 3D view
inline (bool): must be set to True for interactive ipython notebook plotting
|
def rmdir(self, dir_name):
    """Recursively remove the directory cur_dir/dir_name."""
    # Verify write permission before touching the filesystem.
    self.check_write(dir_name)
    target = normpath_url(join_url(self.cur_dir, dir_name))
    shutil.rmtree(target)
|
Remove cur_dir/name.
|
def _create_json(self):
"""
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
"""
data_json = {
"rclass": "local",
"key": self.name,
"description": self.description,
"packageType": self.packageType,
"notes": "",
"includesPattern": "**/*",
"excludesPattern": "",
"repoLayoutRef": self.repoLayoutRef,
"dockerApiVersion": self.dockerApiVersion,
"checksumPolicyType": "client-checksums",
"handleReleases": True,
"handleSnapshots": True,
"maxUniqueSnapshots": 0,
"snapshotVersionBehavior": "unique",
"suppressPomConsistencyChecks": True,
"blackedOut": False,
"propertySets": [],
"archiveBrowsingEnabled": self.archiveBrowsingEnabled,
"yumRootDepth": 0,
}
return data_json
|
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
|
def update(cls, id, memory, cores, console, password, background,
           max_memory):
    """Update a virtual machine.

    :param id: VM identifier (resolved through ``cls.usable_id``)
    :param memory: new memory size; skipped when falsy
    :param cores: new core count; skipped when falsy
    :param console: console setting; skipped when falsy
    :param password: new password; skipped when falsy
    :param background: when True, return the raw API result instead of
        showing an interactive progress bar
    :param max_memory: new maximum memory; skipped when falsy
    :return: the API call result in background mode, otherwise None after
        the progress display completes
    """
    # No TTY means no progress bar: force background mode.
    if not background and not cls.intty():
        background = True
    # Only send parameters the caller actually provided (truthy values).
    vm_params = {}
    if memory:
        vm_params['memory'] = memory
    if cores:
        vm_params['cores'] = cores
    if console:
        vm_params['console'] = console
    if password:
        vm_params['password'] = password
    if max_memory:
        vm_params['vm_max_memory'] = max_memory
    result = cls.call('hosting.vm.update', cls.usable_id(id), vm_params)
    if background:
        return result
    # interactive mode, run a progress bar
    cls.echo('Updating your Virtual Machine %s.' % id)
    cls.display_progress(result)
|
Update a virtual machine.
|
def probability_density(self, X):
    """Evaluate the fitted Gaussian probability density at the points in X.

    Arguments:
        X: `np.ndarray` of shape (n, 1).
    Returns:
        np.ndarray
    Raises:
        whatever ``check_fit`` raises when the model is not fitted.
    """
    self.check_fit()
    densities = norm.pdf(X, loc=self.mean, scale=self.std)
    return densities
|
Compute probability density.
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
np.ndarray
|
def _put(self, *args, **kwargs):
    """
    A wrapper for PUT requests that JSON-encodes the 'data' parameter.

    :returns: The response of your put
    :rtype: dict
    :raises: This will raise a
        :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>`
        if there is an error from New Relic
    """
    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])
    response = requests.put(*args, **kwargs)
    if response.ok:
        return response.json()
    raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text))
|
A wrapper for putting things. It will also json encode your 'data' parameter
:returns: The response of your put
:rtype: dict
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>`
if there is an error from New Relic
|
def handle_error(result, exception_class=None):
    """
    Checks a Secur32 SECURITY_STATUS result and raises on failure.

    :param result:
        A function result; 0 indicates success, any other value is treated
        as an error code
    :param exception_class:
        The exception class to use for the exception if an error occurred
        (defaults to OSError)
    :raises:
        TLSError - for the known TLS-level status codes
        exception_class - for any other non-zero result, with the decoded
        Windows error message appended
    """
    # 0 (SEC_E_OK) means success — nothing to do.
    if result == 0:
        return
    # Map well-known TLS status codes to specific TLSError messages.
    if result == Secur32Const.SEC_E_OUT_OF_SEQUENCE:
        raise TLSError('A packet was received out of order')
    if result == Secur32Const.SEC_E_MESSAGE_ALTERED:
        raise TLSError('A packet was received altered')
    if result == Secur32Const.SEC_E_CONTEXT_EXPIRED:
        raise TLSError('The TLS session expired')
    # Fall back to the last Windows error message, decoded to unicode.
    _, error_string = get_error()
    if not isinstance(error_string, str_cls):
        error_string = _try_decode(error_string)
    if exception_class is None:
        exception_class = OSError
    raise exception_class(('SECURITY_STATUS error 0x%0.2X: ' % result) + error_string)
|
Extracts the last Windows error message into a python unicode string
:param result:
A function result, 0 or None indicates failure
:param exception_class:
The exception class to use for the exception if an error occurred
:return:
A unicode string error message
|
def instances_set(self, root, reservation):
    """Parse instance data out of an XML payload.

    @param root: The root node of the XML payload.
    @param reservation: The L{Reservation} associated with the instances
        from the response.
    @return: A C{list} of L{Instance}s.
    """
    return [self.instance(node, reservation)
            for node in root.find("instancesSet")]
|
Parse instance data out of an XML payload.
@param root: The root node of the XML payload.
@param reservation: The L{Reservation} associated with the instances
from the response.
@return: A C{list} of L{Instance}s.
|
def push(self, value):
    """
    SNEAK value TO FRONT OF THE QUEUE

    :param value: item to prepend
    :return: self, to allow chaining
    """
    # NOTE(review): Log.error may raise rather than only log — if so, a
    # push to a closed queue (without allow_add_after_close) is rejected
    # here; confirm against the Log implementation.
    if self.closed and not self.allow_add_after_close:
        Log.error("Do not push to closed queue")
    with self.lock:
        # Block until the queue has capacity.
        self._wait_for_queue_space()
        # A queue closed while waiting silently drops the value.
        if not self.closed:
            self.queue.appendleft(value)
    return self
|
SNEAK value TO FRONT OF THE QUEUE
|
def matches_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Count foreground and background values above the score threshold set
    by a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).
    Returns
    -------
    counts : list
        ``[n_fg, n_bg]`` — the number of foreground and background values
        scoring at or above the threshold.
    """
    fg_vals = np.array(fg_vals)
    # Threshold: the background score at the (100 - fpr*100)th percentile,
    # so that only a fraction `fpr` of background values score >= s.
    s = scoreatpercentile(bg_vals, 100 - fpr * 100)
    return [sum(fg_vals >= s), sum(bg_vals >= s)]
|
Computes the hypergeometric p-value at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
fraction : float
The fraction positives at the specified FPR.
|
def p_kwl_kwl(self, p):
    ''' kwl : kwl SEPARATOR kwl
    '''
    # NOTE: the docstring above is the PLY grammar rule — do not edit it.
    _LOGGER.debug("kwl -> kwl ; kwl")
    # Prefer the right-hand keyword list, fall back to the left-hand one;
    # when both are None, produce an UNKNOWN typed value.
    if p[3] is not None:
        p[0] = p[3]
    elif p[1] is not None:
        p[0] = p[1]
    else:
        p[0] = TypedClass(None, TypedClass.UNKNOWN)
|
kwl : kwl SEPARATOR kwl
|
def device_key(self, device_key):
    """
    Sets the device_key of this DeviceData.
    The fingerprint of the device certificate.

    :param device_key: The device_key of this DeviceData.
    :type: str
    :raises ValueError: if the value is longer than 512 characters
    """
    value_too_long = device_key is not None and len(device_key) > 512
    if value_too_long:
        raise ValueError("Invalid value for `device_key`, length must be less than or equal to `512`")
    self._device_key = device_key
|
Sets the device_key of this DeviceData.
The fingerprint of the device certificate.
:param device_key: The device_key of this DeviceData.
:type: str
|
def parse(system):
    """
    Parse input file with the given format in system.files.input_format.

    Parses the base case file, then (when configured) the additional file
    and the dyn file.

    :param system: system object carrying ``files`` (case paths/formats)
    :return: True on success, False on any failure
    """
    t, _ = elapsed()
    input_format = system.files.input_format
    add_format = system.files.add_format
    # exit when no input format is given
    if not input_format:
        logger.error(
            'No input format found. Specify or guess a format before parsing.')
        return False
    # exit if the format parser could not be imported
    try:
        parser = importlib.import_module('.' + input_format, __name__)
        dmparser = importlib.import_module('.' + 'dome', __name__)
        if add_format:
            addparser = importlib.import_module('.' + add_format, __name__)
    except ImportError:
        logger.error(
            'Parser for {:s} format not found. Program will exit.'.format(
                input_format))
        return False
    # try parsing the base case file
    logger.info('Parsing input file <{:s}>'.format(system.files.fullname))
    if not parser.read(system.files.case, system):
        logger.error(
            'Error parsing case file {:s} with {:s} format parser.'.format(
                system.files.fullname, input_format))
        return False
    # Try parsing the addfile
    if system.files.addfile:
        if not system.files.add_format:
            logger.error('Unknown addfile format.')
            # BUGFIX: previously returned None; return False for a
            # consistent failure value.
            return False
        logger.info('Parsing additional file {:s}.'.format(
            system.files.addfile))
        if not addparser.readadd(system.files.addfile, system):
            logger.error(
                'Error parsing addfile {:s} with {:s} format parser.'.format(
                    system.files.addfile, input_format))
            return False
    # Try parsing the dynfile with dm filter
    if system.files.dynfile:
        logger.info('Parsing input file {:s}.'.format(
            system.files.dynfile))
        if not dmparser.read(system.files.dynfile, system):
            logger.error(
                'Error parsing dynfile {:s} with dm format parser.'.format(
                    system.files.dynfile))
            return False
    _, s = elapsed(t)
    logger.debug('Case file {:s} parsed in {:s}.'.format(
        system.files.fullname, s))
    return True
|
Parse input file with the given format in system.files.input_format
|
def normalize_locale(locale):
    """
    Normalize locale.

    Extracts the language code from a locale string for use in
    dictionary loading.

    :param locale: string, locale (en, en_US)
    :return: string language code, or None when no leading alphabetic
        language code is found
    """
    import re
    found = re.match(r'^[a-z]+', locale.lower())
    return found.group() if found else None
|
Normalize locale
Extracts language code from passed in locale string to be used later
for dictionaries loading.
:param locale: string, locale (en, en_US)
:return: string, language code
|
def find_library_darwin(cls):
    """Loads the SEGGER DLL from the installed applications.
    This method accounts for the all the different ways in which the DLL
    may be installed depending on the version of the DLL. Always uses
    the first directory found.
    SEGGER's DLL is installed in one of three ways dependent on which
    which version of the SEGGER tools are installed:
    ======== ============================================================
    Versions Directory
    ======== ============================================================
    < 5.0.0  ``/Applications/SEGGER/JLink\\ NUMBER``
    < 6.0.0  ``/Applications/SEGGER/JLink/libjlinkarm.major.minor.dylib``
    >= 6.0.0 ``/Applications/SEGGER/JLink/libjlinkarm``
    ======== ============================================================
    Args:
      cls (Library): the ``Library`` class
    Yields:
      The paths to the J-Link library files in the order they are found
      (this is a generator; it yields nothing when the SEGGER root
      directory does not exist).
    """
    dll = Library.JLINK_SDK_NAME
    root = os.path.join('/', 'Applications', 'SEGGER')
    if not os.path.isdir(root):
        return
    for d in os.listdir(root):
        dir_path = os.path.join(root, d)
        # Navigate through each JLink directory.
        if os.path.isdir(dir_path) and d.startswith('JLink'):
            # Only regular files are considered library candidates.
            files = list(f for f in os.listdir(dir_path) if
                         os.path.isfile(os.path.join(dir_path, f)))
            # For versions >= 6.0.0 and < 5.0.0, this file will exist, so
            # we want to use this one instead of the versioned one.
            if (dll + '.dylib') in files:
                yield os.path.join(dir_path, dll + '.dylib')
            # For versions >= 5.0.0 and < 6.0.0, there is no strictly
            # linked library file, so try and find the versioned one.
            for f in files:
                if f.startswith(dll):
                    yield os.path.join(dir_path, f)
|
Loads the SEGGER DLL from the installed applications.
This method accounts for the all the different ways in which the DLL
may be installed depending on the version of the DLL. Always uses
the first directory found.
SEGGER's DLL is installed in one of three ways dependent on which
which version of the SEGGER tools are installed:
======== ============================================================
Versions Directory
======== ============================================================
< 5.0.0 ``/Applications/SEGGER/JLink\\ NUMBER``
< 6.0.0 ``/Applications/SEGGER/JLink/libjlinkarm.major.minor.dylib``
>= 6.0.0 ``/Applications/SEGGER/JLink/libjlinkarm``
======== ============================================================
Args:
cls (Library): the ``Library`` class
Returns:
The path to the J-Link library files in the order they are found.
|
def fix_imports(script):
    """
    Replace "from PyQt5 import" by "from pyqode.qt import".

    Bare top-level "import X" lines are also rewritten as relative
    imports ("from . import X"). The file is rewritten in place.

    :param script: script path
    """
    with open(script, 'r') as f_script:
        original_lines = f_script.read().splitlines()
    fixed_lines = []
    for line in original_lines:
        if line.startswith("import "):
            line = "from . " + line
        fixed_lines.append(line.replace("from PyQt5 import", "from pyqode.qt import"))
    with open(script, 'w') as f_script:
        f_script.write("\n".join(fixed_lines))
|
Replace "from PyQt5 import" by "from pyqode.qt import".
:param script: script path
|
def update_metadata(self, resource, keys_vals):
    """
    Updates key-value pairs with the given resource.
    Will attempt to update all key-value pairs even if some fail.
    Keys must already exist.

    Args:
        resource (intern.resource.boss.BossResource)
        keys_vals (dictionary): Collection of key-value pairs to update on
            the given resource.
    Raises:
        HTTPErrorList on failure.
    """
    # Authenticate the metadata service with this remote's token before
    # delegating the actual update.
    self.metadata_service.set_auth(self._token_metadata)
    self.metadata_service.update(resource, keys_vals)
|
Updates key-value pairs with the given resource.
Will attempt to update all key-value pairs even if some fail.
Keys must already exist.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to update on
the given resource.
Raises:
HTTPErrorList on failure.
|
def remove_from_bin(self, name):
    """ Remove an object from the bin folder. """
    # Delegates deletion of <root_dir>/bin/<name> to the private
    # path-removal helper.
    self.__remove_path(os.path.join(self.root_dir, "bin", name))
|
Remove an object from the bin folder.
|
def create_archive(path, remove_path=True):
    """
    Creates a tar.gz of the path using the path basename + ".tar.gz".
    The resulting file is in the parent directory of the original path, and
    the original path is removed when ``remove_path`` is True.

    :param path: path to archive
    :param remove_path: when True (default), delete the original path
        after archiving
    :return: the path of the created archive
    """
    root_path = os.path.dirname(path)
    relative_path = os.path.basename(path)
    archive_path = path + ".tar.gz"
    # -C switches tar into the parent dir so the archive stores only the
    # basename, not the full path.
    # NOTE(review): `call` appears to accept a list of argv lists — confirm
    # against its definition.
    cmd = [["tar", "-C", root_path, "-czf", archive_path, relative_path]]
    call(cmd, env=SAFE_ENV)
    if remove_path:
        fs.remove(path)
    return archive_path
|
Creates a tar.gz of the path using the path basename + "tar.gz"
The resulting file is in the parent directory of the original path, and
the original path is removed.
|
def update_positions(self, time, xs, ys, zs, vxs, vys, vzs,
                     ethetas, elongans, eincls,
                     ds=None, Fs=None, ignore_effects=False):
    """
    Cache the current positions on this collection and propagate the new
    kinematics to every contained body.

    All arrays should be for the current time, but iterable over all
    bodies.

    :param time: current time
    :param xs, ys, zs: per-body position arrays
    :param vxs, vys, vzs: per-body velocity arrays
    :param ethetas, elongans, eincls: per-body Euler angles
    :param ds, Fs: optional per-body arrays passed through to each body
        -- TODO confirm semantics against Body.update_position
    :param ignore_effects: forwarded to each body's update_position
    """
    # Cache plain-value copies of the positions on the collection itself.
    self.xs = np.array(_value(xs))
    self.ys = np.array(_value(ys))
    self.zs = np.array(_value(zs))
    # Each body receives the full per-body arrays and picks out its own.
    for starref, body in self.items():
        body.update_position(time, xs, ys, zs, vxs, vys, vzs,
                             ethetas, elongans, eincls,
                             ds=ds, Fs=Fs, ignore_effects=ignore_effects)
|
TODO: add documentation
all arrays should be for the current time, but iterable over all bodies
|
def RollbackAll(close=None):
    """
    Rollback all transactions, according Local.conn

    :param close: deprecated; has no effect beyond emitting a
        DeprecationWarning when truthy.
    """
    if close:
        warnings.simplefilter('default')
        warnings.warn("close parameter will not need at all.", DeprecationWarning)
    # Roll back every engine's existing session; create=False avoids
    # creating sessions just to roll them back.
    for k, v in engine_manager.items():
        session = v.session(create=False)
        if session:
            session.rollback()
|
Rollback all transactions, according Local.conn
|
def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips
|
Get a network interface.
|
def secretfile_args(parser):
    """Add Secretfile management command line arguments to parser.

    Path options default to locations under the current working directory;
    the list options (--tags/--include/--exclude) may be repeated and
    default to empty lists.
    """
    cwd = os.getcwd()
    parser.add_argument('--secrets', dest='secrets',
                        help='Path where secrets are stored',
                        default=os.path.join(cwd, ".secrets"))
    parser.add_argument('--policies', dest='policies',
                        help='Path where policies are stored',
                        default=os.path.join(cwd, "vault", ""))
    parser.add_argument('--secretfile', dest='secretfile',
                        help='Secretfile to use',
                        default=os.path.join(cwd, "Secretfile"))
    # The repeatable list-style options share type/action; add in one loop.
    for flag, dest, help_text in (
            ('--tags', 'tags', 'Tags of things to seed'),
            ('--include', 'include', 'Specify paths to include'),
            ('--exclude', 'exclude', 'Specify paths to exclude')):
        parser.add_argument(flag, dest=dest, help=help_text,
                            default=[], type=str, action='append')
|
Add Secretfile management command line arguments to parser
|
def defaultSTDPKernel(preSynActivation,
                      postSynActivation,
                      dt,
                      inhibitoryPresyn=False,
                      inhibitoryPostsyn=False):
    """
    This function implements a modified version of the STDP kernel from
    Widloski & Fiete, 2014.

    :param preSynActivation: Vector of pre-synaptic activations
    :param postSynActivation: Vector of post-synaptic activations
    :param dt: the difference in time between the two (in seconds), positive
        if after and negative if before
    :param inhibitoryPresyn: True when the pre-synaptic population is inhibitory
    :param inhibitoryPostsyn: True when the post-synaptic population is inhibitory
    :return: A matrix of synapse weight changes.
    """
    scale = 1.0
    timeScale = 1.0
    # Connection-type sign: only I-I connections flip the weight-change
    # sign; I-E and E-I keep the default.
    if inhibitoryPresyn and inhibitoryPostsyn:
        scale = -1.0
    # Timing-dependent magnitude and decay-constant adjustments.
    if not inhibitoryPresyn:
        if dt > 0:
            scale *= 1.2
            timeScale = 4.0
        elif dt < 0:
            # Anti-causal
            timeScale = 3.0
    else:
        if dt > 0:
            scale *= 0.5
            timeScale = 4.0
        elif dt < 0:
            timeScale = 2.0
    decay = np.exp(-1 * np.abs(dt) / (STDP_TIME_CONSTANT * timeScale))
    # dt == 0 yields sign(0) == 0, so simultaneous spikes produce no change.
    return np.outer(preSynActivation * decay * np.sign(dt) * scale,
                    postSynActivation)
|
This function implements a modified version of the STDP kernel from
Widloski & Fiete, 2014.
:param preSynActivation: Vector of pre-synaptic activations
:param postSynActivation: Vector of post-synaptic activations
:param dt: the difference in time between the two (in seconds), positive if
after and negative if before
:return: A matrix of synapse weight changes.
|
def assets2s3():
    """ Upload assets files to S3.

    Builds the application's asset bundles, then pushes every static
    asset to S3 via flask_s3.
    """
    import flask_s3
    header("Assets2S3...")
    print("")
    print("Building assets files..." )
    print("")
    # Compile/collect asset bundles for the Flask app first.
    build_assets(application.app)
    print("")
    print("Uploading assets files to S3 ...")
    flask_s3.create_all(application.app)
    print("")
|
Upload assets files to S3
|
def _register_function(func, con):
    """Register a Python callable with a SQLite connection `con`.

    Parameters
    ----------
    func : callable
        Registered under its ``__name__`` with its exact argument count.
    con : sqlalchemy.Connection
    """
    nargs = number_of_arguments(func)
    # con.connection.connection unwraps the sqlalchemy connection to the
    # raw DBAPI (sqlite3) connection, which exposes create_function.
    con.connection.connection.create_function(func.__name__, nargs, func)
|
Register a Python callable with a SQLite connection `con`.
Parameters
----------
func : callable
con : sqlalchemy.Connection
|
def __add_sentence_root_node(self, sent_number):
    """
    adds the root node of a sentence to the graph and the list of sentences
    (``self.sentences``). the node has a ``tokens`` attribute, which
    contains a list of the tokens (token node IDs) of this sentence.

    Parameters
    ----------
    sent_number : int
        the index of the sentence within the document

    Returns
    -------
    sent_id : str
        the ID of the sentence (e.g. ``'s0'``)
    """
    sent_id = 's{}'.format(sent_number)
    # Sentence node starts with an empty token list; tokens are appended
    # as they are processed.
    self.add_node(sent_id, layers={self.ns, self.ns+':sentence'},
                  tokens=[])
    # Attach the sentence under the document root as a dominance relation.
    self.add_edge(self.root, sent_id,
                  layers={self.ns, self.ns+':sentence'},
                  edge_type=EdgeTypes.dominance_relation)
    self.sentences.append(sent_id)
    return sent_id
|
adds the root node of a sentence to the graph and the list of sentences
(``self.sentences``). the node has a ``tokens` attribute, which
contains a list of the tokens (token node IDs) of this sentence.
Parameters
----------
sent_number : int
the index of the sentence within the document
Results
-------
sent_id : str
the ID of the sentence
|
def cleanup_relations(self):
    """Cleanup listing relations.

    Deletes every relation row no longer referenced by any listing,
    across all collections, then commits the session.
    """
    collections = self.collections
    # Flatten all relation models across all collection models.
    for relation in [x for col in collections.values()
                     for x in col.model.relations.values()]:
        # Delete relations with no remaining listing references;
        # synchronize_session=False skips in-session object bookkeeping.
        db.session.query(relation)\
            .filter(~relation.listing.any())\
            .delete(synchronize_session=False)
    db.session.commit()
|
Cleanup listing relations
|
def FromString(cls, desc):
    """Create a new stimulus from a description string.

    The string must have the format:
    [time: ][system ]input X = Y
    where X and Y are integers. The time, if given must
    be a time_interval, which is an integer followed by a
    time unit such as second(s), minute(s), etc.

    Args:
        desc (str): A string description of the stimulus.
    Returns:
        SimulationStimulus: The parsed stimulus object.
    Raises:
        ArgumentError: if desc does not match the expected format.
    """
    # Lazily initialize the shared pyparsing grammar for stream names.
    if language.stream is None:
        language.get_language()
    # Grammar: optional "<time>:" prefix, then "<stream> = <number>".
    # '-' (pyparsing And with error stop) makes failures after a matched
    # prefix raise ParseSyntaxException instead of backtracking.
    parse_exp = Optional(time_interval('time') - Literal(':').suppress()) - language.stream('stream') - Literal('=').suppress() - number('value')
    try:
        data = parse_exp.parseString(desc)
        time = 0
        if 'time' in data:
            time = data['time'][0]
        return SimulationStimulus(time, data['stream'][0], data['value'])
    except (ParseException, ParseSyntaxException):
        raise ArgumentError("Could not parse stimulus descriptor", descriptor=desc)
|
Create a new stimulus from a description string.
The string must have the format:
[time: ][system ]input X = Y
where X and Y are integers. The time, if given must
be a time_interval, which is an integer followed by a
time unit such as second(s), minute(s), etc.
Args:
desc (str): A string description of the stimulus.
Returns:
SimulationStimulus: The parsed stimulus object.
|
def RenderWidget(self):
    """Returns a QWidget subclass instance. Exact class depends on self.type"""
    t = self.type
    if t == int:
        widget = QSpinBox()
        widget.setMaximum(999999999)
        widget.setValue(self.value)
    elif t == bool:
        widget = QCheckBox()
        widget.setChecked(self.value)
    else:
        # float, str, list and anything else render as a plain line edit.
        widget = QLineEdit()
        widget.setText(str(self.value))
    if self.toolTip is not None:
        widget.setToolTip(self.toolTip)
    self.widget = widget
    return widget
|
Returns a QWidget subclass instance. Exact class depends on self.type
|
def list(self, mask=None):
    """List existing placement groups.

    Calls SoftLayer_Account::getPlacementGroups

    :param mask: optional object mask; a default covering id/name/dates,
        rule, guest count and backend router is used when None.
    """
    if mask is None:
        mask = "mask[id, name, createDate, rule, guestCount, backendRouter[id, hostname]]"
    return self.client.call('Account', 'getPlacementGroups', mask=mask, iter=True)
|
List existing placement groups
Calls SoftLayer_Account::getPlacementGroups
|
def get_email_domain(emailaddr):
    """
    Return the domain component of an email address. Returns None if the
    provided string cannot be parsed as an email address.
    >>> get_email_domain('test@example.com')
    'example.com'
    >>> get_email_domain('test+trailing@example.com')
    'example.com'
    >>> get_email_domain('Example Address <test@example.com>')
    'example.com'
    >>> get_email_domain('foobar')
    >>> get_email_domain('foobar@')
    >>> get_email_domain('@foobar')
    """
    _, address = email.utils.parseaddr(emailaddr)
    parts = address.split('@')
    if len(parts) != 2:
        return None
    username, domain = parts
    if not username or not domain:
        return None
    return domain
|
Return the domain component of an email address. Returns None if the
provided string cannot be parsed as an email address.
>>> get_email_domain('test@example.com')
'example.com'
>>> get_email_domain('test+trailing@example.com')
'example.com'
>>> get_email_domain('Example Address <test@example.com>')
'example.com'
>>> get_email_domain('foobar')
>>> get_email_domain('foo@bar@baz')
'bar'
>>> get_email_domain('foobar@')
>>> get_email_domain('@foobar')
|
def draw(self):
    """Render this Mesh if it is visible.

    Lazily builds the VAO on first use, re-uploads VBO data when the mesh
    is dynamic, binds all textures, sends the uniforms, issues the draw
    call, then unbinds the textures.
    """
    # Lazy VAO construction happens even for invisible meshes (as before).
    if not self.vao:
        self.vao = VAO(indices=self.array_indices)
        self._fill_vao()
    if not self.visible:
        return
    if self.dynamic:
        # Dynamic meshes push fresh vertex data every frame.
        for buf in self.vbos:
            buf._buffer_subdata()
    if self.drawmode == gl.GL_POINTS:
        gl.glPointSize(self.point_size)
    for tex in self.textures:
        tex.bind()
    with self.vao as bound_vao:
        self.uniforms.send()
        bound_vao.draw(mode=self.drawmode)
    for tex in self.textures:
        tex.unbind()
|
Draw the Mesh if it's visible, from the perspective of the camera and lit by the light. The function sends the uniforms
|
def time_sp(self):
    """
    Writing specifies the amount of time the motor will run when using the
    `run-timed` command. Reading returns the current value. Units are in
    milliseconds.
    """
    # get_attr_int returns a (handle, value) pair; cache the handle for reuse.
    attr_handle, current = self.get_attr_int(self._time_sp, 'time_sp')
    self._time_sp = attr_handle
    return current
|
Writing specifies the amount of time the motor will run when using the
`run-timed` command. Reading returns the current value. Units are in
milliseconds.
|
def load_plugin(self, manifest, *args):
    """
    Loads a plugin from the given manifest.

    Dependencies listed in the manifest are loaded first (recursively); if
    any dependency cannot be found or loaded, this plugin is skipped. On any
    failure during loading, the traceback is written to ``error.log`` inside
    the plugin's directory and an error is logged.

    :param manifest: The manifest (dict-like) to use to load the plugin
    :param args: Arguments to pass to the plugin constructor
    """
    if self.get_plugin_loaded(manifest["name"]):
        self._logger.debug("Plugin {} is already loaded.".format(manifest["name"]))
        return
    try:
        self._logger.debug("Attempting to load plugin {}.".format(manifest["name"]))
        # Load declared dependencies first (depth-first).
        for dependency in manifest.get("dependencies", []):
            if not self.get_plugin_loaded(dependency):
                self._logger.debug("Must load dependency {} first.".format(dependency))
                if self.get_manifest(dependency) is None:
                    self._logger.error("Dependency {} could not be found.".format(dependency))
                else:
                    self.load_plugin(self.get_manifest(dependency), *args)
        # Abort if any dependency still failed to load.
        not_loaded = [i for i in manifest.get("dependencies", []) if not self.get_plugin_loaded(i)]
        if len(not_loaded) != 0:
            self._logger.error("Plugin {} failed to load due to missing dependencies. Dependencies: {}".format(
                manifest["name"], ", ".join(not_loaded)
            ))
            return
        if PY3:
            spec = importlib.util.spec_from_file_location(
                manifest.get("module_name", manifest["name"].replace(" ", "_")),
                os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
        else:
            # Python 2 fallback: `imp` is deprecated/removed on modern Python.
            module = imp.load_source(
                manifest.get("module_name", manifest["name"].replace(" ", "_")),
                os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
            )
        module_class = manifest.get("main_class", "Plugin")
        plugin_class = getattr(module, module_class)
        # The plugin's entry class must derive from the expected base class.
        if issubclass(plugin_class, self._plugin_class):
            plugin = plugin_class(manifest, *args)
        else:
            self._logger.error("Failed to load {} due to invalid baseclass.".format(manifest["name"]))
            return
        self._plugins[manifest["name"]] = plugin
        self._modules[manifest["name"]] = module
        self._logger.debug("Plugin {} loaded.".format(manifest["name"]))
    except Exception:
        # Was a bare `except:` — that also swallowed SystemExit and
        # KeyboardInterrupt, which should never be treated as plugin errors.
        exc_path = os.path.join(manifest["path"], "error.log")
        with open(exc_path, "w") as f:
            f.write(traceback.format_exc(5))
        self._logger.error("Failed to load plugin {}. Error log written to {}.".format(manifest["name"], exc_path))
|
Loads a plugin from the given manifest
:param manifest: The manifest to use to load the plugin
:param args: Arguments to pass to the plugin
|
def add_index(collection,
              name,
              fields,
              transformer=None,
              unique=False,
              case_insensitive=False):
    """
    Add a secondary index for a collection ``collection`` on one or
    more ``fields``.
    The values at each of the ``fields`` are loaded from existing
    objects and their object ids added to the index.
    You can later iterate the objects of an index via
    ``each_indexed_object``.
    If you update an object and call ``save_object``, the index will
    be updated with the latest values from the updated object.
    If you delete an object via ``delete_object``, the object will
    be removed from any indexes on the object's collection.
    If a function is provided for ``transformer``, the values
    extracted from each object in the collection will be passed to
    the ``transformer``. The ``transformer`` should return a list
    of values that will go into the index.
    If ``unique`` is true, then there may only be at most one object
    in the collection with a unique set of values for each the
    ``fields`` provided.
    If ``case_insensitive`` is true, then the value stored in the
    index will be lower-cased and comparisons thereto will be
    lower-cased as well.
    """
    # NOTE: `assert` validation is stripped when Python runs with -O.
    assert len(name) > 0
    assert len(fields) > 0
    indexes = _db[collection].indexes
    # Re-adding an index with an existing name reuses the stored entry and
    # overwrites its configuration below (value_map is reset either way).
    index = indexes.setdefault(name, aadict())
    index.transformer = transformer
    index.value_map = {} # json([value]) => set(object_id)
    index.unique = unique
    index.case_insensitive = case_insensitive
    index.fields = fields
    # Backfill: index every object already present in the collection.
    for obj in each_object(collection):
        _add_to_index(index, obj)
    _logger.info('added %s, %s index to collection %s on fields: %s',
                 'unique' if unique else 'non-unique',
                 'case-insensitive' if case_insensitive else 'case-sensitive',
                 collection, ', '.join(fields))
|
Add a secondary index for a collection ``collection`` on one or
more ``fields``.
The values at each of the ``fields`` are loaded from existing
objects and their object ids added to the index.
You can later iterate the objects of an index via
``each_indexed_object``.
If you update an object and call ``save_object``, the index will
be updated with the latest values from the updated object.
If you delete an object via ``delete_object``, the object will
be removed from any indexes on the object's collection.
If a function is provided for ``transformer``, the values
extracted from each object in the collection will be passed to
the ``transformer``. The ``transformer`` should return a list
of values that will go into the index.
If ``unique`` is true, then there may only be at most one object
in the collection with a unique set of values for each the
``fields`` provided.
If ``case_insensitive`` is true, then the value stored in the
index will be lower-cased and comparisons thereto will be
lower-cased as well.
|
def pysal_G(self, **kwargs):
    """
    Compute Getis and Ord's G statistic for this GeoRaster.

    Usage:
        geo.pysal_G(permutations = 1000, rook=True)

    Keyword arguments are forwarded to both raster_weights() and pysal.G.
    See help(gr.raster_weights), help(pysal.G) for options.
    """
    # Build the spatial weights lazily on first use.
    if self.weights is None:
        self.raster_weights(**kwargs)
    flattened = self.raster.flatten()
    # Keep only unmasked cells (comparison against the mask array).
    valid_cells = flattened[flattened.mask == False]
    self.G = pysal.G(valid_cells, self.weights, **kwargs)
|
Compute Getis and Ord’s G for GeoRaster
Usage:
geo.pysal_G(permutations = 1000, rook=True)
arguments passed to raster_weights() and pysal.G
See help(gr.raster_weights), help(pysal.G) for options
|
def _clean_metadata(self):
    """
    The long description doesn't load properly (it acquires unwanted
    8-space indents), so strip one indent level from every line but the
    first.
    """
    raw = self.metadata.get_long_description()
    if not isinstance(raw, six.text_type):
        raw = raw.decode('utf-8')

    def _dedent8(line):
        # Strip one level of 8-space indentation, when present.
        return line[8:] if line.startswith(' ' * 8) else line

    stream = io.StringIO(raw)
    # islice consumes the first line untouched; map dedents the remainder.
    cleaned = itertools.chain(
        itertools.islice(stream, 1),
        six.moves.map(_dedent8, stream),
    )
    self.metadata.long_description = ''.join(cleaned)
|
the long description doesn't load properly (gets unwanted indents),
so fix it.
|
def write_stream(self, stream, validate=True):
    """
    Write :attr:`metainfo` to a file-like object.

    Before any data is written, `stream` is truncated if possible.

    :param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
    :param bool validate: Whether to run :meth:`validate` first
    :raises WriteError: if writing to `stream` fails
    :raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
        contains invalid data
    """
    # Serialize first: if dump() raises, the stream is left untouched.
    payload = self.dump(validate=validate)
    try:
        if stream.seekable():
            # Only clobber existing stream content once we have valid data.
            stream.seek(0)
            stream.truncate(0)
        stream.write(payload)
    except OSError as exc:
        raise error.WriteError(exc.errno)
|
Write :attr:`metainfo` to a file-like object
Before any data is written, `stream` is truncated if possible.
:param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
:param bool validate: Whether to run :meth:`validate` first
:raises WriteError: if writing to `stream` fails
:raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
contains invalid data
|
def text(self, value):
    """Set the comment's text content, rejecting None/empty/blank values."""
    # None and '' are caught by the membership test before .strip() runs.
    is_blank = value in (None, '') or value.strip() == ""
    if is_blank:
        raise AttributeError("Empty text value is invalid.")
    self._text = value
|
Set Text content for Comment (validation of input)
|
def get(self, copy=False):
    """Return the attribute's current value.

    :param copy: when True, return a copy of the underlying array instead
        of the array itself.
    """
    current = getattr(self.owner, self.name)
    return current.copy() if copy else current
|
Return the value of the attribute
|
def retry(retries=10, wait=5, catch=None):
    """
    Decorator factory that retries the wrapped function when it raises.

    :param retries: maximum number of attempts (default 10)
    :param wait: seconds to sleep between attempts (default 5)
    :param catch: tuple of exception types that trigger a retry; defaults
        to ``(Exception,)``. Exceptions outside ``catch`` propagate
        immediately with their original traceback.
    :raises DSBException: when all ``retries`` attempts fail.
    """
    import functools
    catch = catch or (Exception,)

    def real_retry(function):
        @functools.wraps(function)  # preserve __name__/__doc__ of the wrapped callable
        def wrapper(*args, **kwargs):
            for _ in range(retries):
                try:
                    return function(*args, **kwargs)
                except catch:
                    time.sleep(wait)
                # Anything outside `catch` propagates naturally here;
                # the old `except Exception as e: raise e` re-raised from
                # this frame and obscured the original traceback.
            raise DSBException('Retries limit exceeded.')
        return wrapper
    return real_retry
|
Decorator to retry on exceptions raised
|
def global_closeness_centrality(g, node=None, normalize=True):
    """
    Calculates global closeness centrality for one or all nodes in the network.
    See :func:`.node_global_closeness_centrality` for more information.

    Parameters
    ----------
    g : networkx.Graph
    node : hashable, optional
        If given, compute the value for this node only; otherwise compute
        for every node in ``g``.
    normalize : boolean
        If True, normalizes centrality based on the average shortest path
        length. Default is True.

    Returns
    -------
    C : dict or float
        Dictionary of results (node identifier -> gcc) when ``node`` is
        None, otherwise the gcc value for ``node``.
    """
    if node is None:
        # BUG FIX: was `if not node`, which wrongly took this branch for
        # falsy-but-valid node identifiers such as 0 or ''.
        return {n: global_closeness_centrality(g, n, normalize=normalize)
                for n in g.nodes()}
    values = nx.shortest_path_length(g, node).values()
    # Sum of inverse path lengths to all reachable nodes (self excluded).
    c = sum(1. / pl for pl in values if pl != 0.) / len(g)
    if normalize:
        ac = 0
        # NOTE(review): connected_component_subgraphs was removed in
        # networkx >= 2.4 -- presumably this project pins an older
        # networkx; verify.
        for sg in nx.connected_component_subgraphs(g):
            if len(sg.nodes()) > 1:
                aspl = nx.average_shortest_path_length(sg)
                ac += (1. / aspl) * (float(len(sg)) / float(len(g)) ** 2)
        c = c / ac
    return c
|
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
|
def purge(self):
    """Delete every task in the queue.

    Prefers the backend's native purge; when the API does not provide one
    (AttributeError), falls back to listing and deleting tasks until the
    queue is empty.
    """
    try:
        return self._api.purge()
    except AttributeError:
        while True:
            pending = self.list()
            if not pending:
                break
            for item in pending:
                self.delete(item)
            self.wait()
        return self
|
Deletes all tasks in the queue.
|
def estimate_lmax(self, method='lanczos'):
    r"""Estimate the Laplacian's largest eigenvalue (cached).
    The result is cached and accessible by the :attr:`lmax` property.
    Exact value given by the eigendecomposition of the Laplacian, see
    :func:`compute_fourier_basis`. That estimation is much faster than the
    eigendecomposition.
    Parameters
    ----------
    method : {'lanczos', 'bounds'}
        Whether to estimate the largest eigenvalue with the implicitly
        restarted Lanczos method, or to return an upper bound on the
        spectrum of the Laplacian.
    Notes
    -----
    Runs the implicitly restarted Lanczos method (as implemented in
    :func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then
    increases the calculated largest eigenvalue by 1 percent. For much of
    the PyGSP machinery, we need to approximate filter kernels on an
    interval that contains the spectrum of L. The only cost of using a
    larger interval is that the polynomial approximation over the larger
    interval may be a slightly worse approximation on the actual spectrum.
    As this is a very mild effect, it is not necessary to obtain very tight
    bounds on the spectrum of L.
    A faster but less tight alternative is to use known algebraic bounds on
    the graph Laplacian.
    Examples
    --------
    >>> G = graphs.Logo()
    >>> G.compute_fourier_basis()  # True value.
    >>> print('{:.2f}'.format(G.lmax))
    13.78
    >>> G.estimate_lmax(method='lanczos')  # Estimate.
    >>> print('{:.2f}'.format(G.lmax))
    13.92
    >>> G.estimate_lmax(method='bounds')  # Upper bound.
    >>> print('{:.2f}'.format(G.lmax))
    18.58
    """
    if method == self._lmax_method:
        # Already computed with this method; use the cached value.
        return
    if method == 'lanczos':
        try:
            # We need to cast the matrix L to a supported type.
            # TODO: not good for memory. Cast earlier?
            lmax = sparse.linalg.eigsh(self.L.asfptype(), k=1, tol=5e-3,
                                       ncv=min(self.N, 10),
                                       return_eigenvectors=False)
            lmax = lmax[0]
            assert lmax <= self._get_upper_bound() + 1e-12
            lmax *= 1.01  # Increase by 1% to be robust to errors.
            self._lmax = lmax
        except sparse.linalg.ArpackNoConvergence:
            raise ValueError('The Lanczos method did not converge. '
                             'Try to use bounds.')
    elif method == 'bounds':
        self._lmax = self._get_upper_bound()
    else:
        raise ValueError('Unknown method {}'.format(method))
    # BUG FIX: record the method only *after* success. Previously it was
    # set up-front, so a failed call (unknown method or no convergence)
    # would poison the cache: a repeated call with the same argument
    # returned silently without _lmax ever being set.
    self._lmax_method = method
|
r"""Estimate the Laplacian's largest eigenvalue (cached).
The result is cached and accessible by the :attr:`lmax` property.
Exact value given by the eigendecomposition of the Laplacian, see
:func:`compute_fourier_basis`. That estimation is much faster than the
eigendecomposition.
Parameters
----------
method : {'lanczos', 'bounds'}
Whether to estimate the largest eigenvalue with the implicitly
restarted Lanczos method, or to return an upper bound on the
spectrum of the Laplacian.
Notes
-----
Runs the implicitly restarted Lanczos method (as implemented in
:func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then
increases the calculated largest eigenvalue by 1 percent. For much of
the PyGSP machinery, we need to approximate filter kernels on an
interval that contains the spectrum of L. The only cost of using a
larger interval is that the polynomial approximation over the larger
interval may be a slightly worse approximation on the actual spectrum.
As this is a very mild effect, it is not necessary to obtain very tight
bounds on the spectrum of L.
A faster but less tight alternative is to use known algebraic bounds on
the graph Laplacian.
Examples
--------
>>> G = graphs.Logo()
>>> G.compute_fourier_basis() # True value.
>>> print('{:.2f}'.format(G.lmax))
13.78
>>> G.estimate_lmax(method='lanczos') # Estimate.
>>> print('{:.2f}'.format(G.lmax))
13.92
>>> G.estimate_lmax(method='bounds') # Upper bound.
>>> print('{:.2f}'.format(G.lmax))
18.58
|
def format_color(text, color, use_color_setting):
    """Wrap *text* in terminal color codes when coloring is enabled.

    Args:
        text - Text to be formatted with color if `use_color_setting`
        color - The color start escape sequence
        use_color_setting - Whether or not to apply color
    """
    if use_color_setting:
        # NORMAL resets the terminal back to the default style.
        return '{}{}{}'.format(color, text, NORMAL)
    return text
|
Format text with color.
Args:
text - Text to be formatted with color if `use_color`
color - The color start string
use_color_setting - Whether or not to color
|
def exam_reliability(x_axis, x_axis_new, reliable_distance, precision=0.0001):
    """When we do linear interpolation on ``x_axis`` and derive values for
    ``x_axis_new``, we also evaluate how much we can trust those
    interpolated data points. This is how it works:

    For each new point ``t`` in ``x_axis_new``, find the closest point in
    ``x_axis`` and call the distance ``dist``. If
    ``dist <= reliable_distance + precision`` we trust the interpolated
    point, otherwise we don't. The ``precision`` term absorbs floating
    point representation error (1.0 may actually be 1.00000000001 or
    0.999999999999).

    Both ``x_axis`` and ``x_axis_new`` are assumed sorted ascending, which
    makes this a single O(n) merge pass instead of a per-point binary
    search.

    :param x_axis: sorted known coordinates (not mutated).
    :param x_axis_new: sorted coordinates to evaluate.
    :param reliable_distance: maximum trusted distance to a known point.
    :param precision: float-comparison slack added to reliable_distance.
    :return: list of bool flags, one per entry of ``x_axis_new``.
    """
    # Work on a reversed copy so pop() yields the smallest remaining value.
    stack = x_axis[::-1]
    # Sentinel far below any realistic coordinate, so `left` is always
    # bound after the first pop.
    stack.append(-2 ** 32)
    distance_to_closest = []
    for target in x_axis_new:
        while True:
            try:
                candidate = stack.pop()
            except IndexError:
                # BUG FIX: was a bare `except:`. Only "stack exhausted" is
                # expected here: every remaining target lies to the right
                # of x_axis, so its nearest neighbour is the last `left`.
                distance_to_closest.append(target - left)
                break
            if candidate <= target:
                left = candidate
            else:
                right = candidate
                # Push both neighbours back: the next (sorted) target may
                # fall in the same interval.
                stack.append(right)
                stack.append(left)
                distance_to_closest.append(min(target - left, right - target))
                break
    # dist - precision - reliable_distance <= 0  <=>  dist <= rd + precision
    return [dist - precision <= reliable_distance
            for dist in distance_to_closest]
|
When we do linear interpolation on x_axis and derive value for
x_axis_new, we also evaluate how can we trust those interpolated
data points. This is how it works:
For each new x_axis point in x_axis new, let's say xi. Find the closest
point in x_axis, suppose the distance is #dist. Compare this to
#reliable_distance. If #dist < #reliable_distance, then we can trust it,
otherwise, we can't.
The precision is to handle decimal value's precision problem. Because
1.0 may actually is 1.00000000001 or 0.999999999999 in computer system.
So we define that: if ``dist`` + ``precision`` <= ``reliable_distance``, then we
can trust it, else, we can't.
Here is an O(n) algorithm implementation — a big improvement over the
classic binary-search approach, which is O(n^2).
|
def get_reward_and_done(board):
    """Given a representation of the board, returns (reward, done).

    reward: -1 means lost, +1 means win, 0 means draw or continuing.
    done: True if the game is over, i.e. someone won or it is a draw.
    """
    # Sum all rows, all columns, and both diagonals.
    line_sums = [np.sum(board[r, :]) for r in range(3)]
    line_sums += [np.sum(board[:, c]) for c in range(3)]
    line_sums.append(np.sum([board[d, d] for d in range(3)]))
    line_sums.append(np.sum([board[d, 2 - d] for d in range(3)]))
    # A line summing to +/-3 means three-in-a-row for that player.
    if -3 in line_sums:
        return -1, True
    if 3 in line_sums:
        return 1, True
    # No winner: the game is done only when no open spaces remain.
    return 0, not get_open_spaces(board)
|
Given a representation of the board, returns reward and done.
|
def connection_made(self, transport: asyncio.transports.Transport):
    """Callback invoked when the connection is established.

    Sets up per-connection state (transport, peer info, stream reader and
    writer), registers the connection, schedules the request-handling task
    on the event loop, and — if a timeout is configured — schedules
    ``timeout_callback`` as well.

    Parameters:
        transport (asyncio.transports.Transport): transport for this connection
    """
    self._transport = transport
    # Remote peer address, kept in `_extra` for structured logging context.
    self._remote_host = self._transport.get_extra_info('peername')
    self._extra = {"client": str(self._remote_host)}
    # Register this connection in the shared set so it can be enumerated.
    self.connections.add(self)
    self._stream_reader = asyncio.StreamReader(loop=self._loop)
    self._stream_writer = asyncio.StreamWriter(transport, self,
                                               self._stream_reader,
                                               self._loop)
    super().connection_made(transport)
    if self.timeout:
        # NOTE(review): call_soon fires on the next loop iteration, not
        # after `timeout` seconds — presumably timeout_callback checks
        # elapsed time and re-schedules itself; verify (otherwise
        # call_later(self.timeout, ...) would be expected here).
        self._timeout_handler = self._loop.call_soon(
            self.timeout_callback)
    # Start the per-connection request handler task.
    self._handlertask = asyncio.ensure_future(self.query_handler())
    if self.debug:
        access_logger.info("connected", extra=self._extra)
|
连接建立起来触发的回调函数.
用于设定一些参数,并将监听任务放入事件循环,如果设置了timeout,也会将timeout_callback放入事件循环
Parameters:
transport (asyncio.Transports): - 连接的传输对象
|
def build_duration_pretty(self):
    """Return the difference between the 'building' and 'built' states in a
    human readable format, or None when the bundle never started building
    (or the timestamps are unusable)."""
    from ambry.util import pretty_time
    from time import time
    if not self.state.building:
        return None
    # Still building: measure against "now" instead of a finish timestamp.
    finished = self.state.built or time()
    try:
        return pretty_time(int(finished) - int(self.state.building))
    except TypeError:
        # One of the timestamps is None or not a number.
        return None
|
Return the difference between build and build_done states, in a human readable format
|
def payment_init(self, wallet):
    """
    Marks all accounts in wallet as available for being used as a payment
    session.
    :param wallet: Wallet to init payment in
    :type wallet: str
    :raises: :py:exc:`nano.rpc.RPCException`
    >>> rpc.payment_init(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
    ... )
    True
    """
    # Validate/normalize the wallet id before building the RPC payload.
    response = self.call(
        'payment_init',
        {"wallet": self._process_value(wallet, 'wallet')},
    )
    return response['status'] == 'Ready'
|
Marks all accounts in wallet as available for being used as a payment
session.
:param wallet: Wallet to init payment in
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.payment_init(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
|
def default_instance(cls):
    """
    For use like a singleton, return the existing instance of the object
    or a new instance
    """
    # Double-checked locking: the first (unlocked) check keeps the common
    # path lock-free; the second check inside the lock prevents two racing
    # threads from each creating an instance.
    if cls._instance is None:
        with cls._instance_lock:
            if cls._instance is None:
                cls._instance = FlowsLogger()
    return cls._instance
|
For use like a singleton, return the existing instance of the object
or a new instance
|
def call_hook(self, name, **kwargs):
    """Call all hooks registered under *name*.

    Returns the non-None return values of the hooks, in the order the
    hooks were added.
    """
    results = []
    # Hooks are stored as (callable, priority-ish) pairs; call in order.
    for hook, _ in self._hooks.get(name, []):
        outcome = hook(**kwargs)
        if outcome is not None:
            results.append(outcome)
    return results
|
Call all hooks registered with this name. Returns a list of the returns values of the hooks (in the order the hooks were added)
|
def set_spcPct(self, value):
    """
    Set line spacing to *value* lines, e.g. 1.75 lines. Any ./a:spcPts
    child element is removed first, since the two are mutually exclusive.
    """
    self._remove_spcPts()
    self.get_or_add_spcPct().val = value
|
Set spacing to *value* lines, e.g. 1.75 lines. A ./a:spcPts child is
removed if present.
|
def iterrows(self, workbook=None):
    """
    Yield rows as lists of data.

    The data is exactly as it is in the source pandas DataFrames and
    any formulas are not resolved.
    """
    resolved_tables = []
    max_height = 0
    max_width = 0
    # while yielding rows __formula_values is updated with any formula values set on Expressions
    self.__formula_values = {}
    for name, (table, (row, col)) in list(self.__tables.items()):
        # get the resolved 2d data array from the table
        #
        # expressions with no explicit table will use None when calling
        # get_table/get_table_pos, which should return the current table.
        #
        self.__tables[None] = (table, (row, col))
        data = table.get_data(workbook, row, col, self.__formula_values)
        del self.__tables[None]
        height, width = data.shape
        upper_left = (row, col)
        lower_right = (row + height - 1, col + width - 1)
        max_height = max(max_height, lower_right[0] + 1)
        max_width = max(max_width, lower_right[1] + 1)
        resolved_tables.append((name, data, upper_left, lower_right))
    for row, col in self.__values.keys():
        # BUG FIX: these two were swapped (row extended max_width and col
        # extended max_height), causing an IndexError below at table[r][c]
        # for any standalone value outside the tables' footprint.
        max_height = max(max_height, row + 1)
        max_width = max(max_width, col + 1)
    # Build the whole table up-front. Doing it row by row is too slow.
    table = [[None] * max_width for i in range(max_height)]
    for name, data, upper_left, lower_right in resolved_tables:
        for i, r in enumerate(range(upper_left[0], lower_right[0] + 1)):
            for j, c in enumerate(range(upper_left[1], lower_right[1] + 1)):
                table[r][c] = data[i][j]
    for (r, c), value in self.__values.items():
        if isinstance(value, Value):
            value = value.value
        if isinstance(value, Expression):
            if value.has_value:
                self.__formula_values[(r, c)] = value.value
            value = value.get_formula(workbook, r, c)
        table[r][c] = value
    for row in table:
        yield row
|
Yield rows as lists of data.
The data is exactly as it is in the source pandas DataFrames and
any formulas are not resolved.
|
def _parse_lines(self, diff_lines):
"""
Given the diff lines output from `git diff` for a particular
source file, return a tuple of `(ADDED_LINES, DELETED_LINES)`
where `ADDED_LINES` and `DELETED_LINES` are lists of line
numbers added/deleted respectively.
Raises a `GitDiffError` if the diff lines are in an invalid format.
"""
added_lines = []
deleted_lines = []
current_line_new = None
current_line_old = None
for line in diff_lines:
# If this is the start of the hunk definition, retrieve
# the starting line number
if line.startswith('@@'):
line_num = self._parse_hunk_line(line)
current_line_new, current_line_old = line_num, line_num
# This is an added/modified line, so store the line number
elif line.startswith('+'):
# Since we parse for source file sections before
# calling this method, we're guaranteed to have a source
# file specified. We check anyway just to be safe.
if current_line_new is not None:
# Store the added line
added_lines.append(current_line_new)
# Increment the line number in the file
current_line_new += 1
# This is a deleted line that does not exist in the final
# version, so skip it
elif line.startswith('-'):
# Since we parse for source file sections before
# calling this method, we're guaranteed to have a source
# file specified. We check anyway just to be safe.
if current_line_old is not None:
# Store the deleted line
deleted_lines.append(current_line_old)
# Increment the line number in the file
current_line_old += 1
# This is a line in the final version that was not modified.
# Increment the line number, but do not store this as a changed
# line.
else:
if current_line_old is not None:
current_line_old += 1
if current_line_new is not None:
current_line_new += 1
# If we are not in a hunk, then ignore the line
else:
pass
return added_lines, deleted_lines
|
Given the diff lines output from `git diff` for a particular
source file, return a tuple of `(ADDED_LINES, DELETED_LINES)`
where `ADDED_LINES` and `DELETED_LINES` are lists of line
numbers added/deleted respectively.
Raises a `GitDiffError` if the diff lines are in an invalid format.
|
def cape_cin(pressure, temperature, dewpt, parcel_profile):
    r"""Calculate CAPE and CIN.
    Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
    of a given upper air profile and parcel path. CIN is integrated between the surface and
    LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of
    the measured temperature profile and parcel profile are linearly interpolated.
    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure level(s) of interest. The first entry should be the starting
        point pressure.
    temperature : `pint.Quantity`
        The atmospheric temperature corresponding to pressure.
    dewpt : `pint.Quantity`
        The atmospheric dew point corresponding to pressure.
    parcel_profile : `pint.Quantity`
        The temperature profile of the parcel
    Returns
    -------
    `pint.Quantity`
        Convective available potential energy (CAPE).
    `pint.Quantity`
        Convective inhibition (CIN).
    Notes
    -----
    Formula adopted from [Hobbs1977]_.
    .. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
    .. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
    * :math:`CAPE` Convective available potential energy
    * :math:`CIN` Convective inhibition
    * :math:`LFC` Pressure of the level of free convection
    * :math:`EL` Pressure of the equilibrium level
    * :math:`SFC` Level of the surface or beginning of parcel path
    * :math:`R_d` Gas constant
    * :math:`g` Gravitational acceleration
    * :math:`T_{parcel}` Parcel temperature
    * :math:`T_{env}` Environment temperature
    * :math:`p` Atmospheric pressure
    See Also
    --------
    lfc, el
    """
    # Calculate LFC limit of integration
    lfc_pressure, _ = lfc(pressure, temperature, dewpt,
                          parcel_temperature_profile=parcel_profile)
    # If there is no LFC, no need to proceed: no free convection means
    # both CAPE and CIN are zero by definition.
    if np.isnan(lfc_pressure):
        return 0 * units('J/kg'), 0 * units('J/kg')
    else:
        lfc_pressure = lfc_pressure.magnitude
    # Calculate the EL limit of integration
    el_pressure, _ = el(pressure, temperature, dewpt,
                        parcel_temperature_profile=parcel_profile)
    # No EL and we use the top reading of the sounding.
    if np.isnan(el_pressure):
        el_pressure = pressure[-1].magnitude
    else:
        el_pressure = el_pressure.magnitude
    # Difference between the parcel path and measured temperature profiles
    # (in kelvin, so the integrand has consistent units).
    y = (parcel_profile - temperature).to(units.degK)
    # Estimate zero crossings so the integration limits land exactly where
    # the parcel and environment profiles intersect.
    x, y = _find_append_zero_crossings(np.copy(pressure), y)
    # CAPE
    # Only use data between the LFC and EL for calculation
    # (pressure decreases with height, hence "less than LFC, greater than EL").
    p_mask = _less_or_close(x, lfc_pressure) & _greater_or_close(x, el_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    cape = (mpconsts.Rd
            * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
    # CIN
    # Only use data between the surface and LFC for calculation
    p_mask = _greater_or_close(x, lfc_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    cin = (mpconsts.Rd
           * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
    return cape, cin
|
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of
the measured temperature profile and parcel profile are linearly interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpt : `pint.Quantity`
The atmospheric dew point corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel
Returns
-------
`pint.Quantity`
Convective available potential energy (CAPE).
`pint.Quantity`
Convective inhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
|
def find(cls, dtype):
    """Returns the NDS2 type corresponding to the given python type.

    Resolution order: direct lookup in the enum's member map; failing
    that, coerce ``dtype`` through ``numpy.dtype`` and match members by
    their numpy dtype; if coercion itself fails, match members by raw
    ``value`` identity.

    :raises ValueError: if no member matches ``dtype``.
    """
    try:
        # Fast path: dtype is itself a key of the member map.
        return cls._member_map_[dtype]
    except KeyError:
        try:
            # Normalise inputs like 'float32' or dtype objects to a
            # numpy scalar type.
            dtype = numpy.dtype(dtype).type
        except TypeError:
            # Not coercible to a numpy dtype: compare raw member values
            # by identity instead.
            for ndstype in cls._member_map_.values():
                if ndstype.value is dtype:
                    return ndstype
        else:
            # try/else: this branch runs only when numpy.dtype() above
            # succeeded, so compare by each member's numpy dtype.
            for ndstype in cls._member_map_.values():
                if ndstype.value and ndstype.numpy_dtype is dtype:
                    return ndstype
    raise ValueError('%s is not a valid %s' % (dtype, cls.__name__))
|
Returns the NDS2 type corresponding to the given python type
|
def _make_value_pb(value):
    """Helper for :func:`_make_list_value_pbs`.
    :type value: scalar value
    :param value: value to convert
    :rtype: :class:`~google.protobuf.struct_pb2.Value`
    :returns: value protobufs
    :raises ValueError: if value is not of a known scalar type.

    NOTE: the order of the isinstance checks below is load-bearing; see
    the inline comments.
    """
    if value is None:
        return Value(null_value="NULL_VALUE")
    if isinstance(value, (list, tuple)):
        return Value(list_value=_make_list_value_pb(value))
    # bool must be checked before the integer branch: bool is a subclass
    # of int in Python and would otherwise be serialized as a string.
    if isinstance(value, bool):
        return Value(bool_value=value)
    # Integers are transmitted as decimal strings (str(value)).
    if isinstance(value, six.integer_types):
        return Value(string_value=str(value))
    if isinstance(value, float):
        # Non-finite floats cannot be represented in number_value, so
        # they are encoded as the sentinel strings below.
        if math.isnan(value):
            return Value(string_value="NaN")
        if math.isinf(value):
            if value > 0:
                return Value(string_value="Infinity")
            else:
                return Value(string_value="-Infinity")
        return Value(number_value=value)
    # Checked before the generic datetime branch so nanosecond-precision
    # timestamps keep their full resolution via rfc3339().
    if isinstance(value, datetime_helpers.DatetimeWithNanoseconds):
        return Value(string_value=value.rfc3339())
    # datetime must precede date: datetime is a subclass of date.
    if isinstance(value, datetime.datetime):
        return Value(string_value=_datetime_to_rfc3339(value))
    if isinstance(value, datetime.date):
        return Value(string_value=value.isoformat())
    if isinstance(value, six.binary_type):
        value = _try_to_coerce_bytes(value)
        return Value(string_value=value)
    if isinstance(value, six.text_type):
        return Value(string_value=value)
    if isinstance(value, ListValue):
        return Value(list_value=value)
    raise ValueError("Unknown type: %s" % (value,))
|
Helper for :func:`_make_list_value_pbs`.
:type value: scalar value
:param value: value to convert
:rtype: :class:`~google.protobuf.struct_pb2.Value`
:returns: value protobufs
:raises ValueError: if value is not of a known scalar type.
|
def same_guid(post, parameter=DEFAULT_SIMILARITY_TIMESPAN):
    '''Skip posts with exactly same GUID.
    Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).'''
    from feedjack.models import Post
    # The parameter may arrive as a string from configuration.
    if isinstance(parameter, types.StringTypes):
        parameter = int(parameter.strip())
    matches = Post.objects.filtered(for_display=False)\
        .exclude(id=post.id).filter(guid=post.guid)
    if parameter:
        # 0 (or falsy) means "no time limit" — otherwise restrict to the window.
        cutoff = timezone.now() - timedelta(seconds=parameter)
        matches = matches.filter(date_updated__gt=cutoff)
    # Keep the post (True) only when no other post shares its GUID.
    return not matches.exists()
|
Skip posts with exactly same GUID.
Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).
|
def _script_names(dist, script_name, is_gui):
    """Create the fully qualified name of the files created by
    {console,gui}_scripts for the given ``dist``.
    Returns the list of file names
    """
    bin_dir = bin_user if dist_in_usersite(dist) else bin_py
    exe_name = os.path.join(bin_dir, script_name)
    paths_to_remove = [exe_name]
    if WINDOWS:
        # Windows installs a launcher .exe, its manifest, and a -script file.
        wrapper = '-script.pyw' if is_gui else '-script.py'
        paths_to_remove.extend([
            exe_name + '.exe',
            exe_name + '.exe.manifest',
            exe_name + wrapper,
        ])
    return paths_to_remove
|
Create the fully qualified name of the files created by
{console,gui}_scripts for the given ``dist``.
Returns the list of file names
|
def bar3_chart(self, title, labels, data1, file_name, data2, data3, legend=["", ""]):
    """
    Generate a bar plot with three columns in each x position and save it to file_name

    :param title: title to be used in the chart
    :param labels: list of labels for the x axis
    :param data1: values for the first columns
    :param file_name: name of the file in which to save the chart
    :param data2: values for the second columns
    :param data3: values for the third columns
    :param legend: legend to be shown in the chart
    :return:
    """
    data1 = self.__convert_none_to_zero(data1)
    data2 = self.__convert_none_to_zero(data2)
    data3 = self.__convert_none_to_zero(data3)

    fig, ax = plt.subplots(1)
    xpos = np.arange(len(data1))
    # Single source of truth for bar width (was a repeated 0.28 literal);
    # unused locals `colors` and `y_pos` removed.
    width = 0.28

    plt.title(title)
    ppl.bar(xpos + width + width, data3, color="orange", width=width, annotate=True)
    ppl.bar(xpos + width, data1, color='grey', width=width, annotate=True)
    ppl.bar(xpos, data2, grid='y', width=width, annotate=True)
    plt.xticks(xpos + width, labels)
    plt.legend(legend, loc=2)

    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    plt.savefig(file_name)
    plt.close()
|
Generate a bar plot with three columns in each x position and save it to file_name
:param title: title to be used in the chart
:param labels: list of labels for the x axis
:param data1: values for the first columns
:param file_name: name of the file in which to save the chart
:param data2: values for the second columns
:param data3: values for the third columns
:param legend: legend to be shown in the chart
:return:
|
def _UserUpdateIgnoredDirs(self, origIgnoredDirs=None):
    """
    Add ignored directories to database table. Always called if the
    database table is empty.

    User can build a list of entries to add to the database table
    (one entry at a time). Once finished they select the finish option
    and all entries will be added to the table. They can reset the
    list at any time before finishing.

    Parameters
    ----------
    origIgnoredDirs : list [optional : default = None]
      List of original ignored directories from database table.
      ``None`` is treated as an empty list.

    Returns
    ----------
    list
      List of updated ignored directories from database table.
    """
    # A fresh list is created per call; the previous mutable default
    # argument ([]) was an anti-pattern.
    if origIgnoredDirs is None:
        origIgnoredDirs = []
    ignoredDirs = list(origIgnoredDirs)

    inputDone = None
    while inputDone is None:
        prompt = "Enter new directory to ignore (e.g. DONE), " \
                 "'r' to reset directory list, " \
                 "'f' to finish or " \
                 "'x' to exit: "
        response = goodlogging.Log.Input("CLEAR", prompt)

        if response.lower() == 'x':
            sys.exit(0)
        elif response.lower() == 'f':
            inputDone = 1
        elif response.lower() == 'r':
            ignoredDirs = []
        else:
            if response is not None:
                ignoredDirs.append(response)

    # Only rewrite the database table when the set of entries changed.
    ignoredDirs = set(ignoredDirs)
    origIgnoredDirs = set(origIgnoredDirs)
    if ignoredDirs != origIgnoredDirs:
        self._db.PurgeIgnoredDirs()
        for ignoredDir in ignoredDirs:
            self._db.AddIgnoredDir(ignoredDir)

    return list(ignoredDirs)
|
Add ignored directories to database table. Always called if the
database table is empty.
User can build a list of entries to add to the database table
(one entry at a time). Once finished they select the finish option
and all entries will be added to the table. They can reset the
list at any time before finishing.
Parameters
----------
origIgnoredDirs : list [optional : default = []]
List of original ignored directories from database table.
Returns
----------
list
List of updated ignored directories from database table.
|
def liquid_precip_ratio(pr, prsn=None, tas=None, freq='QS-DEC'):
    r"""Ratio of rainfall to total precipitation

    The ratio of total liquid precipitation over the total precipitation. If solid precipitation is not provided,
    then precipitation is assumed solid if the temperature is below 0°C.

    Parameters
    ----------
    pr : xarray.DataArray
      Mean daily precipitation flux [Kg m-2 s-1] or [mm].
    prsn : xarray.DataArray
      Mean daily solid precipitation flux [Kg m-2 s-1] or [mm].
    tas : xarray.DataArray
      Mean daily temperature [℃] or [K]
    freq : str
      Resampling frequency

    Returns
    -------
    xarray.DataArray
      Ratio of rainfall to total precipitation

    Notes
    -----
    Let :math:`PR_i` be the mean daily precipitation of day :math:`i`, then for a period :math:`j` starting at
    day :math:`a` and finishing on day :math:`b`:

    .. math::

        PR_{ij} = \sum_{i=a}^{b} PR_i

        PRwet_{ij}

    See also
    --------
    winter_rain_ratio
    """
    if prsn is None:
        # No snowfall provided: derive it from temperature.  The '-' -> '**-'
        # rewrite turns unit strings like 'degC' exponents into a form the
        # units parser accepts — TODO confirm for all unit spellings.
        tu = units.parse_units(tas.attrs['units'].replace('-', '**-'))
        fu = 'degC'
        frz = 0
        if fu != tu:
            # Convert the 0 degC freezing threshold into the units of tas
            # (e.g. Kelvin) before comparing.
            frz = units.convert(frz, fu, tu)
        # Days at or above freezing contribute zero solid precipitation.
        prsn = pr.where(tas < frz, 0)

    # Per-period totals: liquid (rain) = total - solid.
    tot = pr.resample(time=freq).sum(dim='time')
    rain = tot - prsn.resample(time=freq).sum(dim='time')
    ratio = rain / tot
    return ratio
|
r"""Ratio of rainfall to total precipitation
The ratio of total liquid precipitation over the total precipitation. If solid precipitation is not provided,
then precipitation is assumed solid if the temperature is below 0°C.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [Kg m-2 s-1] or [mm].
prsn : xarray.DataArray
Mean daily solid precipitation flux [Kg m-2 s-1] or [mm].
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
freq : str
Resampling frequency
Returns
-------
xarray.DataArray
Ratio of rainfall to total precipitation
Notes
-----
Let :math:`PR_i` be the mean daily precipitation of day :math:`i`, then for a period :math:`j` starting at
day :math:`a` and finishing on day :math:`b`:
.. math::
PR_{ij} = \sum_{i=a}^{b} PR_i
PRwet_{ij}
See also
--------
winter_rain_ratio
|
def get_first_language(self, site_id=None):
    """
    Return the code of the first language configured for the current site.

    Useful for user interfaces where languages are displayed in tabs.
    """
    if site_id is None:
        site_id = getattr(settings, 'SITE_ID', None)

    try:
        site_languages = self[site_id]
        return site_languages[0]['code']
    except (KeyError, IndexError):
        # No per-site configuration — fall back to the default language.
        # This is essentially a non-multilingual configuration.
        return self['default']['code']
|
Return the first language for the current site.
This can be used for user interfaces, where the languages are displayed in tabs.
|
async def uint(self, elem, elem_type, params=None):
    """
    Serialize or deserialize an unsigned integer.

    :param elem: integer to dump (ignored when reading)
    :param elem_type: element type; its WIDTH selects the byte width
    :param params: unused, kept for interface uniformity
    :return: dump result when writing, loaded integer when reading
    """
    width = elem_type.WIDTH
    if not self.writing:
        return await x.load_uint(self.iobj, width)
    return await x.dump_uint(self.iobj, elem, width)
|
Integer types
:param elem:
:param elem_type:
:param params:
:return:
|
def migrateUp(self):
    """
    Copy this LoginAccount and all associated LoginMethods from my store
    (which is assumed to be a SubStore, most likely a user store) into the
    site store which contains it.
    """
    siteStore = self.store.parent
    def _():
        # No convenience method for the following because needing to do it is
        # *rare*. It *should* be ugly; 99% of the time if you need to do this
        # you're making a mistake. -glyph
        siteStoreSubRef = siteStore.getItemByID(self.store.idInParent)
        self.cloneInto(siteStore, siteStoreSubRef)
        IScheduler(self.store).migrateUp()
    # Run the whole copy atomically inside the site store's transaction.
    siteStore.transact(_)
|
Copy this LoginAccount and all associated LoginMethods from my store
(which is assumed to be a SubStore, most likely a user store) into the
site store which contains it.
|
def linguist_field_names(self):
    """
    Return all linguist field names: base fields plus their localized
    variants (example: "title" and "title_fr").
    """
    base_fields = self.model._linguist.fields
    localized_fields = utils.get_language_fields(base_fields)
    return list(base_fields) + list(localized_fields)
|
Returns linguist field names (example: "title" and "title_fr").
|
def check_suspension(user_twitter_id_list):
    """
    Looks up a list of user ids and checks whether they are currently suspended.

    Input:  - user_twitter_id_list: A python list of Twitter user ids in integer format to be looked-up.

    Outputs: - suspended_user_twitter_id_list: A python list of suspended Twitter user ids in integer format.
             - non_suspended_user_twitter_id_list: A python list of non suspended Twitter user ids in integer format.
             - unknown_status_user_twitter_id_list: A python list of unknown status Twitter user ids in integer format.
    """
    ####################################################################################################################
    # Log into my application.
    ####################################################################################################################
    twitter = login()

    ####################################################################################################################
    # Lookup users
    ####################################################################################################################
    # Initialize look-up lists
    suspended_user_twitter_id_list = list()
    non_suspended_user_twitter_id_list = list()
    unknown_status_user_twitter_id_list = list()

    # Split twitter user id list into sub-lists of length 100 (This is the Twitter API function limit).
    user_lookup_counter = 0
    user_lookup_time_window_start = time.perf_counter()
    for hundred_length_sub_list in chunks(list(user_twitter_id_list), 100):
        # Make safe twitter request.
        try:
            api_result, user_lookup_counter, user_lookup_time_window_start\
                = safe_twitter_request_handler(twitter_api_func=twitter.lookup_user,
                                               call_rate_limit=60,
                                               call_counter=user_lookup_counter,
                                               time_window_start=user_lookup_time_window_start,
                                               max_retries=10,
                                               wait_period=2,
                                               parameters=hundred_length_sub_list)
            # The lookup endpoint only hydrates accounts that are accessible;
            # suspended accounts are simply absent from the response.
            returned_ids = set(hydrated_user_object["id"]
                               for hydrated_user_object in api_result)
            # BUGFIX: classify the *requested* ids.  Ids present in the
            # response are active; ids missing from it are suspended.  The
            # previous code compared returned ids against the request list,
            # so suspended users were never detected.
            for requested_id in hundred_length_sub_list:
                if requested_id in returned_ids:
                    non_suspended_user_twitter_id_list.append(requested_id)
                else:
                    suspended_user_twitter_id_list.append(requested_id)
        except (twython.TwythonError, URLError, BadStatusLine):
            # If the call is unsuccesful, we do not know about the status of the users.
            unknown_status_user_twitter_id_list.extend(hundred_length_sub_list)
    return suspended_user_twitter_id_list, non_suspended_user_twitter_id_list, unknown_status_user_twitter_id_list
|
Looks up a list of user ids and checks whether they are currently suspended.
Input: - user_twitter_id_list: A python list of Twitter user ids in integer format to be looked-up.
Outputs: - suspended_user_twitter_id_list: A python list of suspended Twitter user ids in integer format.
- non_suspended_user_twitter_id_list: A python list of non suspended Twitter user ids in integer format.
- unknown_status_user_twitter_id_list: A python list of unknown status Twitter user ids in integer format.
|
def bootstrap_counts_singletraj(dtraj, lagtime, n):
    """
    Samples n count pairs at the given lagtime from the given trajectory.

    Parameters
    ----------
    dtraj : array-like of int
        Discrete trajectory.
    lagtime : int
        Lag time (in steps) between the two states of each sampled pair.
    n : int
        Number of pairs to sample.

    Returns
    -------
    (s, t) : tuple of ndarray
        Start states ``dtraj[I]`` and end states ``dtraj[I + lagtime]``.

    Raises
    ------
    ValueError
        If the trajectory is too short to contain a pair at this lag.
    """
    # check if length is sufficient: a pair (i, i + lagtime) needs at least
    # lagtime + 1 frames, so lagtime == L is also invalid (was `lagtime > L`,
    # which let numpy fail with an obscure error instead).
    L = len(dtraj)
    if lagtime >= L:
        raise ValueError(
            'Cannot sample counts with lagtime ' + str(lagtime) + ' from a trajectory with length ' + str(L))
    # sample: valid start indices are 0 .. L - lagtime - 1.  randint's upper
    # bound is exclusive, so it must be L - lagtime (the previous
    # L - lagtime - 1 wrongly excluded the last valid starting frame).
    I = np.random.randint(0, L - lagtime, size=n)
    J = I + lagtime
    # return state pairs
    return (dtraj[I], dtraj[J])
|
Samples n counts at the given lagtime from the given trajectory
|
def tomography_basis(basis, prep_fun=None, meas_fun=None):
    """
    Build a TomographyBasis object from ``basis``.

    See TomographyBasis for further details.

    Args:
        prep_fun (callable) optional: the function which adds preparation
            gates to a circuit.
        meas_fun (callable) optional: the function which adds measurement
            gates to a circuit.

    Returns:
        TomographyBasis: A tomography basis.
    """
    tomo_basis = TomographyBasis(basis)
    tomo_basis.prep_fun = prep_fun
    tomo_basis.meas_fun = meas_fun
    return tomo_basis
|
Generate a TomographyBasis object.
See TomographyBasis for further details.
Args:
prep_fun (callable) optional: the function which adds preparation
gates to a circuit.
meas_fun (callable) optional: the function which adds measurement
gates to a circuit.
Returns:
TomographyBasis: A tomography basis.
|
def text(self, prompt, default=None):
    """Prompt the user for a line of text, with an optional default value."""
    if prompt is None:
        prompt = 'Enter some text'
    if default is not None:
        prompt += " [{0}]: ".format(default)
    else:
        prompt += ': '
    return self.input(curry(filter_text, default=default), prompt)
|
Prompts the user for some text, with optional default
|
def blank(columns=1, name=None):
    """
    Creates the grammar for a blank field.

    These are for constant empty strings which should be ignored, as they are
    used just as fillers.

    :param columns: number of columns, which is the required number of
        whitespaces
    :param name: name for the field
    :return: grammar for the blank field
    """
    if name is None:
        name = 'Blank Field'

    field = pp.Regex('[ ]{' + str(columns) + '}')
    field.leaveWhitespace()
    # BUGFIX: ParserElement.suppress() returns a *new* Suppress wrapper
    # rather than modifying the element in place; the previous code
    # discarded the return value, so the field was never suppressed.
    field = field.suppress()
    field.setName(name)

    return field
|
Creates the grammar for a blank field.
These are for constant empty strings which should be ignored, as they are
used just as fillers.
:param columns: number of columns, which is the required number of
whitespaces
:param name: name for the field
:return: grammar for the blank field
|
def login(self, user, passwd):
    '''Logs the user into SecurityCenter and stores the needed token and cookies.'''
    credentials = {'username': user, 'password': passwd}
    resp = self.post('token', json=credentials)
    self._token = resp.json()['response']['token']
|
Logs the user into SecurityCenter and stores the needed token and cookies.
|
def beta_to_uni(text, strict=False):
    """
    Converts the given text from betacode to unicode.

    Args:
        text: The beta code text to convert. All of this text must be betacode.
        strict: Flag to allow for flexible diacritic order on input.

    Returns:
        The converted text.
    """
    # Check if the requested configuration for conversion already has a trie
    # stored otherwise convert it.
    param_key = (strict,)
    try:
        t = _BETA_CONVERSION_TRIES[param_key]
    except KeyError:
        t = _create_conversion_trie(*param_key)
        _BETA_CONVERSION_TRIES[param_key] = t

    transform = []
    idx = 0
    possible_word_boundary = False

    while idx < len(text):
        if possible_word_boundary and _penultimate_sigma_word_final(transform):
            # The previous token closed a word whose penultimate output
            # character is a sigma: rewrite it to the word-final form.
            transform[-2] = _FINAL_LC_SIGMA

        # Greedy longest-prefix match against the conversion trie.
        step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])

        if step:
            possible_word_boundary = text[idx] in _BETA_PUNCTUATION

            key, value = step
            transform.append(value)
            idx += len(key)
        else:
            # Unrecognized character: passed through unchanged.  It may be
            # whitespace or other punctuation, hence a potential boundary.
            possible_word_boundary = True

            transform.append(text[idx])
            idx += 1

    # Check one last time in case there is some whitespace or punctuation at the
    # end and check if the last character is a sigma.
    if possible_word_boundary and _penultimate_sigma_word_final(transform):
        transform[-2] = _FINAL_LC_SIGMA
    elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
        transform[-1] = _FINAL_LC_SIGMA

    converted = ''.join(transform)
    return converted
|
Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text.
|
def __get_issue_notes(self, issue_id):
    """Fetch all notes of an issue, enriching each with its award-emoji data."""
    notes = []
    for raw_page in self.client.notes(GitLabClient.ISSUES, issue_id):
        for note in json.loads(raw_page):
            note['award_emoji_data'] = self.__get_note_award_emoji(
                GitLabClient.ISSUES, issue_id, note['id'])
            notes.append(note)
    return notes
|
Get issue notes
|
def safe_lshift(a, b):
    """Left shift that rejects shift amounts above MAX_SHIFT."""
    if b <= MAX_SHIFT:
        return a << b
    raise RuntimeError("Invalid left shift, max left shift is {}".format(MAX_SHIFT))
|
safe version of lshift
|
def load_backend(backend_name):
    """load pool backend.

    A dotted name is imported as-is; a bare name is resolved against the
    bundled ``spamc.backend_<name>`` modules.
    """
    is_dotted = len(backend_name.split(".")) > 1
    module_name = backend_name if is_dotted else "spamc.backend_%s" % backend_name
    try:
        return import_module(module_name)
    except ImportError:
        error_msg = "%s isn't a spamc backend" % backend_name
        raise ImportError(error_msg)
|
load pool backend.
|
def _set_origin(self, v, load=False):
    """
    Setter method for origin, mapped from YANG variable /routing_system/route_map/content/set/origin (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_origin is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_origin() directly.

    YANG Description: BGP origin code
    """
    # Some incoming values carry their underlying YANG type in _utype;
    # unwrap to that type before re-wrapping in YANGDynClass below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=origin.origin, is_container='container', presence=False, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP origin code', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error dict expected by the generated
        # bindings (error string, YANG type, and generating expression).
        raise ValueError({
            'error-string': """origin must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=origin.origin, is_container='container', presence=False, yang_name="origin", rest_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP origin code', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
        })

    self.__origin = t
    # Notify the parent binding of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for origin, mapped from YANG variable /routing_system/route_map/content/set/origin (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_origin is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_origin() directly.
YANG Description: BGP origin code
|
def create_file_api(conf):
    """ Creates a SmartlingFileApi from the given config

    API key and project id come from ``conf.config`` with the
    SMARTLING_API_KEY / SMARTLING_PROJECT_ID environment variables as
    fallbacks.

    :param conf: configuration object exposing a ``config`` mapping.
    :return: a configured SmartlingFileApi instance.
    :raises SmarterlingError: if api-key or project-id is missing.
    """
    api_key = conf.config.get('api-key', os.environ.get('SMARTLING_API_KEY'))
    project_id = conf.config.get('project-id', os.environ.get('SMARTLING_PROJECT_ID'))
    if not project_id or not api_key:
        raise SmarterlingError('config.api-key and config.project-id are required configuration items')
    proxy_settings = None
    # `in` instead of dict.has_key(): has_key() was removed in Python 3.
    if 'proxy-settings' in conf.config:
        proxy_conf = conf.config.get('proxy-settings')
        proxy_settings = ProxySettings(
            proxy_conf.get('username', ''),
            proxy_conf.get('password', ''),
            proxy_conf.get('host', ''),
            int(proxy_conf.get('port', '80')))
    return SmartlingFileApiFactory().getSmartlingTranslationApi(
        not conf.config.get('sandbox', False),
        api_key,
        project_id,
        proxySettings=proxy_settings)
|
Creates a SmartlingFileApi from the given config
|
def session_hook(exception):
    """
    Print re-authentication instructions for a session error and exit.

    Expects an exception with an authorization_parameters field in its raw_json.
    """
    safeprint(
        "The resource you are trying to access requires you to "
        "re-authenticate with specific identities."
    )

    params = exception.raw_json["authorization_parameters"]
    message = params.get("session_message")
    if message:
        safeprint("message: {}".format(message))

    identities = params.get("session_required_identities")
    if identities:
        id_str = " ".join(identities)
        safeprint(
            "Please run\n\n"
            "  globus session update {}\n\n"
            "to re-authenticate with the required identities".format(id_str)
        )
    else:
        # BUGFIX: this branch previously called .format(id_str) on a string
        # with no placeholder; id_str is only bound in the branch above, so
        # it raised NameError whenever no identities were listed.
        safeprint(
            'Please use "globus session update" to re-authenticate '
            "with specific identities"
        )

    exit_with_mapped_status(exception.http_status)
|
Expects an exception with an authorization_parameters field in its raw_json
|
def wait(self, jobs=None, timeout=-1):
    """waits on one or more `jobs`, for up to `timeout` seconds.

    Parameters
    ----------
    jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
        ints are indices to self.history
        strs are msg_ids
        default: wait on all outstanding messages
    timeout : float
        a time in seconds, after which to give up.
        default is -1, which means no timeout

    Returns
    -------
    True : when all msg_ids are done
    False : timeout reached, some msg_ids still outstanding
    """
    tic = time.time()
    if jobs is None:
        # No explicit jobs: wait on everything currently outstanding.
        theids = self.outstanding
    else:
        if isinstance(jobs, (int, basestring, AsyncResult)):
            jobs = [jobs]
        theids = set()
        for job in jobs:
            if isinstance(job, int):
                # index access
                job = self.history[job]
            elif isinstance(job, AsyncResult):
                # NOTE(review): relies on Python 2's eager map() for the
                # side effect of adding every msg_id; under Python 3 this
                # lazy map would be a no-op — confirm target version.
                map(theids.add, job.msg_ids)
                continue
            theids.add(job)
    if not theids.intersection(self.outstanding):
        # Everything requested has already completed.
        return True
    self.spin()
    # Poll until all requested ids complete or the timeout expires.
    while theids.intersection(self.outstanding):
        if timeout >= 0 and ( time.time()-tic ) > timeout:
            break
        time.sleep(1e-3)
        self.spin()
    return len(theids.intersection(self.outstanding)) == 0
|
waits on one or more `jobs`, for up to `timeout` seconds.
Parameters
----------
jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
ints are indices to self.history
strs are msg_ids
default: wait on all outstanding messages
timeout : float
a time in seconds, after which to give up.
default is -1, which means no timeout
Returns
-------
True : when all msg_ids are done
False : timeout reached, some msg_ids still outstanding
|
def argument_run(self, sp_r):
    """
    .. _argument_run:

    Converts Arguments according to ``to_int``
    """
    converted = []
    for line in sp_r:
        logging.debug("argument run: handling: " + str(line))
        kind = line[1]
        if kind == "data":
            words = line[2].get_words(line[3])
            converted.append((line[0], kind, line[2], words))
        elif kind == "command":
            self.checkargs(line[0], line[2], line[3])
            converted_args = list(self.convert_args(line[2], line[3]))
            converted.append((line[0], kind, line[2], converted_args))
    return converted
|
.. _argument_run:
Converts Arguments according to ``to_int``
|
def spawn(self, args, executable=None, stdin=None, stdout=None, stderr=None,
          shell=False, cwd=None, env=None, flags=0, extra_handles=None):
    """Spawn a new child process.

    The executable to spawn and its arguments are determined by *args*,
    *executable* and *shell*.

    When *shell* is set to ``False`` (the default), *args* is normally a
    sequence and it contains both the program to execute (at index 0), and
    its arguments.

    When *shell* is set to ``True``, then *args* is normally a string and
    it indicates the command to execute through the shell.

    The *executable* argument can be used to override the executable to
    execute. If *shell* is ``False``, it overrides ``args[0]``. This is
    sometimes used on Unix to implement "fat" executables that behave
    differently based on argv[0]. If *shell* is ``True``, it overrides the
    shell to use. The default shell is ``'/bin/sh'`` on Unix, and the value
    of $COMSPEC (or ``'cmd.exe'`` if it is unset) on Windows.

    The *stdin*, *stdout* and *stderr* arguments specify how to handle
    standard input, output, and error, respectively. If set to None, then
    the child will inherit our respective stdio handle. If set to the
    special constant ``PIPE`` then a pipe is created. The pipe will be
    connected to a :class:`gruvi.StreamProtocol` which you can use to read
    or write from it. The stream protocol instance is available under
    either :attr:`stdin`, :attr:`stdout` or :attr:`stderr`. All 3 stdio
    arguments can also be a file descriptor, a file-like object, or a pyuv
    ``Stream`` instance.

    The *extra_handles* specifies any extra handles to pass to the client.
    It must be a sequence where each element is either a file descriptor, a
    file-like objects, or a ``pyuv.Stream`` instance. The position in the
    sequence determines the file descriptor in the client. The first
    position corresponds to FD 3, the second to 4, etc. This places these
    file descriptors directly after the stdio handles.

    The *cwd* argument specifies the directory to change to before
    executing the child. If not provided, the current directory is used.

    The *env* argument specifies the environment to use when executing the
    child. If provided, it must be a dictionary. By default, the current
    environment is used.

    The *flags* argument can be used to specify optional libuv
    ``uv_process_flags``. The only relevant flags are
    ``pyuv.UV_PROCESS_DETACHED`` and ``pyuv.UV_PROCESS_WINDOWS_HIDE``. Both
    are Windows specific and are silently ignored on Unix.
    """
    if self._process:
        raise RuntimeError('child process already spawned')
    # Reset per-child state before launching a new process.
    self._child_exited.clear()
    self._closed.clear()
    self._exit_status = None
    self._term_signal = None
    hub = get_hub()
    if isinstance(args, str):
        args = [args]
        # A single command string must be passed through unmodified.
        flags |= pyuv.UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS
    else:
        args = list(args)
    if shell:
        if hasattr(os, 'fork'):
            # Unix
            if executable is None:
                executable = '/bin/sh'
            args = [executable, '-c'] + args
        else:
            # Windows
            if executable is None:
                executable = os.environ.get('COMSPEC', 'cmd.exe')
            args = [executable, '/c'] + args
    if executable is None:
        executable = args[0]
    kwargs = {}
    if env is not None:
        kwargs['env'] = env
    if cwd is not None:
        kwargs['cwd'] = cwd
    kwargs['flags'] = flags
    # Build the stdio + extra-handle list that libuv hands to the child.
    handles = self._get_child_handles(hub.loop, stdin, stdout, stderr, extra_handles)
    kwargs['stdio'] = handles
    process = pyuv.Process.spawn(hub.loop, args, executable,
                                 exit_callback=self._on_child_exit, **kwargs)
    # Create stdin/stdout/stderr transports/protocols for piped handles.
    if handles[0].stream:
        self._stdin = self._connect_child_handle(handles[0])
    if handles[1].stream:
        self._stdout = self._connect_child_handle(handles[1])
    if handles[2].stream:
        self._stderr = self._connect_child_handle(handles[2])
    self._process = process
|
Spawn a new child process.
The executable to spawn and its arguments are determined by *args*,
*executable* and *shell*.
When *shell* is set to ``False`` (the default), *args* is normally a
sequence and it contains both the program to execute (at index 0), and
its arguments.
When *shell* is set to ``True``, then *args* is normally a string and
it indicates the command to execute through the shell.
The *executable* argument can be used to override the executable to
execute. If *shell* is ``False``, it overrides ``args[0]``. This is
sometimes used on Unix to implement "fat" executables that behave
differently based on argv[0]. If *shell* is ``True``, it overrides the
shell to use. The default shell is ``'/bin/sh'`` on Unix, and the value
of $COMSPEC (or ``'cmd.exe'`` if it is unset) on Windows.
The *stdin*, *stdout* and *stderr* arguments specify how to handle
standard input, output, and error, respectively. If set to None, then
the child will inherit our respective stdio handle. If set to the
special constant ``PIPE`` then a pipe is created. The pipe will be
connected to a :class:`gruvi.StreamProtocol` which you can use to read
or write from it. The stream protocol instance is available under
either :attr:`stdin`, :attr:`stdout` or :attr:`stderr`. All 3 stdio
arguments can also be a file descriptor, a file-like object, or a pyuv
``Stream`` instance.
The *extra_handles* specifies any extra handles to pass to the client.
It must be a sequence where each element is either a file descriptor, a
file-like objects, or a ``pyuv.Stream`` instance. The position in the
sequence determines the file descriptor in the client. The first
position corresponds to FD 3, the second to 4, etc. This places these
file descriptors directly after the stdio handles.
The *cwd* argument specifies the directory to change to before
executing the child. If not provided, the current directory is used.
The *env* argument specifies the environment to use when executing the
child. If provided, it must be a dictionary. By default, the current
environment is used.
The *flags* argument can be used to specify optional libuv
``uv_process_flags``. The only relevant flags are
``pyuv.UV_PROCESS_DETACHED`` and ``pyuv.UV_PROCESS_WINDOWS_HIDE``. Both
are Windows specific and are silently ignored on Unix.
|
def replace_surrogate_encode(mystring, exc):
    """
    Returns a (unicode) string, not the more logical bytes, because the codecs
    register_error functionality expects this.
    """
    decoded = []
    for ch in mystring:
        # if PY3:
        #     code = ch
        # else:
        code = ord(ch)
        # The following magic comes from Py3.3's Python/codecs.c file:
        if not 0xD800 <= code <= 0xDCFF:
            # Not a surrogate. Fail with the original exception.
            raise exc
        # mybytes = [0xe0 | (code >> 12),
        #            0x80 | ((code >> 6) & 0x3f),
        #            0x80 | (code & 0x3f)]
        # Is this a good idea?
        if 0xDC00 <= code <= 0xDC7F:
            decoded.append(_unichr(code - 0xDC00))
        elif code <= 0xDCFF:
            # NOTE(review): identical action to the branch above.  After the
            # range check at the top, this arm also catches high surrogates
            # 0xD800-0xDBFF, for which code - 0xDC00 is negative — confirm
            # intended handling; the raise below looks unreachable.
            decoded.append(_unichr(code - 0xDC00))
        else:
            raise NotASurrogateError
    return str().join(decoded)
|
Returns a (unicode) string, not the more logical bytes, because the codecs
register_error functionality expects this.
|
def _interception(self, joinpoint):
    """Intercept the joinpoint callee call, running pre/post conditions."""
    pre = self.pre_cond
    if pre is not None:
        pre(joinpoint)

    result = joinpoint.proceed()

    post = self.post_cond
    if post is not None:
        # Expose the callee result to the post-condition via the exec context.
        joinpoint.exec_ctx[Condition.RESULT] = result
        post(joinpoint)

    return result
|
Intercept call of joinpoint callee in doing pre/post conditions.
|
def Betainc(a, b, x):
    """
    Regularized incomplete beta function op.

    Wraps ``scipy.special.betainc`` (the previous docstring incorrectly
    said "gamma").  Returns a 1-tuple for op-output uniformity.
    """
    return sp.special.betainc(a, b, x),
|
Complemented, incomplete gamma op.
|
def merge_insert(ins_chunks, doc):
    """Append ``ins_chunks`` to ``doc`` wrapped in <ins>...</ins>.

    ``doc`` is the already-handled document (a list of text chunks).
    Unbalanced leading/trailing markup is kept as-is (we assume the
    matching markup appears elsewhere in the document); only the balanced
    portion is wrapped in <ins> tags.
    """
    head, balanced, tail = split_unbalanced(ins_chunks)

    doc.extend(head)
    # Make sure the word preceding the insert ends with a space.
    if doc:
        last = doc[-1]
        if not last.endswith(' '):
            doc[-1] = last + ' '
    doc.append('<ins>')
    # A trailing space belongs outside the closing tag.
    if balanced:
        final = balanced[-1]
        if final.endswith(' '):
            balanced[-1] = final[:-1]
    doc.extend(balanced)
    doc.append('</ins> ')
    doc.extend(tail)
|
doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that.
|
def get_permission_required(cls):
    """
    Return the required permissions as an iterable.

    Raises ImproperlyConfigured when the attribute was never set.
    """
    required = cls.permission_required
    if required is None:
        raise ImproperlyConfigured(
            "{0} is missing the permission_required attribute. "
            "Define {0}.permission_required, or override "
            "{0}.get_permission_required().".format(cls.__name__)
        )
    if not isinstance(required, six.string_types):
        return required
    # A single permission string becomes a 1-tuple; "" means no permissions.
    return (required,) if required != "" else ()
|
Get permission required property.
Must return an iterable.
|
def collapse(self, indices, values):
    """Partly collapse the interval product to single values.

    Note that no changes are made in-place.

    Parameters
    ----------
    indices : int or sequence of ints
        The indices of the dimensions along which to collapse.
    values : `array-like` or float
        The values to which to collapse. Must have the same
        length as ``indices``. Values must lie within the interval
        boundaries.

    Returns
    -------
    collapsed : `IntervalProd`
        The collapsed set.

    Raises
    ------
    ValueError
        If ``indices`` and ``values`` differ in length, or if any
        value lies outside the corresponding interval boundaries.
    IndexError
        If any index is outside ``0 <= index < self.ndim``.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.collapse(1, 0)
    IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ])
    >>> rbox.collapse([1, 2], [0, 2.5])
    IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
    """
    indices = np.atleast_1d(indices).astype('int64', casting='safe')
    values = np.atleast_1d(values)
    if len(indices) != len(values):
        raise ValueError('lengths of indices {} and values {} do not '
                         'match ({} != {})'
                         ''.format(indices, values,
                                   len(indices), len(values)))
    for axis, index in enumerate(indices):
        # Valid axes are 0 .. ndim - 1, as the error message states.
        # BUGFIX: the previous check used `<= self.ndim`, which let
        # index == ndim slip through to the fancy indexing below.
        if not 0 <= index < self.ndim:
            raise IndexError('in axis {}: index {} out of range 0 --> {}'
                             ''.format(axis, index, self.ndim - 1))
    if np.any(values < self.min_pt[indices]):
        raise ValueError('values {} not above the lower interval '
                         'boundaries {}'
                         ''.format(values, self.min_pt[indices]))
    if np.any(values > self.max_pt[indices]):
        raise ValueError('values {} not below the upper interval '
                         'boundaries {}'
                         ''.format(values, self.max_pt[indices]))
    # Collapse by pinning both bounds to the given values.
    b_new = self.min_pt.copy()
    b_new[indices] = values
    e_new = self.max_pt.copy()
    e_new[indices] = values
    return IntervalProd(b_new, e_new)
|
Partly collapse the interval product to single values.
Note that no changes are made in-place.
Parameters
----------
indices : int or sequence of ints
The indices of the dimensions along which to collapse.
values : `array-like` or float
The values to which to collapse. Must have the same
length as ``indices``. Values must lie within the interval
boundaries.
Returns
-------
collapsed : `IntervalProd`
The collapsed set.
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.collapse(1, 0)
IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ])
>>> rbox.collapse([1, 2], [0, 2.5])
IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
|
def prepare(self, strict=True):
    """Prepare the loaded JSON spec for use.

    Validates the spec, runs several scanner passes over the object
    tree (merge, patch, aggregate, type-reduction, cycle detection),
    and builds the shortcut scope dicts for operations (``op``) and
    models (``m``).

    :param bool strict: when in strict mode, exception would be raised
        if not valid (validation failure or cycles in Schema Objects).
    """
    self.__root = self.prepare_obj(self.raw, self.__url)
    self.validate(strict=strict)
    if hasattr(self.__root, 'schemes') and self.__root.schemes:
        if len(self.__root.schemes) > 0:
            self.__schemes = self.__root.schemes
        else:
            # extract schemes from the url to load spec
            # NOTE(review): this branch looks unreachable (a truthy
            # sequence always has len > 0), and `.schemes` on a
            # urlparse result is suspicious (the standard attribute
            # is `scheme`) -- TODO confirm the intended fallback.
            self.__schemes = [six.moves.urlparse(self.__url).schemes]
    # Normalize/resolve the object tree in sequential scanner passes;
    # pass order matters.
    s = Scanner(self)
    s.scan(root=self.__root, route=[Merge()])
    s.scan(root=self.__root, route=[PatchObject()])
    s.scan(root=self.__root, route=[Aggregate()])
    # reducer for Operation
    tr = TypeReduce(self.__sep)
    cy = CycleDetector()
    s.scan(root=self.__root, route=[tr, cy])
    # 'op' -- shortcut for Operation with tag and operationId
    self.__op = utils.ScopeDict(tr.op)
    # 'm' -- shortcut for model in Swagger 1.2
    if hasattr(self.__root, 'definitions') and self.__root.definitions != None:
        self.__m = utils.ScopeDict(self.__root.definitions)
    else:
        # No definitions section: expose an empty scope dict.
        self.__m = utils.ScopeDict({})
    # update scope-separater
    self.__m.sep = self.__sep
    self.__op.sep = self.__sep
    # cycle detection
    if len(cy.cycles['schema']) > 0 and strict:
        raise errs.CycleDetectionError('Cycles detected in Schema Object: {0}'.format(cy.cycles['schema']))
|
preparation for loaded json
:param bool strict: when in strict mode, exception would be raised if not valid.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.