code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def check_basic_auth(user, passwd):
    """Validate the current request's HTTP Basic Auth credentials.

    Args:
        user: Expected username.
        passwd: Expected password.

    Returns:
        Truthy only when an Authorization header is present and both the
        username and password match; falsy (the missing auth object)
        otherwise.
    """
    credentials = request.authorization
    if not credentials:
        # No Authorization header on this request.
        return credentials
    return credentials.username == user and credentials.password == passwd
|
Checks user authentication using HTTP Basic Auth.
|
def register(cls):
    """Register an :class:`Entity` subclass as a commentable class.

    Can be used as a class decorator:

    .. code-block:: python

        @comment.register
        class MyContent(Entity):
            ...

    :raises ValueError: when ``cls`` is not an ``Entity`` subclass.
    """
    if issubclass(cls, Entity):
        Commentable.register(cls)
        return cls
    raise ValueError("Class must be a subclass of abilian.core.entities.Entity")
|
Register an :class:`Entity` as a commentable class.
Can be used as a class decorator:
.. code-block:: python
@comment.register
class MyContent(Entity):
...
|
def p_stop_raise(p):
    """ statement : STOP expr
    | STOP
    """
    # NOTE: the docstring above is the PLY grammar rule — it is read by
    # the parser generator at runtime and must not be reworded.
    lineno = p.lineno(1)
    if len(p) == 2:
        # Bare STOP: default code 9.
        code = make_number(9, lineno=lineno)
    else:
        code = p[2]
    one = make_number(1, lineno=lineno)
    # Emit STOP with (ubyte(code) - 1).
    p[0] = make_sentence(
        'STOP',
        make_binary(lineno, 'MINUS',
                    make_typecast(TYPE.ubyte, code, lineno), one,
                    lambda x, y: x - y))
|
statement : STOP expr
| STOP
|
def _prepare_headers(self, additional_headers=None, **kwargs):
    """Build the headers dict used for HTTP communication.

    Args:
        additional_headers: (optional) mapping of extra headers merged
            over the defaults.
        **kwargs: ignored; accepted for call-site flexibility.

    Returns:
        dict of string header names to string values, always including
        a pyseaweed User-Agent.
    """
    headers = {
        "User-Agent": "pyseaweed/{version}".format(version=__version__),
    }
    if additional_headers is not None:
        headers.update(additional_headers)
    return headers
|
Prepare headers for http communication.
Return dict of header to be used in requests.
Args:
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Headers dict. Key and values are string
|
def run(*args, **kwargs):
    # type: (...) -> None
    """Run cwltool.

    Installs a SIGTERM handler, forwards all arguments to ``main`` and
    exits with its return code. Child processes are terminated even when
    ``main`` raises or ``sys.exit`` unwinds the stack.
    """
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        sys.exit(main(*args, **kwargs))
    finally:
        # Always clean up spawned processes, regardless of exit path.
        _terminate_processes()
|
Run cwltool.
|
def new(cls, signatures: [Signature]) -> 'MultiSignature':
    """
    Create a BLS multi signature from the given list of signatures.

    :param: signature - List of signatures
    :return: BLS multi signature
    """
    logger = logging.getLogger(__name__)
    logger.debug("MultiSignature::new: >>>")
    # Marshal the signatures into a C array of their native handles.
    # noinspection PyCallingNonCallable,PyTypeChecker
    c_signatures = (c_void_p * len(signatures))()
    for idx, signature in enumerate(signatures):
        c_signatures[idx] = signature.c_instance
    c_instance = c_void_p()
    do_call(cls.new_handler, c_signatures, len(signatures), byref(c_instance))
    multi_sig = cls(c_instance)
    logger.debug("MultiSignature::new: <<< res: %r", multi_sig)
    return multi_sig
|
Creates and returns BLS multi signature that corresponds to the given signatures list.
:param: signature - List of signatures
:return: BLS multi signature
|
def predict_steadystate(self, u=0, B=None):
    """
    Propagate the state estimate (prior) with the Kalman filter state
    equations. Only x is advanced; P is left untouched. See
    update_steadystate() for when this variant is appropriate.

    Parameters
    ----------
    u : np.array
        Optional control vector; multiplied by B to form the control
        input when a control matrix is available.
    B : np.array(dim_x, dim_z), or None
        Optional control transition matrix. None means fall back to
        `self.B`.
    """
    if B is None:
        B = self.B
    # x = Fx, plus Bu when a control matrix exists.
    self.x = dot(self.F, self.x)
    if B is not None:
        self.x = self.x + dot(B, u)
    # Record the prior for later use.
    self.x_prior = self.x.copy()
    self.P_prior = self.P.copy()
|
Predict state (prior) using the Kalman filter state propagation
equations. Only x is updated, P is left unchanged. See
update_steadystate() for a longer explanation of when to use this
method.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
|
def _controller(self):
    """
    Command-dispatch loop; runs in a dedicated thread.

    Polls the controller queue while the service is running and hands
    each received command to self.eval_command() on its own thread.
    :return:
    """
    while self.running:
        try:
            command = self._controller_q.get(timeout=1)
        except (TimeoutError, Empty):
            # Nothing queued within the timeout; re-check self.running.
            continue
        log.debug("WSSAPI._controller(): Received command: %s", command)
        Thread(target=self.eval_command, args=(command,)).start()
|
This method runs in a dedicated thread, calling self.eval_command().
:return:
|
def _setup_rpc(self):
    """Setup the RPC client for the current agent.

    Creates the plugin and state-report RPC proxies, registers topic
    consumers for port/network events, enables the QoS extension, starts
    consuming in threads, and — when a report interval is configured —
    launches a periodic state-report heartbeat.
    """
    self._plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    self._state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
    self._client = n_rpc.get_client(self.target)
    # Notification topics this agent listens to.
    self._consumers.extend([
        [topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE],
        [topics.PORT, topics.DELETE]
    ])
    self._connection = agent_rpc.create_consumers(
        self._endpoints, self._topic, self._consumers,
        start_listening=False
    )
    # QoS extension is wired in before consumption begins.
    self._setup_qos_extension()
    self._connection.consume_in_threads()
    report_interval = CONF.AGENT.report_interval
    if report_interval:
        # Periodically push agent state back to the plugin.
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
|
Setup the RPC client for the current agent.
|
def toggle_create_button(self):
    """Enable the Create Mask button only while objects are drawn."""
    has_drawn_objects = len(self._drawn_tags) > 0
    self.w.create_mask.set_enabled(has_drawn_objects)
|
Enable or disable Create Mask button based on drawn objects.
|
def project(ctx, project):  # pylint:disable=redefined-outer-name
    """Commands for projects.

    Stores the project name on the Click context object for every
    subcommand except ``create`` and ``list``.
    """
    needs_project = ctx.invoked_subcommand not in ('create', 'list')
    if needs_project:
        ctx.obj = ctx.obj or {}
        ctx.obj['project'] = project
|
Commands for projects.
|
def select_one(self, tag):
    """Return the first tag matching *tag*, or None when nothing matches."""
    matches = self.select(tag, limit=1)
    if matches:
        return matches[0]
    return None
|
Select a single tag.
|
def get_sns_topic_arn(topic_name, account, region):
    """Get SNS topic ARN.

    Args:
        topic_name (str): Name of the topic to lookup. A full ARN is
            returned unchanged.
        account (str): Environment, e.g. dev
        region (str): Region name, e.g. us-east-1

    Returns:
        str: ARN for requested topic name

    Raises:
        SNSTopicNotFound: when no topic with that name exists.
    """
    # A fully-qualified ARN needs no lookup.
    if topic_name.startswith('arn:aws:sns:') and topic_name.count(':') == 5:
        return topic_name
    session = boto3.Session(profile_name=account, region_name=region)
    sns_client = session.client('sns')
    for topic in sns_client.list_topics()['Topics']:
        topic_arn = topic['TopicArn']
        # The topic name is the last ':'-separated ARN component.
        if topic_arn.split(':')[-1] == topic_name:
            return topic_arn
    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
|
Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name
|
def _dnsname_to_stdlib(name):
    """
    Convert a dNSName SubjectAlternativeName value into the form the
    standard library expects on the running Python version.

    Cryptography yields dNSName values as unicode strings that were
    idna-decoded from ASCII bytes; we idna-encode them back, and on
    Python 3 additionally decode via UTF-8 (the stdlib builds the value
    with PyUnicode_FromStringAndSize, which decodes UTF-8).
    """
    def idna_encode(name):
        """
        Borrowed wholesale from the Python Cryptography Project. It turns out
        that we can't just safely call `idna.encode`: it can explode for
        wildcard names. This avoids that problem.
        """
        import idna
        for prefix in (u'*.', u'.'):
            if name.startswith(prefix):
                # Encode the remainder and re-attach the raw prefix, since
                # idna.encode cannot handle wildcard/leading-dot labels.
                stripped = name[len(prefix):]
                return prefix.encode('ascii') + idna.encode(stripped)
        return idna.encode(name)

    encoded = idna_encode(name)
    if sys.version_info >= (3, 0):
        encoded = encoded.decode('utf-8')
    return encoded
|
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
|
def reset_image_attribute(self, image_id, attribute='launchPermission'):
    """
    Reset an attribute of an AMI to its default value.

    :type image_id: string
    :param image_id: ID of the AMI whose attribute is reset
    :type attribute: string
    :param attribute: The attribute to reset
    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    params = {
        'ImageId': image_id,
        'Attribute': attribute,
    }
    return self.get_status('ResetImageAttribute', params, verb='POST')
|
Resets an attribute of an AMI to its default value.
:type image_id: string
:param image_id: ID of the AMI for which an attribute will be described
:type attribute: string
:param attribute: The attribute to reset
:rtype: bool
:return: Whether the operation succeeded or not
|
def setpassword(self, password):
    """Set the password used when extracting.

    If the archive headers were encrypted, the cached parser was built
    without the password and is discarded so the archive is re-parsed;
    otherwise the password is handed to the existing parser.
    """
    self._password = password
    if self._file_parser is None:
        self._parse()
        return
    if self._file_parser.has_header_encryption():
        # Cached parser could not have read encrypted headers — rebuild.
        self._file_parser = None
        self._parse()
    else:
        self._file_parser.setpassword(self._password)
|
Sets the password to use when extracting.
|
def check_ip(addr):
    '''
    Check whether ``addr`` is a valid IP address (optionally with a CIDR
    suffix). Returns True if valid, otherwise False.

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.check_ip 127.0.0.1
        salt ns1 dig.check_ip 1111:2222:3333:4444:5555:6666:7777:8888
    '''
    try:
        parts = addr.rsplit('/', 1)
    except AttributeError:
        # Non-string passed
        return False

    def _subnet_ok(lo, hi):
        # True when no subnet suffix was supplied, or the suffix is an
        # integer within [lo, hi]; False for non-integer suffixes.
        if len(parts) == 1:
            return True
        try:
            return lo <= int(parts[1]) <= hi
        except ValueError:
            return False

    if salt.utils.network.is_ipv4(parts[0]):
        return _subnet_ok(1, 32)
    if salt.utils.network.is_ipv6(parts[0]):
        return _subnet_ok(8, 128)
    return False
|
Check if address is a valid IP. returns True if valid, otherwise False.
CLI Example:
.. code-block:: bash
salt ns1 dig.check_ip 127.0.0.1
salt ns1 dig.check_ip 1111:2222:3333:4444:5555:6666:7777:8888
|
def mount_status_encode(self, target_system, target_component, pointing_a, pointing_b, pointing_c):
    '''
    Build a MOUNT_STATUS message carrying camera/antenna mount state
    from the APM to the GCS.

    target_system        : System ID (uint8_t)
    target_component     : Component ID (uint8_t)
    pointing_a           : pitch(deg*100) (int32_t)
    pointing_b           : roll(deg*100) (int32_t)
    pointing_c           : yaw(deg*100) (int32_t)
    '''
    return MAVLink_mount_status_message(
        target_system, target_component, pointing_a, pointing_b, pointing_c)
|
Message with some status from APM to GCS about camera or antenna mount
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
pointing_a : pitch(deg*100) (int32_t)
pointing_b : roll(deg*100) (int32_t)
pointing_c : yaw(deg*100) (int32_t)
|
def _scan(self, type):
"""
Returns the matched text, and moves to the next token
"""
tok = self._scanner.token(self._pos, frozenset([type]))
self._char_pos = tok[0]
if tok[2] != type:
raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type))
self._pos += 1
return tok[3]
|
Returns the matched text, and moves to the next token
|
def start_polling(dispatcher, *, loop=None, skip_updates=False, reset_webhook=True,
                  on_startup=None, on_shutdown=None, timeout=20, fast=True):
    """
    Start the bot in long-polling mode.

    :param dispatcher: dispatcher instance to run
    :param loop: optional event loop
    :param skip_updates: drop pending updates on start
    :param reset_webhook: remove any configured webhook first
    :param on_startup: callback(s) invoked before polling begins
    :param on_shutdown: callback(s) invoked on shutdown
    :param timeout: long-polling timeout in seconds
    """
    runner = Executor(dispatcher, skip_updates=skip_updates, loop=loop)
    _setup_callbacks(runner, on_startup, on_shutdown)
    runner.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast)
|
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
|
def semester_feature(catalog, soup):
    """Populate the catalog's year/semester info from the coursedb XML.

    Reads 'semesternumber' (YYYYMM) to set catalog.year, catalog.month
    and the mapped catalog.semester name, and 'semesterdesc' for the
    catalog display name.
    """
    raw = soup.coursedb['semesternumber']
    # First four digits are the year, the remainder is the month.
    catalog.year = int(raw[:4])
    catalog.month = int(raw[4:])
    # Only these start months occur in the source data; any other month
    # raises KeyError, matching the original behavior.
    month_mapping = {1: 'Spring', 5: 'Summer', 9: 'Fall'}
    catalog.semester = month_mapping[catalog.month]
    catalog.name = soup.coursedb['semesterdesc']
    # Lazy %-style args: the message is only formatted if emitted.
    logger.info('Catalog type: %s', catalog.name)
|
The year and semester information that this XML file holds courses for.
|
def _convert_xml_to_service_properties(response):
    '''
    Deserialize a storage service properties XML response body into a
    ServiceProperties object. Returns None when the response or its body
    is missing. Elements absent from the payload leave the corresponding
    attribute unset. Expected payload shape:

    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
      <Logging>
        <Version>version-number</Version>
        <Delete>true|false</Delete>
        <Read>true|false</Read>
        <Write>true|false</Write>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </Logging>
      <HourMetrics>
        <Version>version-number</Version>
        <Enabled>true|false</Enabled>
        <IncludeAPIs>true|false</IncludeAPIs>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </HourMetrics>
      <MinuteMetrics>
        <Version>version-number</Version>
        <Enabled>true|false</Enabled>
        <IncludeAPIs>true|false</IncludeAPIs>
        <RetentionPolicy>
          <Enabled>true|false</Enabled>
          <Days>number-of-days</Days>
        </RetentionPolicy>
      </MinuteMetrics>
      <Cors>
        <CorsRule>
          <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
          <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
          <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
          <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
          <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
        </CorsRule>
      </Cors>
      <DeleteRetentionPolicy>
        <Enabled>true|false</Enabled>
        <Days>number-of-days</Days>
      </DeleteRetentionPolicy>
      <StaticWebsite>
        <Enabled>true|false</Enabled>
        <IndexDocument></IndexDocument>
        <ErrorDocument404Path></ErrorDocument404Path>
      </StaticWebsite>
    </StorageServiceProperties>
    '''
    if response is None or response.body is None:
        return None
    service_properties_element = ETree.fromstring(response.body)
    service_properties = ServiceProperties()
    # Logging
    logging = service_properties_element.find('Logging')
    if logging is not None:
        service_properties.logging = Logging()
        service_properties.logging.version = logging.find('Version').text
        service_properties.logging.delete = _bool(logging.find('Delete').text)
        service_properties.logging.read = _bool(logging.find('Read').text)
        service_properties.logging.write = _bool(logging.find('Write').text)
        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
                                         service_properties.logging.retention_policy)
    # HourMetrics
    hour_metrics_element = service_properties_element.find('HourMetrics')
    if hour_metrics_element is not None:
        service_properties.hour_metrics = Metrics()
        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)
    # MinuteMetrics
    minute_metrics_element = service_properties_element.find('MinuteMetrics')
    if minute_metrics_element is not None:
        service_properties.minute_metrics = Metrics()
        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)
    # CORS
    cors = service_properties_element.find('Cors')
    if cors is not None:
        service_properties.cors = list()
        for rule in cors.findall('CorsRule'):
            # Origins, methods and max-age are mandatory in a CorsRule.
            allowed_origins = rule.find('AllowedOrigins').text.split(',')
            allowed_methods = rule.find('AllowedMethods').text.split(',')
            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)
            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)
            # Header lists are optional; only split when text is present.
            exposed_headers = rule.find('ExposedHeaders').text
            if exposed_headers is not None:
                cors_rule.exposed_headers = exposed_headers.split(',')
            allowed_headers = rule.find('AllowedHeaders').text
            if allowed_headers is not None:
                cors_rule.allowed_headers = allowed_headers.split(',')
            service_properties.cors.append(cors_rule)
    # Target version
    target_version = service_properties_element.find('DefaultServiceVersion')
    if target_version is not None:
        service_properties.target_version = target_version.text
    # DeleteRetentionPolicy
    delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
    if delete_retention_policy_element is not None:
        service_properties.delete_retention_policy = DeleteRetentionPolicy()
        policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
        service_properties.delete_retention_policy.enabled = policy_enabled
        # Days is only meaningful (and only parsed) when the policy is on.
        if policy_enabled:
            service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)
    # StaticWebsite
    static_website_element = service_properties_element.find('StaticWebsite')
    if static_website_element is not None:
        service_properties.static_website = StaticWebsite()
        service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text)
        index_document_element = static_website_element.find('IndexDocument')
        if index_document_element is not None:
            service_properties.static_website.index_document = index_document_element.text
        error_document_element = static_website_element.find('ErrorDocument404Path')
        if error_document_element is not None:
            service_properties.static_website.error_document_404_path = error_document_element.text
    return service_properties
|
<?xml version="1.0" encoding="utf-8"?>
<StorageServiceProperties>
<Logging>
<Version>version-number</Version>
<Delete>true|false</Delete>
<Read>true|false</Read>
<Write>true|false</Write>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</Logging>
<HourMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</HourMetrics>
<MinuteMetrics>
<Version>version-number</Version>
<Enabled>true|false</Enabled>
<IncludeAPIs>true|false</IncludeAPIs>
<RetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</RetentionPolicy>
</MinuteMetrics>
<Cors>
<CorsRule>
<AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
<AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
<MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
<ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
<AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
</CorsRule>
</Cors>
<DeleteRetentionPolicy>
<Enabled>true|false</Enabled>
<Days>number-of-days</Days>
</DeleteRetentionPolicy>
<StaticWebsite>
<Enabled>true|false</Enabled>
<IndexDocument></IndexDocument>
<ErrorDocument404Path></ErrorDocument404Path>
</StaticWebsite>
</StorageServiceProperties>
|
def rotate(self, clat, clon, coord_degrees=True, dj_matrix=None,
           nwinrot=None):
    """
    Rotate the spherical-cap windows centered on the North pole to clat
    and clon, and save the spherical harmonic coefficients in the
    attribute coeffs.
    Usage
    -----
    x.rotate(clat, clon [coord_degrees, dj_matrix, nwinrot])
    Parameters
    ----------
    clat, clon : float
        Latitude and longitude of the center of the rotated spherical-cap
        localization windows (default in degrees).
    coord_degrees : bool, optional, default = True
        True if clat and clon are in degrees.
    dj_matrix : ndarray, optional, default = None
        The djpi2 rotation matrix computed by a call to djpi2.
    nwinrot : int, optional, default = (lwin+1)**2
        The number of best concentrated windows to rotate, where lwin is
        the spherical harmonic bandwidth of the localization windows.
    Description
    -----------
    This function will take the spherical-cap localization windows
    centered at the North pole (and saved in the attributes tapers and
    orders), rotate each function to the coordinate (clat, clon), and save
    the spherical harmonic coefficients in the attribute coeffs. Each
    column of coeffs contains a single window, and the coefficients are
    ordered according to the convention in SHCilmToVector.
    """
    self.coeffs = _np.zeros(((self.lwin + 1)**2, self.nwin))
    self.clat = clat
    self.clon = clon
    self.coord_degrees = coord_degrees
    if nwinrot is not None:
        self.nwinrot = nwinrot
    else:
        self.nwinrot = self.nwin
    # Euler angles for the rotation; sign convention moves the pole to
    # (clat, clon).
    if self.coord_degrees:
        angles = _np.radians(_np.array([0., -(90. - clat), -clon]))
    else:
        angles = _np.array([0., -(_np.pi/2. - clat), -clon])
    # Compute (and cache) the djpi2 matrix only when none was supplied.
    if dj_matrix is None:
        if self.dj_matrix is None:
            self.dj_matrix = _shtools.djpi2(self.lwin + 1)
            dj_matrix = self.dj_matrix
        else:
            dj_matrix = self.dj_matrix
    if ((coord_degrees is True and clat == 90. and clon == 0.) or
            (coord_degrees is False and clat == _np.pi/2. and clon == 0.)):
        # Target is already the North pole: no rotation needed, just
        # convert each taper to vector form.
        for i in range(self.nwinrot):
            coeffs = self._taper2coeffs(i)
            self.coeffs[:, i] = _shtools.SHCilmToVector(coeffs)
    else:
        coeffs = _shtools.SHRotateTapers(self.tapers, self.orders,
                                         self.nwinrot, angles, dj_matrix)
        self.coeffs = coeffs
|
Rotate the spherical-cap windows centered on the North pole to clat
and clon, and save the spherical harmonic coefficients in the
attribute coeffs.
Usage
-----
x.rotate(clat, clon [coord_degrees, dj_matrix, nwinrot])
Parameters
----------
clat, clon : float
Latitude and longitude of the center of the rotated spherical-cap
localization windows (default in degrees).
coord_degrees : bool, optional, default = True
True if clat and clon are in degrees.
dj_matrix : ndarray, optional, default = None
The djpi2 rotation matrix computed by a call to djpi2.
nwinrot : int, optional, default = (lwin+1)**2
The number of best concentrated windows to rotate, where lwin is
the spherical harmonic bandwidth of the localization windows.
Description
-----------
This function will take the spherical-cap localization windows
centered at the North pole (and saved in the attributes tapers and
orders), rotate each function to the coordinate (clat, clon), and save
the spherical harmonic coefficients in the attribute coeffs. Each
column of coeffs contains a single window, and the coefficients are
ordered according to the convention in SHCilmToVector.
|
def add_device(self, device, container):
    """Add a device to a (static) group. Wraps JSSObject.add_object_to_path.

    Args:
        device: A JSSObject to add (as list data), to this object.
        location: Element or a string path argument to find()

    Raises:
        ValueError: when this group is a smart group.
    """
    if self.findtext("is_smart") != "false":
        # Technically this isn't true. It will strangely accept
        # them, and they even show up as members of the group!
        raise ValueError("Devices may not be added to smart groups.")
    # The JSS manages the size tag for us, so we can ignore it.
    self.add_object_to_path(device, container)
|
Add a device to a group. Wraps JSSObject.add_object_to_path.
Args:
device: A JSSObject to add (as list data), to this object.
location: Element or a string path argument to find()
|
def is_modifier(key):
    """
    Return True if `key` is a scan code or name of a modifier key.
    """
    if _is_str(key):
        return key in all_modifiers
    if not _modifier_scan_codes:
        # Lazily populate the modifier scan-code cache on first use.
        _modifier_scan_codes.update(
            *(key_to_scan_codes(name, False) for name in all_modifiers))
    return key in _modifier_scan_codes
|
Returns True if `key` is a scan code or name of a modifier key.
|
def _filter(self, query, **kwargs):
"""
Filter a query with user-supplied arguments.
"""
query = self._auto_filter(query, **kwargs)
return query
|
Filter a query with user-supplied arguments.
|
def decrypt(self, data, pad=None, padmode=None):
    """decrypt(data, [pad], [padmode]) -> bytes
    data : bytes to be encrypted
    pad : Optional argument for decryption padding. Must only be one byte
    padmode : Optional argument for overriding the padding mode.
    The data must be a multiple of 8 bytes and will be decrypted
    with the already specified key. In PAD_NORMAL mode, if the
    optional padding character is supplied, then the un-encrypted
    data will have the padding characters removed from the end of
    the bytes. This pad removal only occurs on the last 8 bytes of
    the data (last data block). In PAD_PKCS5 mode, the special
    padding end markers will be removed from the data after
    decrypting, no pad character is required for PAD_PKCS5.
    """
    ENCRYPT = des.ENCRYPT
    DECRYPT = des.DECRYPT
    # Normalize text input to bytes before any crypto work.
    data = self._guardAgainstUnicode(data)
    if pad is not None:
        pad = self._guardAgainstUnicode(pad)
    if self.getMode() == CBC:
        # CBC: seed all three DES keys with the current IV, then decrypt
        # block by block. 3DES decryption applies D(k3), E(k2), D(k1).
        self.__key1.setIV(self.getIV())
        self.__key2.setIV(self.getIV())
        self.__key3.setIV(self.getIV())
        i = 0
        result = []
        while i < len(data):
            iv = data[i:i+8]
            block = self.__key3.crypt(iv, DECRYPT)
            block = self.__key2.crypt(block, ENCRYPT)
            block = self.__key1.crypt(block, DECRYPT)
            # The ciphertext block becomes the IV for the next block.
            self.__key1.setIV(iv)
            self.__key2.setIV(iv)
            self.__key3.setIV(iv)
            result.append(block)
            i += 8
        if _pythonMajorVersion < 3:
            data = ''.join(result)
        else:
            # bytes.fromhex('') is simply b'' used as a bytes joiner.
            data = bytes.fromhex('').join(result)
    else:
        # ECB: single pass over the whole payload, D(k3), E(k2), D(k1).
        data = self.__key3.crypt(data, DECRYPT)
        data = self.__key2.crypt(data, ENCRYPT)
        data = self.__key1.crypt(data, DECRYPT)
    return self._unpadData(data, pad, padmode)
|
decrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
|
def ensure_dir_does_not_exist(*args):
    """Remove the directory at os.path.join(*args) if it exists."""
    target = os.path.join(*args)
    if not os.path.isdir(target):
        # Nothing to remove.
        return
    shutil.rmtree(target)
|
Ensures that the given directory does not exist.
|
def deregister(self, subscriber):
    """Stop publishing to a subscriber.

    Removes *subscriber* from the subscriber set; a subscriber that is
    not registered is logged and otherwise ignored.
    """
    try:
        self.subscribers.remove(subscriber)
    except KeyError:
        # Lazy %-args so the message is only built when emitted.
        logger.debug('Error removing subscriber: %s', subscriber)
    else:
        # Log success only after the removal actually happened (the
        # original logged it before attempting the removal).
        logger.debug('Subscriber left')
|
Stop publishing to a subscriber.
|
def import_jwks_as_json(self, jwks, issuer):
    """
    Import all keys represented in a JWKS expressed as a JSON string.

    :param jwks: JSON representation of a JWKS
    :param issuer: Who 'owns' the JWKS
    """
    parsed = json.loads(jwks)
    return self.import_jwks(parsed, issuer)
|
Imports all the keys that are represented in a JWKS expressed as a
JSON object
:param jwks: JSON representation of a JWKS
:param issuer: Who 'owns' the JWKS
|
def mult_masses(mA, f_binary=0.4, f_triple=0.12,
                minmass=0.11, qmin=0.1, n=1e5):
    """Returns m1, m2, and m3 appropriate for TripleStarPopulation, given "primary" mass (most massive of system) and binary/triple fractions.
    star with m1 orbits (m2 + m3). This means that the primary mass mA will correspond
    either to m1 or m2. Any mass set to 0 means that component does not exist.
    """
    # Broadcast a scalar mA to n samples; otherwise take n from mA.
    if np.size(mA) > 1:
        n = len(mA)
    else:
        mA = np.ones(n) * mA
    # Randomly assign each system as single, binary, or triple.
    r = rand.random(n)
    is_single = r > (f_binary + f_triple)
    is_double = (r > f_triple) & (r < (f_binary + f_triple))
    is_triple = r <= f_triple
    # For triples: coin flip on whether component C orbits A or B.
    CwA = rand.random(n) < 0.5
    CwB = ~CwA
    #these for Triples:
    # Case "C with A": draw mass ratios subject to the minimum mass.
    minq2_A = minmass/mA
    q2_A = rand.random(n)*(1-minq2_A) + minq2_A
    minq1_A = (minmass/mA)/(1+q2_A)
    maxq1_A = 1/(1+q2_A)
    q1_A = rand.random(n)*(maxq1_A-minq1_A) + minq1_A
    # Case "C with B": analogous draws for the alternate configuration.
    minq1_B = 2*minmass/mA
    q1_B = rand.random(n)*(1-minq1_B) + minq1_B
    minq2_B = np.maximum(((q1_B*mA)-minmass)/minmass,
                         (q1_B*mA - minmass)/(q1_B*mA + minmass))
    maxq2_B = 1.
    q2_B = rand.random(n)*(maxq2_B-minq2_B) + minq2_B
    # Component masses implied by each configuration's ratios.
    mB_A = q1_A*(1 + q2_A) * mA
    mC_A = q2_A * mA
    mB_B = (q1_B/(1 + q2_B)) * mA
    mC_B = (q1_B*q2_B)/(1 + q2_B) * mA
    # Select per-system masses according to the coin flip.
    mB = CwA*mB_A + CwB*mB_B
    mC = CwA*mC_A + CwB*mC_B
    #for binaries-only
    qmin = minmass/mA
    q = rand.random(n)*(1-qmin) + qmin
    mB[is_double] = q[is_double]*mA[is_double]
    #now need to define the proper mapping from A,B,C to 1,2,3:
    # If no B or C present, then A=1
    # If B present but not C, then A=1, B=2
    # If both B and C present then:
    # If C is with A, then A=2, C=3, B=1
    # If C is with B, then A=1, B=2, C=3
    m1 = (mA)*(is_single + is_double) + (CwA*mB + CwB*mA)*is_triple
    m2 = (mB)*is_double + (CwA*mA + CwB*mB)*is_triple
    m3 = mC*is_triple
    return m1, m2, m3
|
Returns m1, m2, and m3 appropriate for TripleStarPopulation, given "primary" mass (most massive of system) and binary/triple fractions.
star with m1 orbits (m2 + m3). This means that the primary mass mA will correspond
either to m1 or m2. Any mass set to 0 means that component does not exist.
|
def add_cache_tier(self, cache_pool, mode):
    """
    Add a new cache tier to an existing pool.

    :param cache_pool: six.string_types. The cache tier pool name to add.
    :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
    :return: None
    """
    # Validate inputs before touching the cluster.
    validator(value=cache_pool, valid_type=six.string_types)
    validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
    tier_cmd = ['ceph', '--id', self.service, 'osd', 'tier']
    check_call(tier_cmd + ['add', self.name, cache_pool])
    check_call(tier_cmd + ['cache-mode', cache_pool, mode])
    check_call(tier_cmd + ['set-overlay', self.name, cache_pool])
    check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
|
Adds a new cache tier to an existing pool.
:param cache_pool: six.string_types. The cache tier pool name to add.
:param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
:return: None
|
def build_job_configs(self, args):
    """Hook to build job configurations.

    Builds one config per component defined in the YAML file at
    args['comp'], keyed by the component's ebin/evtype key.
    """
    job_configs = {}
    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])
    mktime = args['mktimefilter']
    for comp in components:
        zcut = "zmax%i" % comp.zmax
        key = comp.make_key('{ebin_name}_{evtype_name}')
        name_keys = dict(zcut=zcut,
                         ebin=comp.ebin_name,
                         psftype=comp.evtype_name,
                         irf_ver=NAME_FACTORY.irf_ver(),
                         mktime=mktime,
                         fullpath=True)
        outfile = NAME_FACTORY.bexpcube_sun(**name_keys)
        # Compute once and reuse (the original computed this value into an
        # unused local and then recomputed it for infile).
        ltcube_sun = NAME_FACTORY.ltcube_sun(**name_keys)
        job_configs[key] = dict(infile=ltcube_sun,
                                outfile=outfile,
                                irfs=NAME_FACTORY.irfs(**name_keys),
                                evtype=comp.evtype,
                                emin=comp.emin,
                                emax=comp.emax,
                                enumbins=comp.enumbins,
                                logfile=make_nfs_path(outfile.replace('.fits', '.log')))
    return job_configs
|
Hook to build job configurations
|
def ProcessEntry(self, responses):
    """Process the responses from the client.

    Args:
      responses: client responses; may contain FindSpec results which are
        normalized to StatEntry before processing. No-op on failure.
    """
    if not responses.success:
        return
    # The Find client action does not return a StatEntry but a
    # FindSpec. Normalize to a StatEntry.
    stat_responses = [
        r.hit if isinstance(r, rdf_client_fs.FindSpec) else r for r in responses
    ]
    # If this was a pure path matching call without any regex / recursion, we
    # know exactly which node in the component tree we have to process next and
    # get it from the component_path. If this was a regex match though, we
    # sent the client a combined regex that matches all nodes in order to save
    # round trips and client processing time. In that case we only get the
    # base node and have to check for all subnodes if the response actually
    # matches that subnode before we continue processing.
    component_path = responses.request_data.get("component_path")
    if component_path is not None:
        for response in stat_responses:
            self._ProcessResponse(response, [component_path])
    else:
        # This is a combined match.
        base_path = responses.request_data["base_path"]
        base_node = self.FindNode(base_path)
        for response in stat_responses:
            # Collect every subnode whose pathspec matches this response.
            matching_components = []
            for next_node in base_node:
                pathspec = rdf_paths.PathSpec.FromSerializedString(next_node)
                if self._MatchPath(pathspec, response):
                    matching_path = base_path + [next_node]
                    matching_components.append(matching_path)
            if matching_components:
                self._ProcessResponse(
                    response, matching_components, base_wildcard=True)
|
Process the responses from the client.
|
def get(self, test_id):
    '''
    Fetch the report stored under the given test id.
    :param test_id: test id
    :return: Report object
    :raises KeyError: when the DB holds no report for the id
    '''
    self.select('*', 'test_id=?', [test_id])
    record = self._cursor.fetchone()
    if not record:
        raise KeyError('No report with test id %s in the DB' % test_id)
    serialized = self.row_to_dict(record)['content']
    return Report.from_dict(self._deserialize_dict(serialized))
|
get report by the test id
:param test_id: test id
:return: Report object
|
def is_defined(self, obj, force_import=False):
    """Tell whether *obj* is defined in the current namespace."""
    from spyder_kernels.utils.dochelpers import isdefined
    namespace = self._get_current_namespace(with_magics=True)
    return isdefined(obj, force_import=force_import, namespace=namespace)
|
Return True if object is defined in current namespace
|
def create_dialog(obj, obj_name):
    """Creates the editor dialog and returns a tuple (dialog, func) where func
    is the function to be called with the dialog instance as argument, after
    quitting the dialog box

    The role of this intermediate function is to allow easy monkey-patching.
    (uschmitt suggested this indirection here so that he can monkey patch
    oedit to show eMZed related data)

    Note: returns None (instead of the tuple) when the chosen editor's
    setup_and_check step fails, e.g. the object cannot be displayed.
    """
    # Local import
    from spyder_kernels.utils.nsview import (ndarray, FakeObject,
                                             Image, is_known_type, DataFrame,
                                             Series)
    from spyder.plugins.variableexplorer.widgets.texteditor import TextEditor
    from spyder.plugins.variableexplorer.widgets.collectionseditor import (
        CollectionsEditor)
    from spyder.plugins.variableexplorer.widgets.arrayeditor import (
        ArrayEditor)
    if DataFrame is not FakeObject:
        from spyder.plugins.variableexplorer.widgets.dataframeeditor import (
            DataFrameEditor)

    # conv_func converts the edited value back to the caller's type on exit;
    # identity by default, overridden for PIL images below.
    conv_func = lambda data: data
    readonly = not is_known_type(obj)
    if isinstance(obj, ndarray) and ndarray is not FakeObject:
        dialog = ArrayEditor()
        if not dialog.setup_and_check(obj, title=obj_name,
                                      readonly=readonly):
            return
    elif isinstance(obj, Image) and Image is not FakeObject \
            and ndarray is not FakeObject:
        # PIL images are edited as numpy arrays and converted back on exit.
        dialog = ArrayEditor()
        import numpy as np
        data = np.array(obj)
        if not dialog.setup_and_check(data, title=obj_name,
                                      readonly=readonly):
            return
        # NOTE: rebinds the local name Image to the patched module on purpose.
        from spyder.pil_patch import Image
        conv_func = lambda data: Image.fromarray(data, mode=obj.mode)
    elif isinstance(obj, (DataFrame, Series)) and DataFrame is not FakeObject:
        dialog = DataFrameEditor()
        if not dialog.setup_and_check(obj):
            return
    elif is_text_string(obj):
        dialog = TextEditor(obj, title=obj_name, readonly=readonly)
    else:
        # Fallback: generic collections editor for any other object.
        dialog = CollectionsEditor()
        dialog.setup(obj, title=obj_name, readonly=readonly)

    def end_func(dialog):
        return conv_func(dialog.get_value())

    return dialog, end_func
|
Creates the editor dialog and returns a tuple (dialog, func) where func
is the function to be called with the dialog instance as argument, after
quitting the dialog box
The role of this intermediate function is to allow easy monkey-patching.
(uschmitt suggested this indirection here so that he can monkey patch
oedit to show eMZed related data)
|
def list_logstores(self, request):
    """ List all logstores of the requested project.
    An unsuccessful operation will cause a LogException.

    :type request: ListLogstoresRequest
    :param request: the ListLogstores request parameters class.

    :return: ListLogstoresResponse

    :raise: LogException
    """
    project = request.get_project()
    (resp, header) = self._send("GET", project, None, '/logstores', {}, {})
    return ListLogstoresResponse(resp, header)
|
List all logstores of requested project.
An unsuccessful operation will cause a LogException.
:type request: ListLogstoresRequest
:param request: the ListLogstores request parameters class.
:return: ListLogStoresResponse
:raise: LogException
|
def update(self, force=False):
    """Fetch new posts from the server.

    Arguments:
        force (bool): Force a thread update, even if thread has 404'd.

    Returns:
        int: How many new posts have been fetched.
    """
    # The thread has already 404'ed; don't hit the server unless forced.
    if self.is_404 and not force:
        return 0

    # Conditional request: only transfer the thread if it changed server-side.
    if self._last_modified:
        headers = {'If-Modified-Since': self._last_modified}
    else:
        headers = None

    # random connection errors, just return 0 and try again later
    # (bug fix: a bare ``except:`` also trapped KeyboardInterrupt and
    # SystemExit; catch Exception instead so those still propagate)
    try:
        res = self._board._requests_session.get(self._api_url, headers=headers)
    except Exception:
        # try again later
        return 0

    # 304 Not Modified, no new posts.
    if res.status_code == 304:
        return 0

    # 404 Not Found, thread died.
    elif res.status_code == 404:
        self.is_404 = True
        # remove post from cache, because it's gone.
        self._board._thread_cache.pop(self.id, None)
        return 0

    elif res.status_code == 200:
        # If we somehow 404'ed, we should put ourself back in the cache.
        if self.is_404:
            self.is_404 = False
            self._board._thread_cache[self.id] = self

        # Remove
        self.want_update = False
        self.omitted_images = 0
        self.omitted_posts = 0

        self._last_modified = res.headers['Last-Modified']
        posts = res.json()['posts']

        original_post_count = len(self.replies)
        self.topic = Post(self, posts[0])
        if self.last_reply_id and not force:
            # Incremental update: append only posts newer than the last seen.
            self.replies.extend(Post(self, p) for p in posts
                                if p['no'] > self.last_reply_id)
        else:
            # Full refresh: rebuild the reply list in place.
            self.replies[:] = [Post(self, p) for p in posts[1:]]
        new_post_count = len(self.replies)
        post_count_delta = new_post_count - original_post_count
        if not post_count_delta:
            return 0

        self.last_reply_id = self.replies[-1].post_number

        return post_count_delta

    else:
        res.raise_for_status()
|
Fetch new posts from the server.
Arguments:
force (bool): Force a thread update, even if thread has 404'd.
Returns:
int: How many new posts have been fetched.
|
def reftrick(iseq, consdict):
    """ Returns the most common base at each site in order.

    # iseq: 2-D uint8 array of ASCII base codes (rows = sequences,
    #       columns = alignment sites) -- assumed from the indexing; TODO confirm.
    # consdict: rows of (ambiguity_code, base1, base2) used to expand
    #       ambiguity codes into their two component bases.
    # Returns altrefs: (n_sites, 4) uint8 array holding up to four alleles
    # per site, ranked by frequency; column 1 defaults to 46 (ASCII '.'),
    # the "no alternate allele" placeholder.
    """
    altrefs = np.zeros((iseq.shape[1], 4), dtype=np.uint8)
    altrefs[:, 1] = 46

    for col in xrange(iseq.shape[1]):
        ## expand colums with ambigs and remove N-
        fcounts = np.zeros(111, dtype=np.int64)
        counts = np.bincount(iseq[:, col])#, minlength=90)
        fcounts[:counts.shape[0]] = counts
        ## set N and - to zero, wish numba supported minlen arg
        # (78 is ASCII 'N', 45 is ASCII '-')
        fcounts[78] = 0
        fcounts[45] = 0
        ## add ambig counts to true bases
        for aidx in xrange(consdict.shape[0]):
            nbases = fcounts[consdict[aidx, 0]]
            for _ in xrange(nbases):
                fcounts[consdict[aidx, 1]] += 1
                fcounts[consdict[aidx, 2]] += 1
            fcounts[consdict[aidx, 0]] = 0
        ## now get counts from the modified counts arr
        # Repeatedly take the argmax and zero it out to rank the alleles.
        who = np.argmax(fcounts)
        altrefs[col, 0] = who
        fcounts[who] = 0
        ## if an alt allele fill over the "." placeholder
        who = np.argmax(fcounts)
        if who:
            altrefs[col, 1] = who
            fcounts[who] = 0
            ## if 3rd or 4th alleles observed then add to arr
            who = np.argmax(fcounts)
            altrefs[col, 2] = who
            fcounts[who] = 0
            ## if 3rd or 4th alleles observed then add to arr
            who = np.argmax(fcounts)
            altrefs[col, 3] = who
    return altrefs
|
Returns the most common base at each site in order.
|
def _validate_type_scalar(self, value):
    """ Accept any value that is neither a list nor a dict. """
    scalar_types = _int_types + (_str_type, float, date, datetime, bool)
    if isinstance(value, scalar_types):
        return True
|
Is not a list or a dict
|
def make_control_flow_handlers(self, cont_n, status_n, expected_return,
                               has_cont, has_break):
    '''
    Create the statements in charge of gathering control flow information
    for the static_if result, and executes the expected control flow
    instruction
    '''
    # If the extracted function returns values, unpack them from the name
    # bound to `cont_n`; otherwise there is nothing to restore.
    if expected_return:
        assign = cont_ass = [ast.Assign(
            [ast.Tuple(expected_return, ast.Store())],
            ast.Name(cont_n, ast.Load(), None))]
    else:
        assign = cont_ass = []

    # When the function signalled `continue`, re-emit the assignment followed
    # by an actual `continue`; otherwise fall through to the previous handler.
    if has_cont:
        cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None),
                           [ast.Eq()], [ast.Num(LOOP_CONT)])
        cont_ass = [ast.If(cmpr,
                           deepcopy(assign) + [ast.Continue()],
                           cont_ass)]
    # Same pattern for `break`, nesting the continue-handler (if any)
    # inside the else branch.
    if has_break:
        cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None),
                           [ast.Eq()], [ast.Num(LOOP_BREAK)])
        cont_ass = [ast.If(cmpr,
                           deepcopy(assign) + [ast.Break()],
                           cont_ass)]
    return cont_ass
|
Create the statements in charge of gathering control flow information
for the static_if result, and executes the expected control flow
instruction
|
def _parse_values(s):
    '''(INTERNAL) Split a line into a list of values.

    Returns a list for dense rows, or a ``{index: value}`` dict for
    sparse rows; raises BadLayout on ARFF syntax errors.
    '''
    if not _RE_NONTRIVIAL_DATA.search(s):
        # Fast path for trivial cases (unfortunately we have to handle missing
        # values because of the empty string case :(.)
        return [None if s in ('?', '') else s
                for s in next(csv.reader([s]))]

    # _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
    values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
    if not any(errors):
        # Dense row: every token matched cleanly; just strip quoting.
        return [_unquote(v) for v in values]
    if _RE_SPARSE_LINE.match(s):
        try:
            # Sparse row: "{index value, ...}" -> dict keyed by column index.
            return {int(k): _unquote(v)
                    for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
        except ValueError as exc:
            # an ARFF syntax error in sparse data
            # Re-scan to pinpoint the offending token for the error message.
            for match in _RE_SPARSE_KEY_VALUES.finditer(s):
                if not match.group(1):
                    raise BadLayout('Error parsing %r' % match.group())
            raise BadLayout('Unknown parsing error')
    else:
        # an ARFF syntax error
        for match in _RE_DENSE_VALUES.finditer(s):
            if match.group(2):
                raise BadLayout('Error parsing %r' % match.group())
        raise BadLayout('Unknown parsing error')
|
(INTERNAL) Split a line into a list of values
|
def random_adjspecies(sep='', maxlen=8, prevent_stutter=True):
    """
    Return a random adjective/species, separated by `sep`. The keyword
    arguments `maxlen` and `prevent_stutter` are the same as for
    `random_adjspecies_pair`, but note that the maximum length argument is
    not affected by the separator.
    """
    adjective, species = random_adjspecies_pair(maxlen, prevent_stutter)
    return sep.join((adjective, species))
|
Return a random adjective/species, separated by `sep`. The keyword
arguments `maxlen` and `prevent_stutter` are the same as for
`random_adjspecies_pair`, but note that the maximum length argument is
not affected by the separator.
|
def applymap(self, func, **kwargs):
    """Apply *func* element-wise to every pandas DataFrame and wrap the
    result in a new PRDD."""
    mapped = self._rdd.map(lambda frame: frame.applymap(func), **kwargs)
    return self.from_rdd(mapped)
|
Return a new PRDD by applying a function to each element of each
pandas DataFrame.
|
def show_multi_buffer(self):
    """Open the multi buffer tool dialog."""
    from safe.gui.tools.multi_buffer_dialog import MultiBufferDialog
    main_window = self.iface.mainWindow()
    dialog = MultiBufferDialog(main_window, self.iface, self.dock_widget)
    dialog.exec_()
|
Show the multi buffer tool.
|
def genes_with_a_representative_structure(self):
    """DictList: All genes with a representative protein structure."""
    # First keep genes whose protein has a representative structure at all,
    # then require that structure to have an associated structure file.
    with_structure = DictList(
        g for g in self.genes if g.protein.representative_structure)
    return DictList(
        g for g in with_structure
        if g.protein.representative_structure.structure_file)
|
DictList: All genes with a representative protein structure.
|
def get_free_udp_port():
    '''
    Get a free UDP port.

    The OS picks a free port when we bind to port 0; we read it back and
    release the socket. Note this is vulnerable to race conditions:
    another process may claim the port before the caller uses it.

    :return: an available UDP port number (int)
    '''
    import socket
    # Bug fix: the original used SOCK_STREAM, which probes a free *TCP*
    # port. UDP and TCP port spaces are independent, so use SOCK_DGRAM.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.bind(('localhost', 0))
        return sock.getsockname()[1]
    finally:
        # Always release the probe socket, even if bind fails.
        sock.close()
|
Get a free UDP port.
Note this is vulnerable to race conditions.
|
def BitField(value, length, signed=False, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
    '''
    Factory returning an instance of some BitField class.

    .. note::

        Since BitField is frequently used in binary format, multiple aliases were created for it. See aliases.py for more details.
    '''
    cls = _FullRangeBitField if full_range else _LibraryBitField
    return cls(value, length, signed, min_value, max_value, encoder, fuzzable, name)
|
Returns an instance of some BitField class
.. note::
Since BitField is frequently used in binary format, multiple aliases were created for it. See aliases.py for more details.
|
def translate_array(array, lval, obj_count=1, arr_count=1):
    """array has to be any js array for example [1,2,3]
    lval has to be name of this array.
    Returns python code that adds lval to the PY scope it should be put before lval"""
    # Strip the surrounding brackets; work on the element list only.
    array = array[1:-1]
    # Replace nested object/array literals with placeholders so the comma
    # split below cannot be confused by their internal commas.
    array, obj_rep, obj_count = remove_objects(array, obj_count)
    array, arr_rep, arr_count = remove_arrays(array, arr_count)
    #functions can be also defined in arrays, this caused many problems since in Python
    # functions cant be defined inside literal
    # remove functions (they dont contain arrays or objects so can be translated easily)
    # hoisted functions are treated like inline
    array, hoisted, inline = functions.remove_functions(array, all_inline=True)
    assert not hoisted
    arr = []
    # separate elements in array
    for e in argsplit(array, ','):
        # translate expressions in array PyJsLvalInline will not be translated!
        e = exp_translator(e.replace('\n', ''))
        arr.append(e if e else 'None')
    arr = '%s = Js([%s])\n' % (lval, ','.join(arr))
    #But we can have more code to add to define arrays/objects/functions defined inside this array
    # translate nested objects:
    # functions:
    for nested_name, nested_info in inline.iteritems():
        nested_block, nested_args = nested_info
        new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
        arr = new_def + arr
    # NOTE: the loop variable deliberately shadows the `lval` parameter
    # below; each nested definition is prepended so it executes first.
    for lval, obj in obj_rep.iteritems():
        new_def, obj_count, arr_count = translate_object(
            obj, lval, obj_count, arr_count)
        # add object definition BEFORE array definition
        arr = new_def + arr
    for lval, obj in arr_rep.iteritems():
        new_def, obj_count, arr_count = translate_array(
            obj, lval, obj_count, arr_count)
        # add object definition BEFORE array definition
        arr = new_def + arr
    return arr, obj_count, arr_count
|
array has to be any js array for example [1,2,3]
lval has to be name of this array.
Returns python code that adds lval to the PY scope it should be put before lval
|
def cf_string_to_unicode(value):
    """
    Creates a python unicode string from a CFString object

    :param value:
        The CFString to convert

    :return:
        A python unicode string
    """
    # Fast path: CFStringGetCStringPtr may expose the internal UTF-8 buffer
    # without a copy (it returns NULL when no such representation exists).
    string_ptr = CoreFoundation.CFStringGetCStringPtr(
        value,
        kCFStringEncodingUTF8
    )
    string = None if is_null(string_ptr) else ffi.string(string_ptr)
    if string is None:
        # Slow path: copy the string into our own buffer.
        # NOTE(review): fixed 1024-byte buffer -- CFStringGetCString fails
        # for strings whose UTF-8 encoding exceeds it; confirm callers
        # never pass longer strings.
        buffer = buffer_from_bytes(1024)
        result = CoreFoundation.CFStringGetCString(
            value,
            buffer,
            1024,
            kCFStringEncodingUTF8
        )
        if not result:
            raise OSError('Error copying C string from CFStringRef')
        string = byte_string_from_buffer(buffer)
    # Decode the raw UTF-8 bytes to a unicode string.
    if string is not None:
        string = string.decode('utf-8')
    return string
|
Creates a python unicode string from a CFString object
:param value:
The CFString to convert
:return:
A python unicode string
|
def model_attr(attr_name):
    """
    Build a getter that drops the current value and instead reads the
    attribute named *attr_name* off the model found in the context.

    @param attr_name: the name of an attribute belonging to the model.
    @type attr_name: str
    """
    def model_attr(_value, context, **_params):
        return _attr(getattr(context["model"], attr_name))
    return model_attr
|
Creates a getter that will drop the current value
and retrieve the model's attribute with specified name.
@param attr_name: the name of an attribute belonging to the model.
@type attr_name: str
|
def error_bars(self, n_bins, d_min, d_max):
    """
    Error Bars

    Build a histogram estimate of the posterior (from ``self._chain``)
    with per-bin error bars.

    Inputs :
        n_bins :
            number of bins (integer)
        d_min, d_max :
            per-dimension minimum / maximum of the domain; any
            array-like with one entry per chain dimension

    Outputs :
        x :
            bin centers, shape (n_dims, n_bins)
        p_x :
            estimated posterior using the chain on the domain
        error :
            estimated error for p_x

    Raises:
        TypeError: if ``n_bins`` is not integral, or ``d_min``/``d_max``
            have the wrong number of entries.
    """
    # fetch data
    chain = self._chain
    len_chain = len(chain)

    # Chains of scalars have no second shape axis -> one dimension.
    shape = np.shape(chain)
    n_dims = shape[1] if len(shape) > 1 else 1

    # --- argument validation ---
    # Bug fix: the original used `assert` (stripped under -O) wrapped in a
    # bare `except:`; use explicit checks that raise TypeError directly.
    try:
        n_bins_ok = n_bins == int(n_bins)
    except (TypeError, ValueError):
        n_bins_ok = False
    if not n_bins_ok:
        raise TypeError("number of bins has to be an integer")
    n_bins = int(n_bins)

    d_min = np.reshape(np.array(d_min), (-1, 1))
    d_max = np.reshape(np.array(d_max), (-1, 1))
    if np.size(d_min) != n_dims:
        raise TypeError("domain minimum has wrong size")
    if np.size(d_max) != n_dims:
        raise TypeError("domain maximum has wrong size")

    # initialize outputs
    p_x = np.zeros(n_bins)          # estimate of posterior
    error = np.zeros(n_bins)        # error bars
    x = np.zeros((n_dims, n_bins))  # centers of bins

    # Project each sample onto the diagonal d_min -> d_max to pick its bin.
    v = d_max - d_min
    v_2 = np.dot(v.T, v)[0][0]

    # bin count
    for i in range(len_chain):
        bin_no = int(np.floor(np.dot(chain[i].T - d_min, v) / v_2 * n_bins)[0])
        if n_bins > bin_no > -1:
            p_x[bin_no] += 1.

    # Normalize counts to a density estimate.
    dx = np.sqrt(v_2) / n_bins
    p_x = p_x / (len_chain * dx)

    # Binomial standard error per bin, plus the bin center coordinates.
    for i in range(n_bins):
        p = p_x[i]
        error[i] = np.sqrt(p * (1. / dx - p) / (len_chain))
        x[:, i] = (d_min + v * (0.5 + i) / n_bins)[0]

    return x, p_x, error
|
Error Bars
create bars and error bars to plot
Inputs :
n_bins :
number of bins
plot_range : (shape) = (number of dimensions, 2)
matrix which contain the min and max for each dimension as rows
Outputs :
x :
domain
p_x :
estimated posterior using the chain on the domain
error :
estimated error for p_x
|
def primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y):
    """Returns the effective precession spin argument for the larger mass.
    """
    # Select the in-plane spin components belonging to the heavier object,
    # then combine them into the perpendicular spin magnitude.
    sx = primary_spin(mass1, mass2, spin1x, spin2x)
    sy = primary_spin(mass1, mass2, spin1y, spin2y)
    return chi_perp_from_spinx_spiny(sx, sy)
|
Returns the effective precession spin argument for the larger mass.
|
def navigate(self, inst, kind, rel_id, phrase=''):
    '''
    Navigate across a link with some *rel_id* and *phrase* that yields
    instances of some *kind*.
    '''
    key = (kind.upper(), rel_id, phrase)
    if key in self.links:
        return self.links[key].navigate(inst)

    # No direct link: traverse both halves of the associative link and
    # union the results.
    link1, link2 = self._find_assoc_links(kind, rel_id, phrase)
    result = xtuml.OrderedSet()
    for intermediate in link1.navigate(inst):
        result |= link2.navigate(intermediate)
    return result
|
Navigate across a link with some *rel_id* and *phrase* that yields
instances of some *kind*.
|
def _axis_properties(self, axis, title_size, title_offset, label_angle,
                     label_align, color):
    """Assign title/label properties on the axis bound to the *axis* scale."""
    if not self.axes:
        raise ValueError('This Visualization has no axes!')
    # Resolve the axis object whose scale matches the requested name.
    matching = [a for a in self.axes if a.scale == axis]
    target = matching[0]
    self._set_axis_properties(target)
    self._set_all_axis_color(target, color)
    if title_size:
        target.properties.title.font_size = ValueRef(value=title_size)
    if label_angle:
        target.properties.labels.angle = ValueRef(value=label_angle)
    if label_align:
        target.properties.labels.align = ValueRef(value=label_align)
    if title_offset:
        target.properties.title.dy = ValueRef(value=title_offset)
|
Assign axis properties
|
def get_relationship_family_assignment_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with assigning relationships to families.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.relationship.RelationshipFamilyAssignmentSession)
            - a ``RelationshipFamilyAssignmentSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented -
            ``supports_relationship_family_assignment()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_relationship_family_assignment()`` is ``true``.*
    """
    if not self.supports_relationship_family_assignment():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.RelationshipFamilyAssignmentSession(
            proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
|
Gets the ``OsidSession`` associated with assigning relationships to families.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.RelationshipFamilyAssignmentSession)
- a ``RelationshipFamilyAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_relationship_family_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if ``supports_relationship_family_assignment()``
is ``true``.*
|
def open_url(absolute_or_relative_url):
    """
    Loads a web page in the current browser session.

    :param absolute_or_relative_url:
        an absolute url to a web page in case config.base_url is not
        specified, otherwise - a relative url correspondingly

    :Usage:
        open_url('http://mydomain.com/subpage1')
        open_url('http://mydomain.com/subpage2')
        # OR
        config.base_url = 'http://mydomain.com'
        open_url('/subpage1')
        open_url('/subpage2')
    """
    # todo: refactor next line when app_host is removed
    prefix = selene.config.app_host or selene.config.base_url
    driver().get(prefix + absolute_or_relative_url)
|
Loads a web page in the current browser session.
:param absolute_or_relative_url:
an absolute url to web page in case of config.base_url is not specified,
otherwise - relative url correspondingly
:Usage:
open_url('http://mydomain.com/subpage1')
open_url('http://mydomain.com/subpage2')
# OR
config.base_url = 'http://mydomain.com'
open_url('/subpage1')
open_url('/subpage2')
|
def collect(self, step, content):
    '''given a name of a configuration key and the provided content, collect
    the required metadata from the user.

    Parameters
    ==========
    step: the key in the configuration. Can be one of:
        user_message_<name>
        runtime_arg_<name>
        record_asciinema
        record_environment
        user_prompt_<name>
    content: the default value or boolean to indicate doing the step.
    '''
    # Recording steps dispatch straight to their handlers.
    recorders = {'record_asciinema': self.record_asciinema,
                 'record_environment': self.record_environment}

    if step.startswith('user_message'):
        # Just show the message to the user.
        print(content)
    elif step.startswith('user_prompt'):
        # Collect a value from the user (unless deferred to runtime).
        self.collect_argument(step, content)
    elif step in recorders:
        recorders[step]()

    bot.debug(self.data)
|
given a name of a configuration key and the provided content, collect
the required metadata from the user.
Parameters
==========
step: the key in the configuration. Can be one of:
user_message_<name>
runtime_arg_<name>
record_asciinema
record_environment
user_prompt_<name>
content: the default value or boolean to indicate doing the step.
|
def _increment(sign, integer_part, non_repeating_part, base):
    """
    Return an increment radix.

    :param int sign: -1, 0, or 1 as appropriate
    :param integer_part: the integer part
    :type integer_part: list of int
    :param non_repeating_part: the fractional part
    :type non_repeating_part: list of int
    :param int base: the base

    :returns: a Radix object with ``non_repeating_part`` rounded up
    :rtype: Radix

    Complexity: O(len(non_repeating_part + integer_part)
    """
    # Add one to the least significant fractional digit and let the carry
    # ripple up through the integer part.
    (carry, non_repeating_part) = Nats.carry_in(non_repeating_part, 1, base)
    (carry, integer_part) = Nats.carry_in(integer_part, carry, base)
    if carry != 0:
        # Overflow out of the integer part grows it by one digit.
        integer_part = [carry] + integer_part
    return Radix(sign, integer_part, non_repeating_part, [], base, False)
|
Return an increment radix.
:param int sign: -1, 0, or 1 as appropriate
:param integer_part: the integer part
:type integer_part: list of int
:param non_repeating_part: the fractional part
:type non_repeating_part: list of int
:param int base: the base
:returns: a Radix object with ``non_repeating_part`` rounded up
:rtype: Radix
Complexity: O(len(non_repeating_part + integer_part)
|
def emit(self, *args, **kwargs):
    """
    Calls all the connected slots with the provided args and kwargs unless block is activated
    """
    # When blocked, drop the emission entirely.
    if self._block:
        return

    for slot in self._slots:
        if not slot:
            # Skip empty/cleared slot entries.
            continue
        elif isinstance(slot, partial):
            # NOTE(review): partial slots are invoked WITHOUT the emit
            # arguments -- presumably they were bound at connect time.
            slot()
        elif isinstance(slot, weakref.WeakKeyDictionary):
            # For class methods, get the class object and call the method accordingly.
            for obj, method in slot.items():
                method(obj, *args, **kwargs)
        elif isinstance(slot, weakref.ref):
            # If it's a weakref, call the ref to get the instance and then call the func
            # Don't wrap in try/except so we don't risk masking exceptions from the actual func call
            if (slot() is not None):
                slot()(*args, **kwargs)
        else:
            # Else call it in a standard way. Should be just lambdas at this point
            slot(*args, **kwargs)
|
Calls all the connected slots with the provided args and kwargs unless block is activated
|
def simple_mean_function(max_iters=100, optimize=True, plot=True):
    """
    The simplest possible mean function. No parameters, just a simple Sinusoid.
    """
    # Mean function: a plain sine with no tunable parameters, so its
    # gradient update is a no-op.
    mean_fn = GPy.core.Mapping(1, 1)
    mean_fn.f = np.sin
    mean_fn.update_gradients = lambda a, b: None

    # Toy data: a sinusoid plus a higher-frequency term plus noise.
    X = np.linspace(0, 10, 50).reshape(-1, 1)
    Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)

    model = GPy.core.GP(X, Y,
                        kernel=GPy.kern.RBF(1),
                        likelihood=GPy.likelihoods.Gaussian(),
                        mean_function=mean_fn)
    if optimize:
        model.optimize(max_iters=max_iters)
    if plot:
        model.plot(plot_limits=(-10, 15))
    return model
|
The simplest possible mean function. No parameters, just a simple Sinusoid.
|
def _loop_wrapper_func(func, args, shared_mem_run, shared_mem_pause, interval, sigint, sigterm, name,
                       logging_level, conn_send, func_running, log_queue):
    """
    to be executed as a separate process (that's why this functions is declared static)

    Repeatedly calls ``func(*args)`` every ``interval`` seconds while the
    shared flag ``shared_mem_run`` is true. ``shared_mem_pause`` suspends
    the calls without stopping the loop; ``func`` returning True, raising,
    or a LoopInterruptError (signal) stops the loop. ``func_running``
    mirrors the loop state for the parent; stdout is redirected through
    ``conn_send`` and log records are shipped over ``log_queue``.
    """
    prefix = get_identifier(name) + ' '

    # Per-process logger that forwards records to the parent via the queue.
    global log
    log = logging.getLogger(__name__+".log_{}".format(get_identifier(name, bold=False)))
    log.setLevel(logging_level)
    log.addHandler(QueueHandler(log_queue))

    sys.stdout = StdoutPipe(conn_send)

    log.debug("enter wrapper_func")

    # Presumably installs SIGINT/SIGTERM handlers that raise
    # LoopInterruptError (see SIG_handler_Loop) -- handled below.
    SIG_handler_Loop(sigint, sigterm, log, prefix)

    func_running.value = True

    error = False
    while shared_mem_run.value:
        try:
            # in pause mode, simply sleep
            if shared_mem_pause.value:
                quit_loop = False
            else:
                # if not pause mode -> call func and see what happens
                try:
                    quit_loop = func(*args)
                except LoopInterruptError:
                    raise
                except Exception as e:
                    log.error("error %s occurred in loop calling 'func(*args)'", type(e))
                    log.info("show traceback.print_exc()\n%s", traceback.format_exc())
                    error = True
                    break

            if quit_loop is True:
                log.debug("loop stooped because func returned True")
                break

            time.sleep(interval)
        except LoopInterruptError:
            log.debug("quit wrapper_func due to InterruptedError")
            break

    func_running.value = False

    if error:
        sys.exit(-1)
    else:
        log.debug("wrapper_func terminates gracefully")

    # gets rid of the following warnings
    #   Exception ignored in: <_io.FileIO name='/dev/null' mode='rb'>
    #   ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='r' encoding='UTF-8'>
    try:
        if mp.get_start_method() == "spawn":
            sys.stdin.close()
    except AttributeError:
        pass
|
to be executed as a separate process (that's why this functions is declared static)
|
def reindex(self, new_index=None, index_conf=None):
    '''Rebuild the current index.

    This function could be useful in the case you want to change some
    index settings/mappings and you don't want to lose all the entries
    belonging to that index.

    This function is built in such a way that you can continue to use the
    old index name; this is achieved using index aliases. The old index
    will be cloned into a new one with the given `index_conf`. If we are
    working on an alias, it is redirected to the new index. Otherwise a
    brand new alias with the old index name is created in such a way that
    it points to the newly created index. Keep in mind that even if you
    can continue to use the same index name, the old index will be
    deleted.

    :param new_index: name for the new physical index; when omitted a
        ``_v<N>`` suffix on the original name is appended/incremented
    :param index_conf: Configuration to be used in the new index creation.
        This param will be passed directly to :py:func:`DB.create_index`
    '''
    # Determine whether index_name is an alias and resolve the physical index.
    alias = self.index_name if self.es.indices.exists_alias(name=self.index_name) else None
    if alias:
        original_index = self.es.indices.get_alias(self.index_name).popitem()[0]
    else:
        original_index = self.index_name

    # Auto-generate the new index name by bumping a trailing _v<N> suffix.
    if new_index is None:
        mtc = re.match(r"^.*_v(\d)*$", original_index)
        if mtc:
            new_index = original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1)
        else:
            new_index = original_index + '_v1'

    log.debug("Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}".format(alias, original_index, new_index))
    self.clone_index(new_index, index_conf=index_conf)

    if alias:
        # Atomically repoint the existing alias at the new index.
        log.debug("Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']".format(alias, original_index, new_index))
        self.es.indices.update_aliases(body={
            "actions" : [
                { "remove" : { "alias": alias, "index" : original_index} },
                { "add" : { "alias": alias, "index" : new_index } }
            ]})

    log.debug("Deleting old index: '{}'".format(original_index))
    self.es.indices.delete(original_index)

    if not alias:
        # The old name becomes an alias for the new index, so callers keep
        # working. (Typo fix in log message: "Crating" -> "Creating".)
        log.debug("Creating new alias: ['{0}' -> '{1}']".format(original_index, new_index))
        self.es.indices.update_aliases(body={
            "actions" : [
                { "add" : { "alias": original_index, "index" : new_index } }
            ]})
|
Rebuild the current index
This function could be useful in the case you want to change some index settings/mappings
and you don't want to lose all the entries belonging to that index.
This function is built in such a way that you can continue to use the old index name,
this is achieved using index aliases.
The old index will be cloned into a new one with the given `index_conf`.
If we are working on an alias, it is redirected to the new index.
Otherwise a brand new alias with the old index name is created in such a way that
points to the newly create index.
Keep in mind that even if you can continue to use the same index name,
the old index will be deleted.
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
|
def update(self, key=values.unset, value=values.unset):
    """
    Update the VariableInstance

    :param unicode key: The key
    :param unicode value: The value

    :returns: Updated VariableInstance
    :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
    """
    payload = self._version.update(
        'POST',
        self._uri,
        data=values.of({'Key': key, 'Value': value, }),
    )

    solution = self._solution
    return VariableInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        environment_sid=solution['environment_sid'],
        sid=solution['sid'],
    )
|
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
|
def read_config_environment(self, config_data=None, quiet=False):
    """read_config_environment is the second effort to get a username
    and key to authenticate to the Kaggle API. The environment keys
    are equivalent to the kaggle.json file, but with "KAGGLE_" prefix
    to define a unique namespace.

    Parameters
    ==========
    config_data: a partially loaded configuration dictionary (optional)
    quiet: suppress verbose print of output (default is False)
    """
    result = {} if config_data is None else config_data
    prefix = 'KAGGLE_'
    # Fold every KAGGLE_-prefixed variable into the config, keyed by the
    # lowercased remainder of its name.
    for name, value in os.environ.items():
        if name.startswith(prefix):
            result[name.replace(prefix, '', 1).lower()] = value
    return result
|
read_config_environment is the second effort to get a username
and key to authenticate to the Kaggle API. The environment keys
are equivalent to the kaggle.json file, but with "KAGGLE_" prefix
to define a unique namespace.
Parameters
==========
config_data: a partially loaded configuration dictionary (optional)
quiet: suppress verbose print of output (default is False)
|
def check_blank_before_after_class(self, class_, docstring):
    """D20{3,4}: Class docstring should have 1 blank line around them.

    Insert a blank line before and after all docstrings (one-line or
    multi-line) that document a class -- generally speaking, the class's
    methods are separated from each other by a single blank line, and the
    docstring needs to be offset from the first method by a blank line;
    for symmetry, put a blank line between the class header and the
    docstring.
    """
    # NOTE: this gives false-positive in this case
    # class Foo:
    #
    #     """Docstring."""
    #
    #
    # # comment here
    # def foo(): pass
    if docstring:
        # Split the class source around the docstring and count the runs
        # of consecutive blank lines immediately before / after it.
        before, _, after = class_.source.partition(docstring)
        blanks_before = list(map(is_blank, before.split('\n')[:-1]))
        blanks_after = list(map(is_blank, after.split('\n')[1:]))
        blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
        blanks_after_count = sum(takewhile(bool, blanks_after))
        # NOTE(review): D211 (no blank line before) and D203 (exactly one
        # blank line before) are contradictory by design -- presumably the
        # user's selected convention decides which is enabled. Confirm.
        if blanks_before_count != 0:
            yield violations.D211(blanks_before_count)
        if blanks_before_count != 1:
            yield violations.D203(blanks_before_count)
        if not all(blanks_after) and blanks_after_count != 1:
            yield violations.D204(blanks_after_count)
|
D20{3,4}: Class docstring should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
|
def home(request):
    """Simple homepage view."""
    context = {}
    if request.user.is_authenticated():
        accesses = request.user.accountaccess_set.all()
        if not accesses:
            access = None
        else:
            # Show profile info from the user's first linked account.
            access = accesses[0]
            client = access.api_client
            context['info'] = client.get_profile_info(raw_token=access.access_token)
    return render(request, 'home.html', context)
|
Simple homepage view.
|
def draw(graph, fname):
    """Render *graph* with Graphviz ``dot`` and save the drawing to *fname*."""
    agraph = networkx.nx_agraph.to_agraph(graph)
    agraph.draw(fname, prog='dot')
|
Draw a graph and save it into a file
|
def _make_attachment(self, attachment, str_encoding=None):
    """Return an EmailMessage.attachments item formatted for sending with Mailjet.

    Returns mailjet_dict, is_inline_image
    """
    is_inline_image = False
    if isinstance(attachment, MIMEBase):
        content = attachment.get_payload(decode=True)
        mimetype = attachment.get_content_type()
        if (attachment.get_content_maintype() == 'image'
                and attachment['Content-ID'] is not None):
            # Inline images are referenced by Content-ID rather than filename.
            is_inline_image = True
            name = attachment['Content-ID']
        else:
            name = attachment.get_filename()
    else:
        (name, content, mimetype) = attachment
        # Guess missing mimetype from filename, borrowed from
        # django.core.mail.EmailMessage._create_attachment()
        if mimetype is None and name is not None:
            mimetype, _ = mimetypes.guess_type(name)
        if mimetype is None:
            mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
    try:
        # noinspection PyUnresolvedReferences
        text_type = unicode  # Python 2.x unicode string
    except NameError:
        # Python 3 doesn't differentiate between strings and unicode
        text_type = str
    if isinstance(content, text_type):
        # Text content must be encoded to bytes before base64 encoding.
        content = content.encode(str_encoding)
    return {
        'Content-type': mimetype,
        'Filename': name or '',
        'content': b64encode(content).decode('ascii'),
    }, is_inline_image
|
Returns EmailMessage.attachments item formatted for sending with Mailjet
Returns mailjet_dict, is_inline_image
|
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
    '''
    This sets a VXR to be the first and last VXR in the VDR.

    Pointing both the head and tail fields at the same VXR marks it as
    the only VXR in the VDR's list.
    '''
    # Field offsets within a VDR record: +28 = VXRhead, +36 = VXRtail;
    # both are 8-byte values.
    for field_offset in (28, 36):
        self._update_offset_value(f, vdr_offset + field_offset, 8, VXRoffset)
|
This sets a VXR to be the first and last VXR in the VDR
|
def derive_data(data):
    """Based on the data derive additional data.

    For every sample, adds three aggregate keys to its value dict:
    ``total_called_variants``, ``total_called_variants_known`` and
    ``total_called_variants_novel``. Input counts are stored as strings
    and converted with int() before summing.
    """
    called_keys = ('TOTAL_SNPS', 'TOTAL_COMPLEX_INDELS',
                   'TOTAL_MULTIALLELIC_SNPS', 'TOTAL_INDELS')
    known_keys = ('NUM_IN_DB_SNP', 'NUM_IN_DB_SNP_COMPLEX_INDELS',
                  'NUM_IN_DB_SNP_MULTIALLELIC')
    for s_name, values in data.items():
        # Sum all variants that have been called.
        total = sum(int(values[key]) for key in called_keys)
        values['total_called_variants'] = total
        # Sum called variants that are already known, plus known indels
        # (all indels minus the novel ones).
        known = sum(int(values[key]) for key in known_keys)
        known += int(values['TOTAL_INDELS']) - int(values['NOVEL_INDELS'])
        values['total_called_variants_known'] = known
        # Whatever remains is novel.
        values['total_called_variants_novel'] = total - known
|
Based on the data derive additional data
|
def _generate_routes(self, namespace):
    """
    Generates Python methods that correspond to routes in the namespace.

    When an auth_type argument was supplied on the command line, only
    routes whose ``auth`` attribute matches one of the requested auth
    types are generated; otherwise every route is generated.
    """
    # Hack: needed for _docf()
    self.cur_namespace = namespace
    # list of auth_types supported in this base class.
    # this is passed with the new -w flag
    if self.args.auth_type is not None:
        self.supported_auth_types = [auth_type.strip().lower() for auth_type in self.args.auth_type.split(',')]
    check_route_name_conflict(namespace)
    for route in namespace.routes:
        # compatibility mode : included routes are passed by whitelist
        # actual auth attr included in the route is ignored in this mode.
        if self.supported_auth_types is None:
            self._generate_route_helper(namespace, route)
            # Download-style routes get an extra helper variant.
            if route.attrs.get('style') == 'download':
                self._generate_route_helper(namespace, route, True)
        else:
            route_auth_attr = None
            if route.attrs is not None:
                route_auth_attr = route.attrs.get('auth')
            # In filtered mode, routes without an auth attribute are skipped.
            if route_auth_attr is None:
                continue
            route_auth_modes = [mode.strip().lower() for mode in route_auth_attr.split(',')]
            for base_auth_type in self.supported_auth_types:
                if base_auth_type in route_auth_modes:
                    self._generate_route_helper(namespace, route)
                    if route.attrs.get('style') == 'download':
                        self._generate_route_helper(namespace, route, True)
                    # Generate each route at most once even if several
                    # auth types match.
                    break
|
Generates Python methods that correspond to routes in the namespace.
|
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict):
    """ Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the
    barrier concurrency is left.

    :param decider_state: the decider state of the barrier concurrency state
    :param child_errors: error of the concurrent branches
    :param final_outcomes_dict: dictionary of all outcomes of the concurrent branches
    :return: the error produced by the decider state run, or None on success
    """
    decider_state.state_execution_status = StateExecutionStatus.ACTIVE
    # forward the decider specific data
    decider_state.child_errors = child_errors
    decider_state.final_outcomes_dict = final_outcomes_dict
    # standard state execution
    decider_state.input_data = self.get_inputs_for_state(decider_state)
    decider_state.output_data = self.create_output_dictionary_for_state(decider_state)
    decider_state.start(self.execution_history, backward_execution=False)
    # Block until the decider state's execution thread has finished.
    decider_state.join()
    decider_state_error = None
    # Outcome id -1 appears to signal an errored run here (it carries the
    # 'error' payload in output_data) -- confirm against the outcome enum.
    if decider_state.final_outcome.outcome_id == -1:
        if 'error' in decider_state.output_data:
            decider_state_error = decider_state.output_data['error']
    # standard output data processing
    self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state)
    self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state)
    return decider_state_error
|
Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the
barrier concurrency is left.
:param decider_state: the decider state of the barrier concurrency state
:param child_errors: error of the concurrent branches
:param final_outcomes_dict: dictionary of all outcomes of the concurrent branches
:return:
|
def find_or_new(self, id, columns=None):
    """
    Find a model by its primary key or return new instance of the related model.

    :param id: The primary key
    :type id: mixed

    :param columns: The columns to retrieve (defaults to all columns)
    :type columns: list

    :rtype: Collection or Model
    """
    columns = ["*"] if columns is None else columns
    found = self._query.find(id, columns)
    if found is not None:
        return found
    # Nothing matched: build a fresh related instance pre-linked to the
    # parent via the foreign key.
    fresh = self._related.new_instance()
    fresh.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    return fresh
|
Find a model by its primary key or return new instance of the related model.
:param id: The primary key
:type id: mixed
:param columns: The columns to retrieve
:type columns: list
:rtype: Collection or Model
|
def circle(radius=None, center=None, **kwargs):
    """
    Create a Path2D containing a single closed circle.

    Parameters
    --------------
    radius : float
      Radius of the circle (defaults to 1.0)
    center : (2,) float
      Center of the circle in XY (defaults to the origin)
    kwargs : dict
      Passed through to the Path2D constructor

    Returns
    -------------
    circle : Path2D
      Path containing the specified circle
    """
    from .path import Path2D

    if center is None:
        center = [0.0, 0.0]
    else:
        center = np.asanyarray(center, dtype=np.float64)
    if radius is None:
        radius = 1.0
    else:
        radius = float(radius)

    # (3, 2) float, points on arc
    # NOTE(review): `center` is both passed to to_threepoint and added to its
    # result -- verify this double offset is intended by to_threepoint's API.
    three = arc.to_threepoint(angles=[0, np.pi],
                              center=center,
                              radius=radius) + center

    result = Path2D(entities=[entities.Arc(points=np.arange(3), closed=True)],
                    vertices=three,
                    **kwargs)

    return result
|
Create a Path2D containing a single closed circle.
Parameters
--------------
radius : float
Radius of the circle (defaults to 1.0)
center : (2,) float
Center of the circle in XY (defaults to the origin)
Returns
-------------
circle : Path2D
Path containing the specified circle
|
def apply_order(self):
    '''Naively apply query orders.'''
    self._ensure_modification_is_safe()
    # Nothing to do when the query carries no ordering clauses.
    if not self.query.orders:
        return
    self._iterable = Order.sorted(self._iterable, self.query.orders)
|
Naively apply query orders.
|
def set_position(self, resource_id, to_position, db_session=None, *args, **kwargs):
    """
    Sets node position for new node in the tree.

    Delegates to the underlying tree service.

    :param resource_id: resource to move
    :param to_position: new position
    :param db_session:
    :return:
    """
    return self.service.set_position(
        resource_id=resource_id,
        to_position=to_position,
        db_session=db_session,
        *args,
        **kwargs
    )
|
Sets node position for new node in the tree
:param resource_id: resource to move
:param to_position: new position
:param db_session:
:return:
|
def _dispense_plunger_position(self, ul):
    """Calculate axis position for a given liquid volume.

    Translates the passed liquid volume (ul) to an absolute coordinate on
    the plunger axis, measured from the calibrated 'bottom' position.
    Calibration of the pipette motor's ul-to-mm conversion is required.
    """
    travel_mm = ul / self._ul_per_mm(ul, 'dispense')
    # Round to micrometer precision to avoid float noise in motor targets.
    return round(self._get_plunger_position('bottom') + travel_mm, 6)
|
Calculate axis position for a given liquid volume.
Translates the passed liquid volume to absolute coordinates
on the axis associated with this pipette.
Calibration of the pipette motor's ul-to-mm conversion is required
|
def piprot():
    """Parse the command line arguments and jump into the piprot() function
    (unless the user just wants the post request hook).
    """
    cli_parser = argparse.ArgumentParser(
        epilog="Here's hoping your requirements are nice and fresh!"
    )
    cli_parser.add_argument(
        '-v', '--verbose', action='store_true',
        help='verbosity, can be supplied more than once '
             '(enabled by default, use --quiet to disable)'
    )
    cli_parser.add_argument('-l', '--latest', action='store_true',
                            help='print the lastest available version for out '
                                 'of date requirements')
    cli_parser.add_argument('-x', '--verbatim', action='store_true',
                            help='output the full requirements file, with '
                                 'added comments with potential updates')
    cli_parser.add_argument('-q', '--quiet', action='store_true',
                            help='be a little less verbose with the output '
                                 '(<0.3 behaviour)')
    cli_parser.add_argument('-o', '--outdated', action='store_true',
                            help='only list outdated requirements')
    cli_parser.add_argument('-g', '--github',
                            help='Test the requirements from a GitHub repo. '
                                 'Requires that a `requirements.txt` file '
                                 'exists in the root of the repository.')
    cli_parser.add_argument(
        '-b', '--branch',
        help='The branch to test requirements from, used with '
             'the Github URL support.')
    cli_parser.add_argument(
        '-t', '--token',
        help='Github personal access token to be used with '
             'the Github URL support.')
    cli_parser.add_argument(
        '-p', '--path',
        help='Path to requirements file in remote repository.')
    cli_parser.add_argument(
        '-d', '--delay',
        help='Delay before an outdated package triggers an error.'
             '(in days, default to 1).')
    cli_parser.add_argument('-u', '--url',
                            help='URL to requirements file.')
    # if there is a requirements.txt file, use it by default. Otherwise print
    # usage if there are no arguments.
    nargs = '+'
    if (
        '--github' in sys.argv
        or '-g' in sys.argv
        or '-u' in sys.argv
        or '--url' in sys.argv
    ):
        nargs = "*"
        default = None
    if os.path.isfile('requirements.txt'):
        nargs = "*"
        default = [open('requirements.txt')]
    cli_parser.add_argument('file', nargs=nargs, type=argparse.FileType(),
                            default=default, help='requirements file(s), use '
                                                  '`-` for stdin')
    cli_args = cli_parser.parse_args()
    # BUGFIX: with --github/--url and no local requirements.txt, `file`
    # defaults to None; guard before calling len() to avoid a TypeError.
    if cli_args.file and len(cli_args.file) > 1 and cli_args.verbatim:
        sys.exit('--verbatim only allowed for single requirements files')
    verbose = True
    if cli_args.quiet:
        verbose = False
    elif cli_args.verbatim:
        verbose = False
    # call the main function to kick off the real work
    main(req_files=cli_args.file, verbose=verbose, outdated=cli_args.outdated,
         latest=cli_args.latest, verbatim=cli_args.verbatim,
         repo=cli_args.github, branch=cli_args.branch, path=cli_args.path,
         token=cli_args.token, url=cli_args.url, delay=cli_args.delay)
|
Parse the command line arguments and jump into the piprot() function
(unless the user just wants the post request hook).
|
def setsockopt(self, *sockopts):
    """Add socket options to set.

    Accepts either a single list/tuple of ``(level, option, value)``
    triples, or one triple spread across three positional arguments.
    """
    # Normalize both calling conventions to a sequence of triples.
    # NOTE: an exact type check (not isinstance) mirrors the original
    # dispatch, so tuple subclasses are treated as a bare triple.
    if type(sockopts[0]) in (list, tuple):
        entries = sockopts[0]
    else:
        entries = [sockopts]
    for level, option, value in entries:
        self.connection.sockopts.add((level, option, value))
|
Add socket options to set
|
def check_int_param(self, param, low, high, name):
    """
    Check if the value of the given parameter is in the given range
    and an int.

    Designed for testing parameters like `mu` and `eps`.
    To pass this function the variable `param` must be able to be converted
    into an int with a value between `low` and `high`. Either bound may be
    None, in which case that side is unbounded.

    **Args:**

    * `param` : parameter to check (int or similar)

    * `low` : lowest allowed value (int), or None

    * `high` : highest allowed value (int), or None

    * `name` : name of the parameter (string), it is used for an error message

    **Returns:**

    * `param` : checked parameter converted to int

    **Raises:**

    * ValueError if `param` is not int-convertible or out of range
    """
    try:
        param = int(param)
    except (TypeError, ValueError):
        # Narrow except (was a bare except): only conversion failures.
        raise ValueError(
            'Parameter {} is not int or similar'.format(name)
        )
    # Check each bound independently so a single-sided range works
    # (the original crashed with TypeError when only one bound was None).
    if low is not None and param < low:
        raise ValueError('Parameter {} is not in range <{}, {}>'
                         .format(name, low, high))
    if high is not None and param > high:
        raise ValueError('Parameter {} is not in range <{}, {}>'
                         .format(name, low, high))
    return param
|
Check if the value of the given parameter is in the given range
and an int.
Designed for testing parameters like `mu` and `eps`.
To pass this function the variable `param` must be able to be converted
into a float with a value between `low` and `high`.
**Args:**
* `param` : parameter to check (int or similar)
* `low` : lowest allowed value (int), or None
* `high` : highest allowed value (int), or None
* `name` : name of the parameter (string), it is used for an error message
**Returns:**
* `param` : checked parameter converted to float
|
def set_references(references, components):
    """
    Sets references to multiple components.

    To set references components must implement [[IReferenceable]] interface.
    If they don't the call to this method has no effect.

    :param references: the references to be set.

    :param components: a list of components to set the references to;
                       ``None`` is a no-op.
    """
    # Idiom fix: identity comparison with None (was `== None`).
    if components is None:
        return

    for component in components:
        Referencer.set_references_for_one(references, component)
|
Sets references to multiple components.
To set references components must implement [[IReferenceable]] interface.
If they don't the call to this method has no effect.
:param references: the references to be set.
:param components: a list of components to set the references to.
|
def clear_task_instances(tis,
                         session,
                         activate_dag_runs=True,
                         dag=None,
                         ):
    """
    Clears a set of task instances, but makes sure the running ones
    get killed.

    :param tis: a list of task instances
    :param session: current session
    :param activate_dag_runs: flag to check for active dag run
    :param dag: DAG object
    """
    job_ids = []
    for ti in tis:
        if ti.state == State.RUNNING:
            # Running instances are shut down via their job rather than
            # reset directly; collect job ids for the second pass below.
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        else:
            task_id = ti.task_id
            if dag and dag.has_task(task_id):
                task = dag.get_task(task_id)
                task_retries = task.retries
                # Grant a fresh retry budget on top of the tries used so far.
                ti.max_tries = ti.try_number + task_retries - 1
            else:
                # Ignore errors when updating max_tries if dag is None or
                # task not found in dag since database records could be
                # outdated. We make max_tries the maximum value of its
                # original max_tries or the current task try number.
                ti.max_tries = max(ti.max_tries, ti.try_number - 1)
            ti.state = State.NONE
            session.merge(ti)
    if job_ids:
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN
    if activate_dag_runs and tis:
        from airflow.models.dagrun import DagRun  # Avoid circular import
        # Re-activate the dag runs of every cleared task instance so the
        # scheduler picks the cleared tasks up again.
        drs = session.query(DagRun).filter(
            DagRun.dag_id.in_({ti.dag_id for ti in tis}),
            DagRun.execution_date.in_({ti.execution_date for ti in tis}),
        ).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = timezone.utcnow()
|
Clears a set of task instances, but makes sure the running ones
get killed.
:param tis: a list of task instances
:param session: current session
:param activate_dag_runs: flag to check for active dag run
:param dag: DAG object
|
def printActiveIndices(self, state, andValues=False):
    """
    Print the list of ``[column, cellIdx]`` indices for each of the active cells
    in state.

    :param state: numpy array of cell activity; 2-D (columns x cells) or
        1-D (columns only) -- presumably binary/activation values, verify
        against callers
    :param andValues: if True, also print each cell's value from ``state``
    """
    # NOTE: Python 2 print statements; trailing commas suppress newlines.
    if len(state.shape) == 2:
        (cols, cellIdxs) = state.nonzero()
    else:
        # 1-D state: treat each active column as cell index 0.
        cols = state.nonzero()[0]
        cellIdxs = numpy.zeros(len(cols))

    if len(cols) == 0:
        print "NONE"
        return

    prevCol = -1
    for (col, cellIdx) in zip(cols, cellIdxs):
        if col != prevCol:
            # Close the previous column's bracket before opening a new one.
            if prevCol != -1:
                print "] ",
            print "Col %d: [" % (col),
            prevCol = col

        if andValues:
            if len(state.shape) == 2:
                value = state[col, cellIdx]
            else:
                value = state[col]
            print "%d: %s," % (cellIdx, value),
        else:
            print "%d," % (cellIdx),
    print "]"
|
Print the list of ``[column, cellIdx]`` indices for each of the active cells
in state.
:param state: TODO: document
:param andValues: TODO: document
|
def _rmv_deps_answer(self):
    """Remove dependencies answer.

    Returns the configured answer from self.meta when it is a pre-set
    'y'/'Y'; otherwise prompts the user interactively (Python 2
    raw_input). Exits cleanly on EOF (e.g. Ctrl-D).
    """
    if self.meta.remove_deps_answer in ["y", "Y"]:
        remove_dep = self.meta.remove_deps_answer
    else:
        try:
            remove_dep = raw_input(
                "\nRemove dependencies (maybe used by "
                "other packages) [y/N]? ")
            print("")
        except EOFError:
            print("")   # new line at exit
            raise SystemExit()
    return remove_dep
|
Remove dependencies answer
|
def available_discounts(cls, user, categories, products):
    ''' Returns all discounts available to this user for the given
    categories and products. The discounts also list the available quantity
    for this user, not including products that are pending purchase. '''

    filtered_clauses = cls._filtered_clauses(user)

    # clauses that match provided categories
    categories = set(categories)
    # clauses that match provided products
    products = set(products)
    # clauses that match categories for provided products
    product_categories = set(product.category for product in products)
    # (Not relevant: clauses that match products in provided categories)
    all_categories = categories | product_categories

    # Keep only clauses that target one of the given products, or any
    # category relevant to the given products/categories.
    filtered_clauses = (
        clause for clause in filtered_clauses
        if hasattr(clause, 'product') and clause.product in products or
        hasattr(clause, 'category') and clause.category in all_categories
    )

    discounts = []

    # Markers so that we don't need to evaluate given conditions
    # more than once
    accepted_discounts = set()
    failed_discounts = set()

    for clause in filtered_clauses:
        discount = clause.discount
        cond = ConditionController.for_condition(discount)

        past_use_count = clause.past_use_count
        if past_use_count >= clause.quantity:
            # This clause has exceeded its use count
            pass
        elif discount not in failed_discounts:
            # This clause is still available
            is_accepted = discount in accepted_discounts
            if is_accepted or cond.is_met(user, filtered=True):
                # This clause is valid for this user
                discounts.append(DiscountAndQuantity(
                    discount=discount,
                    clause=clause,
                    quantity=clause.quantity - past_use_count,
                ))
                accepted_discounts.add(discount)
            else:
                # This clause is not valid for this user
                failed_discounts.add(discount)
    return discounts
|
Returns all discounts available to this user for the given
categories and products. The discounts also list the available quantity
for this user, not including products that are pending purchase.
|
def __access(self, ts):
    """ Record an API access.

    :param ts: access timestamp to store
    """
    # INSERT OR REPLACE keeps a single row per domain holding the latest
    # timestamp; the connection context manager commits on success.
    with self.connection:
        self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
                                (ts, self.domain))
|
Record an API access.
|
def list_tags(self, pattern: str = None) -> typing.List[str]:
    """
    Returns list of tags, optionally matching "pattern".

    :param pattern: optional substring to filter results
    :type pattern: str
    :return: existing tags
    :rtype: list of str
    """
    all_tags: typing.List[str] = list(map(str, self.repo.tags))
    if not pattern:
        LOGGER.debug('tags found in repo: %s', all_tags)
        return all_tags
    # Substring filter, not a regex.
    LOGGER.debug('filtering tags with pattern: %s', pattern)
    matching: typing.List[str] = [name for name in all_tags if pattern in name]
    LOGGER.debug('filtered tags: %s', matching)
    return matching
|
Returns list of tags, optionally matching "pattern"
:param pattern: optional pattern to filter results
:type pattern: str
:return: existing tags
:rtype: list of str
|
def register(self, *model_list, **options):
    """
    Registers the given model(s) with the given databrowse site.

    The model(s) should be Model classes, not instances.

    If a databrowse class isn't given, it will use DefaultModelDatabrowse
    (the default databrowse options).

    If a model is already registered, this will raise AlreadyRegistered.
    """
    browse_cls = options.pop('databrowse_class', DefaultModelDatabrowse)
    for model in model_list:
        # Registration happens model-by-model, so earlier models stay
        # registered if a later duplicate raises.
        if model in self.registry:
            raise AlreadyRegistered('The model %s is already registered' %
                                    model.__name__)
        self.registry[model] = browse_cls
|
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
|
def get_pdf_from_html(html: str,
                      header_html: str = None,
                      footer_html: str = None,
                      wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
                      wkhtmltopdf_options: Dict[str, Any] = None,
                      file_encoding: str = "utf-8",
                      debug_options: bool = False,
                      debug_content: bool = False,
                      debug_wkhtmltopdf_args: bool = True,
                      fix_pdfkit_encoding_bug: bool = None,
                      processor: str = _DEFAULT_PROCESSOR) -> bytes:
    """
    Takes HTML and returns a PDF.

    Thin wrapper around :func:`make_pdf_from_html` with ``on_disk=False``;
    see that function for the meaning of every argument.

    Returns:
        the PDF binary as a ``bytes`` object
    """
    forwarded = dict(
        html=html,
        header_html=header_html,
        footer_html=footer_html,
        wkhtmltopdf_filename=wkhtmltopdf_filename,
        wkhtmltopdf_options=wkhtmltopdf_options,
        file_encoding=file_encoding,
        debug_options=debug_options,
        debug_content=debug_content,
        debug_wkhtmltopdf_args=debug_wkhtmltopdf_args,
        fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug,
        processor=processor,
    )
    # on_disk=False makes make_pdf_from_html return the PDF as bytes.
    return make_pdf_from_html(on_disk=False, **forwarded)
|
Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object
|
def get(self):
    """
    *get the conesearch object*

    **Return:**
        - ``conesearch``

    .. todo::

        - @review: when complete, clean get method
        - @review: when complete add logging
    """
    self.log.info('starting the ``get`` method')

    # SEARCH NED WITH SINGLE CONESEARCHES TO RETURN LIST OF MATCHED NAMES
    names, searchParams = self.get_crossmatch_names(
        listOfCoordinates=self.listOfCoordinates,
        radiusArcsec=self.arcsec
    )

    # NOW PERFORM A NAME SEARCH AGAINST THE MATCHED NAMES
    search = namesearch.namesearch(
        log=self.log,
        names=names,
        quiet=False,
        searchParams=searchParams,
        verbose=self.verbose,
        outputFilePath=self.outputFilePath
    )
    search.get()

    self.log.info('completed the ``get`` method')
    # NOTE(review): `conesearch` is not defined anywhere in this method --
    # it must be a module-level name; verify whether `search` (or its
    # results) was the intended return value.
    return conesearch
|
*get the conesearch object*
**Return:**
- ``conesearch``
.. todo::
- @review: when complete, clean get method
- @review: when complete add logging
|
def syllable_split(string):
    '''Split 'string' into (stressed) syllables and punctuation/whitespace.'''
    # Alternatives, in order: stressed syllable ('x or `x), plain syllable,
    # a run of non-syllable characters, or any single non-dot character.
    pattern = r"'[{a}]+|`[{a}]+|[{a}]+|[^{a}'`\.]+|[^\.]{{1}}".format(a=A)
    return re.findall(pattern, string, flags=FLAGS)
|
Split 'string' into (stressed) syllables and punctuation/whitespace.
|
def validate(self, value):
    """
    Applies the validation criteria.

    Returns value, new value, or None if invalid.
    Overload this in derived classes.
    """
    # Blank fields pass through untouched when blanks are allowed.
    if self.blank and not value:
        return value
    try:
        number = int(value)
    except ValueError:
        return None
    # Only non-negative integers are accepted.
    if number < 0:
        return None
    return value
|
Applies the validation criteria.
Returns value, new value, or None if invalid.
Overload this in derived classes.
|
def _has_data(self):
    """Check if there is any data (any non-None value in any series)."""
    def _values(series):
        # First level: a list-like series is unwrapped via its first
        # element; second level: each entry may itself be list-like.
        for entry in (series[0] if is_list_like(series) else [series]):
            for value in (entry if is_list_like(entry) else [entry]):
                yield value

    return any(
        any(value is not None for value in _values(series))
        for series in self.raw_series
    )
|
Check if there is any data
|
def set_maintext(self, index):
    """Set the maintext_lb to display text information about the given reftrack

    :param index: the index
    :type index: :class:`QtGui.QModelIndex`
    :returns: None
    :rtype: None
    :raises: None
    """
    role = QtCore.Qt.DisplayRole
    model = index.model()
    parts = []
    # Columns 1, 2, 3, 5 and 6 hold the displayable fields; skip empties.
    for column in (1, 2, 3, 5, 6):
        value = model.index(index.row(), column, index.parent()).data(role)
        if value is not None:
            parts.append(value)
    self.maintext_lb.setText(" | ".join(parts))
|
Set the maintext_lb to display text information about the given reftrack
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
def t_PLUS(self, t):
    r"\+"
    # The docstring above IS the token's regular expression -- PLY reads
    # it via __doc__, so it must remain exactly r"\+".
    # Record the end position of the token for downstream error reporting.
    t.endlexpos = t.lexpos + len(t.value)
    return t
|
r"\+
|
def get_blocks_overview(block_representation_list, coin_symbol='btc', txn_limit=None, api_key=None):
    '''
    Batch request version of get_blocks_overview.

    Fetches several blocks in one API call by joining their
    representations with ';' in the URL.
    '''
    for block_representation in block_representation_list:
        assert is_valid_block_representation(
            block_representation=block_representation,
            coin_symbol=coin_symbol)
    assert is_valid_coin_symbol(coin_symbol)

    joined_blocks = ';'.join(str(rep) for rep in block_representation_list)
    url = make_url(coin_symbol, **dict(blocks=joined_blocks))
    logger.info(url)

    params = {}
    if api_key:
        params['token'] = api_key
    if txn_limit:
        params['limit'] = txn_limit

    response = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    return [_clean_tx(response_dict=entry) for entry in get_valid_json(response)]
|
Batch request version of get_blocks_overview
|
def match_http_version(entry, http_version, regex=True):
    """
    Helper function that returns whether the entry's response used the
    given HTTP version.

    :param entry: entry object to analyze
    :param http_version: ``str`` of HTTP version to match
        (corrected: the docstring previously documented a nonexistent
        ``request_type`` parameter)
    :param regex: ``bool`` indicating whether to use a regex or string match
    """
    response_version = entry['response']['httpVersion']
    if regex:
        # Case-insensitive regex search anywhere in the version string.
        return re.search(http_version, response_version,
                         flags=re.IGNORECASE) is not None
    # Exact, case-sensitive string comparison.
    return response_version == http_version
|
Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param request_type: ``str`` of request type to match
:param regex: ``bool`` indicating whether to use a regex or string match
|
def url_builder(self, endpoint, *, root=None, params=None, url_params=None):
    """Create a URL for the specified endpoint.

    Arguments:
      endpoint (:py:class:`str`): The API endpoint to access.
      root: (:py:class:`str`, optional): The root URL for the
        service API (defaults to ``self.ROOT``).
      params: (:py:class:`dict`, optional): The values to format
        into the created URL (defaults to ``None``).
      url_params: (:py:class:`dict`, optional): Parameters to add
        to the end of the URL as a query string (defaults to ``None``).

    Returns:
      :py:class:`str`: The resulting URL.
    """
    base = self.ROOT if root is None else root
    scheme, netloc, path, _, _ = urlsplit(base)
    # Resolve the endpoint against the root's path, attach the query
    # string, then fill any {placeholders} with `params`.
    full_path = urljoin(path, endpoint)
    query = urlencode(url_params or {})
    template = urlunsplit((scheme, netloc, full_path, query, ''))
    return template.format(**params or {})
|
Create a URL for the specified endpoint.
Arguments:
endpoint (:py:class:`str`): The API endpoint to access.
root: (:py:class:`str`, optional): The root URL for the
service API.
params: (:py:class:`dict`, optional): The values for format
into the created URL (defaults to ``None``).
url_params: (:py:class:`dict`, optional): Parameters to add
to the end of the URL (defaults to ``None``).
Returns:
:py:class:`str`: The resulting URL.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.