code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _ParseFileEntry(self, knowledge_base, file_entry):
"""Parses a file entry for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
PreProcessFail: if the preprocessing fails.
"""
file_object = file_entry.GetFileObject()
try:
self._ParseFileData(knowledge_base, file_object)
finally:
file_object.close()
|
Parses a file entry for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
PreProcessFail: if the preprocessing fails.
|
def isnumber(self, string, *args):
    """Return True if *string* parses as a number, False otherwise.

    args:
        string (str): match
    returns:
        bool
    """
    try:
        # Only the success/failure of the parse matters; the parsed
        # value and unit were previously unpacked but never used.
        utility.analyze_number(string)
    except SyntaxError:
        return False
    return True
|
Is number
args:
string (str): match
returns:
bool
|
def begin_run_group(project):
    """
    Begin a run_group in the database.

    A run_group groups a set of runs for a given project. This models a series
    of runs that form a complete binary runtime test.

    Args:
        project: The project we begin a new run_group for.

    Returns:
        ``(group, session)`` where group is the created group in the
        database and session is the database session this group lives in.
    """
    from datetime import datetime

    from benchbuild.utils.db import create_run_group

    run_group, session = create_run_group(project)
    run_group.begin = datetime.now()
    run_group.status = 'running'
    session.commit()
    return run_group, session
|
Begin a run_group in the database.
A run_group groups a set of runs for a given project. This models a series
of runs that form a complete binary runtime test.
Args:
project: The project we begin a new run_group for.
Returns:
``(group, session)`` where group is the created group in the
database and session is the database session this group lives in.
|
def get_choices(timezones, grouped=False):
    """Retrieves timezone choices from any iterable (normally pytz)."""
    # Named key for grouping: numeric offset plus its display form.
    Offset = namedtuple('TZOffset', 'value offset_string')
    by_offset = defaultdict(list)
    for name in iter(timezones):
        # Current time in this zone gives us the live UTC offset.
        localized_now = datetime.now(pytz.timezone(name))
        raw_offset = localized_now.strftime("%z")  # e.g. "-0500" / "+0500"
        # Render "GMT-12:00" / "GMT+12:00" from the regex capture groups.
        gmt_label = 'GMT{plus_minus}{hours}:{minutes}'.format(
            **TIMEZONE_OFFSET_REGEX.match(raw_offset).groupdict()
        )
        if grouped:
            label = name
        else:
            label = '({timezone_offset_string}) {tz}'.format(
                timezone_offset_string=gmt_label,
                tz=name,
            )
        key = Offset(value=int(raw_offset), offset_string=gmt_label)
        by_offset[key].append((name, label))
    choices = []
    # Emit zones ordered by numeric offset.
    for key in sorted(by_offset, key=attrgetter('value')):
        entries = tuple(by_offset[key])
        if grouped:
            choices.append((key.offset_string, entries))
        else:
            choices.extend(entries)
    return tuple(choices)
|
Retrieves timezone choices from any iterable (normally pytz).
|
def handle_task(self, uuid_task, worker=None):
    """Handle snapshotted event.

    Args:
        uuid_task: ``(uuid, task)`` pair describing the snapshotted task.
        worker: optional worker; when the task carries its own worker
            hostname it is (re)resolved via ``self.handle_worker``.

    Returns:
        Result of ``self.update_task`` for the task's current state.
    """
    uuid, task = uuid_task
    if task.worker and task.worker.hostname:
        worker = self.handle_worker(
            (task.worker.hostname, task.worker),
        )
    defaults = {
        'name': task.name,
        'args': task.args,
        'kwargs': task.kwargs,
        'eta': correct_awareness(maybe_iso8601(task.eta)),
        'expires': correct_awareness(maybe_iso8601(task.expires)),
        'state': task.state,
        'tstamp': fromtimestamp(task.timestamp),
        'result': task.result or task.exception,
        'traceback': task.traceback,
        'runtime': task.runtime,
        'worker': worker,
    }
    # Some fields are only stored in the RECEIVED event, so remove unset
    # values from the defaults so they are not overwritten by subsequent
    # states.  (A plain loop replaces the original side-effecting list
    # comprehension; .get() avoids a KeyError for attributes that were
    # already popped or never present.)
    for attr in NOT_SAVED_ATTRIBUTES:
        if defaults.get(attr) is None:
            defaults.pop(attr, None)
    return self.update_task(task.state,
                            task_id=uuid, defaults=defaults)
|
Handle snapshotted event.
|
def parse(self, text):
    """Parses and renders a text as HTML regarding current format.
    """
    fmt = self.format
    if fmt == 'markdown':
        try:
            import markdown
        except ImportError:
            raise RuntimeError(u"Looks like markdown is not installed")
        if text.startswith(u'\ufeff'):  # strip a leading unicode BOM
            text = text[1:]
        return markdown.markdown(text, extensions=self.md_extensions)
    if fmt == 'restructuredtext':
        try:
            from landslide.rst import html_body
        except ImportError:
            raise RuntimeError(u"Looks like docutils are not installed")
        html = html_body(text, input_encoding=self.encoding)
        # RST emits a lot of extra markup that we strip for our use case.
        for pattern, replacement, mode in self.RST_REPLACEMENTS:
            html = re.sub(re.compile(pattern, mode), replacement, html, 0)
        return html.strip()
    if fmt == 'textile':
        try:
            import textile
        except ImportError:
            raise RuntimeError(u"Looks like textile is not installed")
        return textile.textile(text.replace('\n---\n', '\n<hr />\n'),
                               encoding=self.encoding)
    raise NotImplementedError(u"Unsupported format %s, cannot parse" % fmt)
|
Parses and renders a text as HTML regarding current format.
|
def import_rsa_key(pem_data):
    """
    Extract an RSA key from a PEM-encoded X.509 certificate

    :param pem_data: RSA key encoded in standard form
    :return: rsa.RSAPublicKey instance
    """
    if pem_data.startswith(PREFIX):
        pem_bytes = bytes(pem_data, 'utf-8')
    else:
        # Bare base64 body: wrap it with the PEM delimiters first.
        pem_bytes = bytes('{}\n{}\n{}'.format(PREFIX, pem_data, POSTFIX),
                          'utf-8')
    cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
    return cert.public_key()
|
Extract an RSA key from a PEM-encoded X.509 certificate
:param pem_data: RSA key encoded in standard form
:return: rsa.RSAPublicKey instance
|
def _get_base(role, **conn):
    """
    Determine whether the boto get_role call needs to be made or if we
    already have all that data in the role object.

    :param role: dict containing (at the very least) role_name and/or arn.
    :param conn: dict containing enough information to make a connection to the desired account.
    :return: Camelized dict describing role containing all base_fields.
    """
    base_fields = frozenset(
        ['Arn', 'AssumeRolePolicyDocument', 'Path', 'RoleId', 'RoleName', 'CreateDate'])
    # Fetch from IAM only when any base field is missing from the given dict.
    if not base_fields.issubset(role):
        role_name = _get_name_from_structure(role, 'RoleName')
        role = CloudAux.go('iam.client.get_role', RoleName=role_name, **conn)
        role = role['Role']
    # cast CreateDate from a datetime to something JSON serializable.
    role.update(dict(CreateDate=get_iso_string(role['CreateDate'])))
    role['_version'] = 3
    return role
|
Determine whether the boto get_role call needs to be made or if we already have all that data
in the role object.
:param role: dict containing (at the very least) role_name and/or arn.
:param conn: dict containing enough information to make a connection to the desired account.
:return: Camelized dict describing role containing all base_fields.
|
def stop(self):
    """
    Stop services and requestors and then connection.
    :return: self
    """
    LOGGER.debug("natsd.Driver.stop")
    # Every requester is stopped unconditionally.
    for req in self.requester_registry:
        req.stop()
    self.requester_registry.clear()
    # Services are only stopped when they were actually started.
    started = [svc for svc in self.services_registry if svc.is_started]
    for svc in started:
        svc.stop()
    self.services_registry.clear()
    return self
|
Stop services and requestors and then connection.
:return: self
|
def get_active_choices(self, language_code=None, site_id=None):
    """
    Find out which translations should be visible in the site.
    It returns a list with either a single choice (the current language),
    or a list with the current language + fallback language.
    """
    code = language_code if language_code is not None else get_language()
    lang_dict = self.get_language(code, site_id=site_id)
    if lang_dict['hide_untranslated']:
        # Untranslated content is hidden: only the current language counts.
        return [code]
    fallbacks = [lang for lang in lang_dict['fallbacks'] if lang != code]
    return [code] + fallbacks
|
Find out which translations should be visible in the site.
It returns a list with either a single choice (the current language),
or a list with the current language + fallback language.
|
def refresh_jwt_token(self, token, override_access_lifespan=None):
    """
    Creates a new token for a user if and only if the old token's access
    permission is expired but its refresh permission is not yet expired.
    The new token's refresh expiration moment is the same as the old
    token's, but the new token's access expiration is refreshed.

    :param: token: The existing jwt token that needs to
                   be replaced with a new, refreshed
                   token
    :param: override_access_lifespan: Overrides the instance's access
                                      lifespan to set a custom duration
                                      after which the new token's
                                      accessibility will expire. May not
                                      exceed the refresh lifespan
    """
    moment = pendulum.now('UTC')
    # Note: we disable exp verification because we do custom checks here
    with InvalidTokenHeader.handle_errors('failed to decode JWT token'):
        data = jwt.decode(
            token,
            self.encode_key,
            algorithms=self.allowed_algorithms,
            options={'verify_exp': False},
        )
    # Confirm the token is valid for refresh and the user still exists/passes
    # the instance's user checks before minting a new token.
    self._validate_jwt_data(data, access_type=AccessType.refresh)
    user = self.user_class.identify(data['id'])
    self._check_user(user)
    if override_access_lifespan is None:
        access_lifespan = self.access_lifespan
    else:
        access_lifespan = override_access_lifespan
    refresh_expiration = data['rf_exp']
    # The new access expiration may never exceed the original refresh
    # expiration (min() enforces the "May not exceed" contract above).
    access_expiration = min(
        (moment + access_lifespan).int_timestamp,
        refresh_expiration,
    )
    # Carry over any non-reserved (custom) claims from the old token.
    custom_claims = {
        k: v for (k, v) in data.items() if k not in RESERVED_CLAIMS
    }
    payload_parts = dict(
        iat=moment.int_timestamp,
        exp=access_expiration,
        rf_exp=refresh_expiration,
        jti=data['jti'],
        id=data['id'],
        rls=','.join(user.rolenames),
        **custom_claims
    )
    return jwt.encode(
        payload_parts, self.encode_key, self.encode_algorithm,
    ).decode('utf-8')
|
Creates a new token for a user if and only if the old token's access
permission is expired but its refresh permission is not yet expired.
The new token's refresh expiration moment is the same as the old
token's, but the new token's access expiration is refreshed
:param: token: The existing jwt token that needs to
be replaced with a new, refreshed
token
:param: override_access_lifespan: Overrides the instance's access
lifespan to set a custom duration
after which the new token's
accessibility will expire. May not
exceed the refresh lifespan
|
def remove(self):
    """
    Remove this endpoint from the Ariane server (also evicting it from the
    local caches first).

    :return: self when the server reported a deletion problem (so the caller
        can inspect/retry), else None
    """
    LOGGER.debug("Endpoint.remove - " + self.id + " - " + self.url)
    if self.id is None:
        # Never persisted server-side: nothing to delete.
        return None
    else:
        # Evict every cached params-lookup that points at this endpoint id.
        if self.id in EndpointService.local_cache_by_id:
            for search_params in EndpointService.local_cache_by_id[self.id]:
                if search_params in EndpointService.local_cache_by_params.keys():
                    EndpointService.local_cache_by_params.pop(search_params)
            EndpointService.local_cache_by_id.pop(self.id)
        params = SessionService.complete_transactional_req({
            'ID': self.id
        })
        # Non-REST drivers embed the operation in the message properties;
        # the REST driver encodes it in the HTTP request instead.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'deleteEndpoint'
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': 'delete', 'parameters': params}
        response = EndpointService.requester.call(args)
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            # Message-based drivers return a future-like object; resolve it.
            response = response.get()
        if response.rc != 0:
            LOGGER.warning(
                'Endpoint.remove - Problem while deleting endpoint ' + str(self.id) +
                'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) +
                " (" + str(response.rc) + ")"
            )
            # Overload errors are re-raised so callers can back off and retry.
            if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                raise ArianeMappingOverloadError("Endpoint.remove",
                                                 ArianeMappingOverloadError.ERROR_MSG)
            # traceback.print_stack()
            return self
        else:
            # Deletion succeeded: re-sync the parent node so it no longer
            # references this endpoint.
            if self.parent_node is not None:
                self.parent_node.sync()
            return None
|
remove this endpoint from Ariane server
:return:
|
def sub_array_2d_from_sub_array_1d(self, sub_array_1d):
    """ Map a 1D sub-array the same dimension as the sub-grid (e.g. including
    sub-pixels) to its original masked 2D sub array.

    Parameters
    -----------
    sub_array_1d : ndarray
        The 1D sub_array which is mapped to its masked 2D sub-array.
    """
    sub_size = self.sub_grid_size
    # The 2D sub-array is the mask shape scaled up by the sub-grid size.
    sub_shape = (self.mask.shape[0] * sub_size,
                 self.mask.shape[1] * sub_size)
    one_to_two = self.mask.masked_sub_grid_index_to_sub_pixel(
        sub_grid_size=sub_size)
    return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(
        array_1d=sub_array_1d, shape=sub_shape, one_to_two=one_to_two)
|
Map a 1D sub-array the same dimension as the sub-grid (e.g. including sub-pixels) to its original masked
2D sub array.
Parameters
-----------
sub_array_1d : ndarray
The 1D sub_array which is mapped to its masked 2D sub-array.
|
def str2url(str):
    """
    Take a text string and replace accented characters with their closest
    7-bit ASCII equivalents, returning a plain ASCII string usable in URLs.

    :param str: text to transliterate (the parameter shadows the builtin
        name; kept for backward compatibility with existing callers).
    :return: the transliterated string.
    """
    # BUG FIX: the old implementation did ``str = str.encode('utf-8')``,
    # which on Python 3 turned the text into bytes and made the subsequent
    # str-argument replace() calls raise TypeError. We operate on str only.
    mfrom = "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîï"
    to = "AAAAAAECEEEEIIIIDNOOOOOOUUUUYSaaaaaaaceeeeiiii"
    mfrom += "ñòóôõöøùúûüýÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģ"
    to += "noooooouuuuyyaaaaaaccccccccddddeeeeeeeeeegggggggg"
    mfrom += "ĤĥĦħĨĩĪīĬĭĮįİıĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘř"
    to += "hhhhiiiiiiiiiijjkkkllllllllllnnnnnnnnnoooooooorrrrrr"
    mfrom += "ŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀƂƃƄƅƇƈƉƊƐƑƒƓƔ"
    to += "ssssssssttttttuuuuuuuuuuuuwwyyyzzzzzzfbbbbbccddeffgv"
    mfrom += "ƖƗƘƙƚƝƞƟƠƤƦƫƬƭƮƯưƱƲƳƴƵƶǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩ"
    to += "likklnnoopettttuuuuyyzzaaiioouuuuuuuuuueaaaaeeggggkk"
    mfrom += "ǪǫǬǭǰǴǵǷǸǹǺǻǼǽǾǿȀȁȂȃȄȅȆȇȈȉȊȋȌȍȎȏȐȑȒȓȔȕȖȗȘșȚțȞȟȤȥȦȧȨȩ"
    to += "oooojggpnnaaeeooaaaaeeeeiiiioooorrrruuuusstthhzzaaee"
    mfrom += "ȪȫȬȭȮȯȰȱȲȳḀḁḂḃḄḅḆḇḈḉḊḋḌḍḎḏḐḑḒḓḔḕḖḗḘḙḚḛḜḝḞḟḠḡḢḣḤḥḦḧḨḩḪḫ"
    to += "ooooooooyyaabbbbbbccddddddddddeeeeeeeeeeffgghhhhhhhhhh"
    mfrom += "ḬḭḮḯḰḱḲḳḴḵḶḷḸḹḺḻḼḽḾḿṀṁṂṃṄṅṆṇṈṉṊṋṌṍṎṏṐṑṒṓṔṕṖṗṘṙṚṛṜṝṞṟ"
    to += "iiiikkkkkkllllllllmmmmmmnnnnnnnnoooooooopppprrrrrrrr"
    mfrom += "ṠṡṢṣṤṥṦṧṨṩṪṫṬṭṮṯṰṱṲṳṴṵṶṷṸṹṺṻṼṽṾṿẀẁẂẃẄẅẆẇẈẉẊẋẌẍẎẏẐẑẒẓẔẕ"
    to += "ssssssssssttttttttuuuuuuuuuuvvvvwwwwwwwwwwxxxxxyzzzzzz"
    mfrom += "ẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊị"
    to += "htwyafaaaaaaaaaaaaaaaaaaaaaaaaeeeeeeeeeeeeeeeeiiii"
    mfrom += "ỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ"
    to += "oooooooooooooooooooooooouuuuuuuuuuuuuuyyyyyyyy"
    # Build one char→char translation table instead of chained replace()
    # calls; zip() pairing preserves the original 1:1 mapping semantics.
    return str.translate({ord(src): dst for src, dst in zip(mfrom, to)})
|
Takes a UTF-8 string and replaces all characters with the equivalent in 7-bit
ASCII. It returns a plain ASCII string usable in URLs.
|
def valuefrompostdata(self, postdata):
    """Search the POST data for this parameter's value and return it as a
    float (or None when absent/empty). Does not set the value; the result
    must be passed explicitly to parameter.set()."""
    raw = postdata.get(self.id, '')
    if raw == '':
        return None
    return float(raw)
|
This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set()
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListItemContext for this SyncListItemInstance
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
"""
if self._context is None:
self._context = SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
)
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListItemContext for this SyncListItemInstance
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
|
def toVName(name, stripNum=0, upper=False):
    """
    Turn a Python name into an iCalendar style name,
    optionally uppercase and with characters stripped off.
    """
    result = name.upper() if upper else name
    if stripNum:
        # Drop the trailing stripNum characters.
        result = result[:-stripNum]
    return result.replace('_', '-')
|
Turn a Python name into an iCalendar style name,
optionally uppercase and with characters stripped off.
|
def delete(self, subnet_id):
    """
    This is bad delete function
    because one vpc can have more than one subnet.
    It is Ok if user only use CAL for manage cloud resource
    We will update ASAP.
    """
    # Look up the subnet first to learn which VPC owns it.
    described = self.client.describe_subnets(SubnetIds=[subnet_id])
    vpc_id = described.get('Subnets')[0].get('VpcId')
    # Delete the subnet, then its parent VPC.
    self.client.delete_subnet(SubnetId=subnet_id)
    return self.client.delete_vpc(VpcId=vpc_id)
|
This is a bad delete function,
because one VPC can have more than one subnet.
It is OK if the user only uses CAL to manage cloud resources.
We will update it ASAP.
|
def getSkeletalSummaryData(self, action):
    """Reads summary information about the current pose of the skeleton associated with the given action."""
    summary = VRSkeletalSummaryData_t()
    # The C API fills the struct through the by-reference pointer.
    error = self.function_table.getSkeletalSummaryData(action, byref(summary))
    return error, summary
|
Reads summary information about the current pose of the skeleton associated with the given action.
|
def _get_namespace2go2term(go2terms):
    """Group GO IDs by namespace."""
    grouped = cx.defaultdict(dict)
    for go_id, term in go2terms.items():
        grouped[term.namespace][go_id] = term
    return grouped
|
Group GO IDs by namespace.
|
def agreement_weighted(ci, wts):
    '''
    D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the
    exception that each partitions contribution is weighted according to
    the corresponding scalar value stored in the vector WTS. As an example,
    suppose CI contained partitions obtained using some heuristic for
    maximizing modularity. A possible choice for WTS might be the Q metric
    (Newman's modularity score). Such a choice would add more weight to
    higher modularity partitions.

    NOTE: Unlike AGREEMENT, this script does not have the input argument
    BUFFSZ.

    Parameters
    ----------
    ci : MxN np.ndarray
        set of M (possibly degenerate) partitions of N nodes
    wts : Mx1 np.ndarray
        relative weight of each partition

    Returns
    -------
    D : NxN np.ndarray
        weighted agreement matrix
    '''
    partitions = np.array(ci)
    num_partitions, n = partitions.shape
    # Normalize the weights so they sum to one.
    weights = np.array(wts) / np.sum(wts)
    D = np.zeros((n, n))
    for idx, w in enumerate(weights):
        indicator = dummyvar(partitions[idx, :].reshape(1, n))
        # Co-membership matrix of this partition, scaled by its weight.
        D += np.dot(indicator, indicator.T) * w
    return D
|
D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the
exception that each partitions contribution is weighted according to
the corresponding scalar value stored in the vector WTS. As an example,
suppose CI contained partitions obtained using some heuristic for
maximizing modularity. A possible choice for WTS might be the Q metric
(Newman's modularity score). Such a choice would add more weight to
higher modularity partitions.
NOTE: Unlike AGREEMENT, this script does not have the input argument
BUFFSZ.
Parameters
----------
ci : MxN np.ndarray
set of M (possibly degenerate) partitions of N nodes
wts : Mx1 np.ndarray
relative weight of each partition
Returns
-------
D : NxN np.ndarray
weighted agreement matrix
|
def birth(self):
    '''
    Create the individual (compute the spline curve)
    '''
    # Fit and evaluate the real and imaginary parts separately, since the
    # spline routines operate on real-valued data only.
    for part in ('real', 'imag'):
        tck = scipy.interpolate.splrep(self.x, getattr(self.y, part))
        setattr(self.y_int, part, scipy.interpolate.splev(self.x_int, tck))
|
Create the individual (compute the spline curve)
|
def create(cls, interface_id, logical_interface_ref, **kw):
    """
    :param int interface_id: the interface id
    :param str logical_ref: logical interface reference, must be unique from
        inline intfs
    :rtype: dict
    """
    data = {
        'inspect_unspecified_vlans': True,
        'logical_interface_ref': logical_interface_ref,
        'nicid': str(interface_id),
    }
    # Only forward the reset flag when the caller explicitly supplied it.
    if 'reset_interface_nicid' in kw:
        data['reset_interface_nicid'] = kw['reset_interface_nicid']
    return cls(data)
|
:param int interface_id: the interface id
:param str logical_ref: logical interface reference, must be unique from
inline intfs
:rtype: dict
|
def _validate_data(dataset, target, features=None, validation_set='auto'):
    """
    Validate and canonicalize training and validation data.

    Parameters
    ----------
    dataset : SFrame
        Dataset for training the model.
    target : string
        Name of the column containing the target variable.
    features : list[string], optional
        List of feature names used.
    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance, with
        the same schema as the training dataset. Can also be None or 'auto'.

    Returns
    -------
    dataset : SFrame
        The input dataset, minus any columns not referenced by target or
        features
    validation_set : SFrame or str
        A canonicalized version of the input validation_set. For SFrame
        arguments, the returned SFrame only includes those columns referenced by
        target or features. SFrame arguments that do not match the schema of
        dataset, or string arguments that are not 'auto', trigger an exception.
    """
    _raise_error_if_not_sframe(dataset, "training dataset")

    # Determine columns to keep
    if features is None:
        features = [feat for feat in dataset.column_names() if feat != target]
    if not hasattr(features, '__iter__'):
        raise TypeError("Input 'features' must be a list.")
    # BUG FIX: the original validated with all([...]) and then referenced
    # the comprehension variable `x` in the error message, where it was out
    # of scope — raising NameError instead of the intended TypeError. A
    # plain loop names the offending feature correctly.
    for feature in features:
        if not isinstance(feature, str):
            raise TypeError(
                "Invalid feature %s: Feature names must be of type str" % feature)

    # Check validation_set argument
    if isinstance(validation_set, str):
        # Only string value allowed is 'auto'
        if validation_set != 'auto':
            raise TypeError('Unrecognized value for validation_set.')
    elif isinstance(validation_set, _SFrame):
        # Attempt to append the two datasets together to check schema
        validation_set.head().append(dataset.head())
        # Reduce validation set to requested columns
        validation_set = _toolkits_select_columns(
            validation_set, features + [target])
    elif not validation_set is None:
        raise TypeError("validation_set must be either 'auto', None, or an "
                        "SFrame matching the training data.")

    # Reduce training set to requested columns
    dataset = _toolkits_select_columns(dataset, features + [target])
    return dataset, validation_set
|
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
|
def complete(self, text, state):
    """Return the next possible completion for 'text'.

    This is called successively with state == 0, 1, 2, ... until it
    returns None. The completion should begin with 'text'.
    """
    if self.use_main_ns:
        self.namespace = __main__.__dict__

    if not text.strip():
        # Blank/whitespace prompt: offer a literal tab rather than
        # symbol completions, but only on the first call.
        if state != 0:
            return None
        if _readline_available:
            readline.insert_text('\t')
            readline.redisplay()
            return ''
        return '\t'

    if state == 0:
        # First call for this text: (re)compute the match list.
        if "." in text:
            self.matches = self.attr_matches(text)
        else:
            self.matches = self.global_matches(text)
    try:
        return self.matches[state]
    except IndexError:
        return None
|
Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
|
def gamma(self, gamma=1.0):
    """Apply gamma correction to the channels of the image. If *gamma* is a
    tuple, then it should have as many elements as the channels of the
    image, and the gamma correction is applied elementwise. If *gamma* is a
    number, the same gamma correction is applied on every channel, if there
    are several channels in the image. The behaviour of :func:`gamma` is
    undefined outside the normal [0,1] range of the channels.
    """
    # NOTE(review): the length check accepts sets, but the branch below that
    # builds gamma_list only handles tuple/list — a set of the right length
    # falls through to the scalar branch and later fails in float(); confirm
    # whether sets were ever meant to be supported.
    if(isinstance(gamma, (list, tuple, set)) and
       len(gamma) != len(self.channels)):
        raise ValueError("Number of channels and gamma components differ.")
    if isinstance(gamma, (tuple, list)):
        gamma_list = list(gamma)
    else:
        # Scalar gamma: apply the same correction to every channel.
        gamma_list = [gamma] * len(self.channels)
    for i in range(len(self.channels)):
        gamma = float(gamma_list[i])
        if gamma < 0:
            raise ValueError("Gamma correction must be a positive number.")
        logger.debug("Applying gamma %f", gamma)
        if gamma == 1.0:
            # Identity correction — skip the costly exponentiation.
            continue
        if isinstance(self.channels[i], np.ma.core.MaskedArray):
            if ne:
                # numexpr path: evaluate the power in one C-level pass while
                # reusing the existing mask without copying data.
                self.channels[i] = np.ma.array(
                    ne.evaluate("data ** (1.0 / gamma)",
                                local_dict={"data": self.channels[i].data,
                                            'gamma': gamma}),
                    mask=self.channels[i].mask,
                    copy=False)
            else:
                self.channels[i] = np.ma.array(self.channels[i].data **
                                               (1.0 / gamma),
                                               mask=self.channels[i].mask,
                                               copy=False)
        else:
            # Plain ndarray: only correct non-negative samples; negative
            # values pass through unchanged (fractional powers of negatives
            # would yield NaNs).
            self.channels[i] = np.where(self.channels[i] >= 0,
                                        self.channels[i] **
                                        (1.0 / gamma),
                                        self.channels[i])
|
Apply gamma correction to the channels of the image. If *gamma* is a
tuple, then it should have as many elements as the channels of the
image, and the gamma correction is applied elementwise. If *gamma* is a
number, the same gamma correction is applied on every channel, if there
are several channels in the image. The behaviour of :func:`gamma` is
undefined outside the normal [0,1] range of the channels.
|
def normalize_map_between(dictionary, norm_min, norm_max):
    """
    Performs linear normalization of all values in the dict between norm_min
    and norm_max.

    :param dictionary: dict whose values are normalized
    :param norm_min: smallest normalized value
    :param norm_max: largest normalized value
    :return: a new dict with float values within [norm_min, norm_max].
        Returns {} for fewer than two entries. When every input value is
        equal, all entries map to norm_min (the original implementation
        raised ZeroDivisionError in that case).
    """
    if len(dictionary) < 2:
        return {}
    values = dictionary.values()
    map_min = min(values)
    map_range = max(values) - map_min
    if map_range == 0:
        # BUG FIX: all values identical — there is no spread to scale, so
        # pin everything to the lower bound instead of dividing by zero.
        return {key: float(norm_min) for key in dictionary}
    range_factor = (norm_max - norm_min) / float(map_range)
    return {
        key: norm_min + (value - map_min) * range_factor
        for key, value in dictionary.items()
    }
|
Performs linear normalization of all values in Map between normMin and normMax
:param: map Map to normalize values for
:param: normMin Smallest normalized value
:param: normMax Largest normalized value
:return: A new map with double values within [normMin, normMax]
|
def _autobox(content, format):
    '''
    Autobox response content.

    :param content: Response content
    :type content: str
    :param format: Format to return
    :type format: `yaxil.Format`
    :returns: Autoboxed content
    :rtype: dict|xml.etree.ElementTree.Element|csvreader
    '''
    if format == Format.JSON:
        return json.loads(content)
    elif format == Format.XML:
        return etree.fromstring(content)
    elif format == Format.CSV:
        try:
            # Byte-string content: csv can read it directly from BytesIO.
            return csv.reader(io.BytesIO(content))
        except TypeError:
            # Unicode fallback, as per
            # https://docs.python.org/2/library/csv.html#examples
            # NOTE(review): this branch targets Python 2 (it uses the
            # `unicode` builtin); on Python 3 it would raise NameError —
            # confirm the intended runtime.
            def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
                # csv.py doesn't do Unicode; encode temporarily as UTF-8:
                csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                                        dialect=dialect, **kwargs)
                for row in csv_reader:
                    # decode UTF-8 back to Unicode, cell by cell:
                    yield [unicode(cell, 'utf-8') for cell in row]
            def utf_8_encoder(unicode_csv_data):
                for line in unicode_csv_data:
                    yield line.encode('utf-8')
            return unicode_csv_reader(io.StringIO(content))
    else:
        raise AutoboxError("unknown autobox format %s" % format)
|
Autobox response content.
:param content: Response content
:type content: str
:param format: Format to return
:type format: `yaxil.Format`
:returns: Autoboxed content
:rtype: dict|xml.etree.ElementTree.Element|csvreader
|
def check_error(model, path, shapes, output='softmax_output', verbose=True):
    """
    Check the difference between predictions from MXNet and CoreML.

    :param model: trained MXNet model to compare against.
    :param path: path of the converted CoreML model on disk.
    :param shapes: dict mapping input names to input shapes.
    :param output: name of the CoreML output to compare.
    :param verbose: when True, print a few predictions and the error.
    :return: L2 norm of the difference between the two predictions.
    """
    coreml_model = _coremltools.models.MLModel(path)
    input_data = {}
    input_data_copy = {}
    # Feed identical random inputs to both frameworks.
    for ip in shapes:
        input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
        input_data_copy[ip] = _np.copy(input_data[ip])
    dataIter = _mxnet.io.NDArrayIter(input_data_copy)
    mx_out = model.predict(dataIter).flatten()
    e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
    e_out = e_out_dict[output].flatten()
    error = _np.linalg.norm(e_out - mx_out)
    if verbose:
        print("First few predictions from CoreML : %s" % e_out[0:10])
        # BUG FIX: this line previously printed the CoreML predictions
        # (e_out) a second time instead of the MXNet predictions.
        print("First few predictions from MXNet : %s" % mx_out[0:10])
        print("L2 Error on random data %s" % error)
    return error
|
Check the difference between predictions from MXNet and CoreML.
|
def _fchown(self, real, fileno, uid, gid):
"""Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries.
"""
path = self._fake_path(self._path_from_fd(fileno))
self._chown_common(path, uid, gid)
|
Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries.
|
def _args_from_dict(ddata: Mapping[str, Any]):
    """Allows to construct an instance of AnnData from a dictionary.

    Acts as interface for the communication with the hdf5 file.
    In particular, from a dict that has been written using
    ``AnnData._to_dict_fixed_width_arrays``.

    Returns the tuple ``(X, obs, var, uns, obsm, varm, layers, raw)``.

    NOTE(review): ``ddata`` is mutated in place (keys are deleted while
    reading); callers should not reuse the dict afterwards.
    """
    d_true_keys = {}
    # backwards compat: in the old format, unstructured annotation lives at
    # the top level of ddata rather than under an 'uns' key.
    uns_is_not_key = False
    valid_keys = []
    for keys in AnnData._H5_ALIASES.values():
        valid_keys += keys
    valid_keys += ['raw.X', 'raw.var', 'raw.varm', 'raw.cat']
    for key in ddata.keys():
        # if there is a key other than the predefined ones,
        # then we are reading the old format
        if key not in valid_keys:
            uns_is_not_key = True

    # Map each canonical key to the first alias found in ddata (None if absent).
    for true_key, keys in AnnData._H5_ALIASES.items():
        for key in keys:
            if key in ddata:
                d_true_keys[true_key] = ddata[key]
                if uns_is_not_key: del ddata[key]
                break
        else:
            d_true_keys[true_key] = None

    # transform recarray to dataframe
    for true_key, keys in AnnData._H5_ALIASES_NAMES.items():
        if d_true_keys[true_key] is not None:
            for key in keys:
                if key in d_true_keys[true_key].dtype.names:
                    d_true_keys[true_key] = pd.DataFrame.from_records(
                        d_true_keys[true_key], index=key)
                    break
            d_true_keys[true_key].index = d_true_keys[true_key].index.astype('U')
            # transform to unicode string
            # TODO: this is quite a hack
            for c in d_true_keys[true_key].columns:
                if is_string_dtype(d_true_keys[true_key][c]):
                    d_true_keys[true_key][c] = pd.Index(
                        d_true_keys[true_key][c]).astype('U').values

    # these are the category fields
    k_to_delete = []
    items = (
        ddata.items() if uns_is_not_key
        else ddata['uns'].items() if 'uns' in ddata else []
    )
    for k, v in items:
        if k.endswith('_categories'):
            k_stripped = k.replace('_categories', '')
            if isinstance(v, (str, int)):  # fix categories with a single category
                v = [v]
            for ann in ['obs', 'var']:
                if k_stripped in d_true_keys[ann]:
                    d_true_keys[ann][k_stripped] = pd.Categorical.from_codes(
                        codes=d_true_keys[ann][k_stripped].values,
                        categories=v,
                    )
            k_to_delete.append(k)

    # Delete after iterating so the dict is not mutated during the loop.
    for k in k_to_delete:
        if uns_is_not_key:
            del ddata[k]
        else:
            del ddata['uns'][k]

    # assign the variables
    X = d_true_keys['X']
    obs = d_true_keys['obs']
    obsm = d_true_keys['obsm']
    var = d_true_keys['var']
    varm = d_true_keys['varm']
    layers = d_true_keys['layers']

    raw = None
    if 'raw.X' in ddata:
        raw = {}
        raw['X'] = ddata['raw.X']
        del ddata['raw.X']
        # get the dataframe
        raw['var'] = pd.DataFrame.from_records(
            ddata['raw.var'], index='index')
        del ddata['raw.var']
        raw['var'].index = raw['var'].index.astype('U')
        # transform to unicode string
        for c in raw['var'].columns:
            if is_string_dtype(raw['var'][c]):
                raw['var'][c] = pd.Index(raw['var'][c]).astype('U').values
        # these are the category fields
        if 'raw.cat' in ddata:  # old h5ad didn't have that field
            for k, v in ddata['raw.cat'].items():
                if k.endswith('_categories'):
                    k_stripped = k.replace('_categories', '')
                    if isinstance(v, (str, int)):  # fix categories with a single category
                        v = [v]
                    raw['var'][k_stripped] = pd.Categorical.from_codes(
                        codes=raw['var'][k_stripped].values,
                        categories=v)
            del ddata['raw.cat']

    if 'raw.varm' in ddata:
        raw['varm'] = ddata['raw.varm']
        del ddata['raw.varm']
    elif raw is not None:
        raw['varm'] = None

    # the remaining fields are the unstructured annotation
    uns = (
        ddata if uns_is_not_key
        else ddata['uns'] if 'uns' in ddata
        else {}
    )
    return X, obs, var, uns, obsm, varm, layers, raw
|
Allows to construct an instance of AnnData from a dictionary.
Acts as interface for the communication with the hdf5 file.
In particular, from a dict that has been written using
``AnnData._to_dict_fixed_width_arrays``.
|
def copy(self, new_name=None):
    """Return a deep copy of the system.

    Parameters
    ----------
    new_name: str, optional
        Set a new meta name parameter.
        Default: <old_name>_copy
    """
    from copy import deepcopy

    clone = deepcopy(self)
    new_name = new_name or self.name + '_copy'
    if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
        # Full IOSystem: record the copy in the meta history and
        # rename there (without logging the rename itself).
        clone.meta.note('IOSystem copy {new} based on {old}'.format(
            new=new_name, old=self.meta.name))
        clone.meta.change_meta('name', new_name, log=False)
    else:
        # Plain extension objects just carry a name attribute.
        clone.name = new_name
    return clone
|
Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy
|
def add(self, key, value, time, compress_level=-1):
    """
    Add a key/value to server only if it does not exist.

    :param key: Key's name
    :type key: six.string_types
    :param value: A value to be stored on server.
    :type value: object
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True if key is added False if key already exists
    :rtype: bool
    """
    # Delegate to the shared set/add/replace implementation in "add" mode.
    return self._set_add_replace(
        'add', key, value, time, compress_level=compress_level)
|
Add a key/value to the server only if it does not exist.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True if key is added False if key already exists
:rtype: bool
|
def convert_to_str(d):
    """
    Recursively convert all keys and values in a dictionary to strings.

    This is required because setup() does not like unicode in
    the values it is supplied.

    Lists and tuples are converted element-wise (both come back as
    lists, matching the original behaviour); nested dicts are converted
    recursively; everything else is passed through str().
    """
    converted = {}
    for key, value in d.items():
        key = str(key)
        # isinstance (rather than an exact type() check) also handles
        # subclasses such as OrderedDict, which previously fell through
        # to str() and produced a repr instead of a converted mapping.
        if isinstance(value, (list, tuple)):
            converted[key] = [str(item) for item in value]
        elif isinstance(value, dict):
            converted[key] = convert_to_str(value)
        else:
            converted[key] = str(value)
    return converted
|
Recursively convert all values in a dictionary to strings
This is required because setup() does not like unicode in
the values it is supplied.
|
def evaluate_binop_logical(self, operation, left, right, **kwargs):
    """
    Evaluate given logical binary operation with given operands.

    :param str operation: Name of the operation; must be a key of
        ``self.binops_logical``.
    :param left: Left operand.
    :param right: Right operand.
    :return: Result of the operation coerced to bool.
    :rtype: bool
    :raises ValueError: If the operation is not supported.
    """
    # PEP 8 idiom: "x not in y" instead of "not x in y".
    if operation not in self.binops_logical:
        raise ValueError("Invalid logical binary operation '{}'".format(operation))
    result = self.binops_logical[operation](left, right)
    return bool(result)
|
Evaluate given logical binary operation with given operands.
|
def fail_remaining(self):
    """
    Mark all unfinished tasks (including currently running ones) as
    failed.
    """
    # Everything still present in the dependency graph has not completed.
    for node in self._graph.nodes:
        self._failed.add(node)
    # Reset scheduler state: nothing is left to schedule or run.
    self._graph = Graph()
    self._running = set()
|
Mark all unfinished tasks (including currently running ones) as
failed.
|
def boxcox_trans(p, **kwargs):
    """
    Boxcox Transformation

    Parameters
    ----------
    p : float
        Power parameter, commonly denoted by
        lower-case lambda in formulae
    kwargs : dict
        Keyword arguments passed onto
        :func:`trans_new`. Should not include
        the `transform` or `inverse`.
    """
    # A power of (approximately) zero degenerates to the log transform.
    if np.abs(p) < 1e-7:
        return log_trans()

    def _forward(x):
        return (x**p - 1) / (p * np.sign(x - 1))

    def _backward(x):
        return (np.abs(x) * p + np.sign(x)) ** (1 / p)

    kwargs['p'] = p
    kwargs['name'] = kwargs.get('name', 'pow_{}'.format(p))
    kwargs['transform'] = _forward
    kwargs['inverse'] = _backward
    return trans_new(**kwargs)
|
Boxcox Transformation
Parameters
----------
p : float
Power parameter, commonly denoted by
lower-case lambda in formulae
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`.
|
def process_data(key, data_list, result_info_key, identifier_keys):
    """ Given a key as the endpoint name, pulls the data for that endpoint out
    of the data_list for each address, processes the data into a more
    excel-friendly format and returns that data.
    Args:
        key: the endpoint name of the data to process
        data_list: the main data list to take the data from
        result_info_key: the key in api_data dicts that contains the data results
        identifier_keys: the list of keys used as requested identifiers
                         (address, zipcode, block_id, etc)
    Returns:
        A list of dicts (rows) to be written to a worksheet
    """
    master_data = []
    for item_data in data_list:
        data = item_data[key]
        if data is None:
            # No result for this item: emit an empty row; identifier
            # fields are still attached below.
            current_item_data = {}
        else:
            # Each endpoint nests its payload differently; unpack each
            # into a flat dict (or a list of dicts -> multiple rows).
            if key == 'property/value':
                current_item_data = data['value']
            elif key == 'property/details':
                top_level_keys = ['property', 'assessment']
                current_item_data = flatten_top_level_keys(data, top_level_keys)
            elif key == 'property/school':
                current_item_data = data['school']
                school_list = []
                for school_type_key in current_item_data:
                    schools = current_item_data[school_type_key]
                    for school in schools:
                        # One row per school, tagged with its type;
                        # address fields are renamed so they don't
                        # collide with the property identifier columns.
                        school['school_type'] = school_type_key
                        school['school_address'] = school['address']
                        school['school_zipcode'] = school['zipcode']
                        school_list.append(school)
                current_item_data = school_list
            elif key == 'property/value_forecast':
                # One column per forecast month.
                current_item_data = {}
                for month_key in data:
                    current_item_data[month_key] = data[month_key]['value']
            elif key in ['property/value_within_block', 'property/rental_value_within_block']:
                current_item_data = flatten_top_level_keys(data, [
                    'housecanary_value_percentile_range',
                    'housecanary_value_sqft_percentile_range',
                    'client_value_percentile_range',
                    'client_value_sqft_percentile_range'
                ])
            elif key in ['property/zip_details', 'zip/details']:
                top_level_keys = ['multi_family', 'single_family']
                current_item_data = flatten_top_level_keys(data, top_level_keys)
            else:
                # Endpoints whose payload is already flat pass through.
                current_item_data = data
        if isinstance(current_item_data, dict):
            _set_identifier_fields(current_item_data, item_data, result_info_key, identifier_keys)
            master_data.append(current_item_data)
        else:
            # it's a list
            for item in current_item_data:
                _set_identifier_fields(item, item_data, result_info_key, identifier_keys)
            master_data.extend(current_item_data)
    return master_data
|
Given a key as the endpoint name, pulls the data for that endpoint out
of the data_list for each address, processes the data into a more
excel-friendly format and returns that data.
Args:
key: the endpoint name of the data to process
data_list: the main data list to take the data from
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
Returns:
A list of dicts (rows) to be written to a worksheet
|
async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
    """
    Stores the value in the given key with ttl if specified. Raises an error if the
    key already exists.

    :param key: str
    :param value: obj
    :param ttl: int the expiration time in seconds. Due to memcached
        restrictions if you want compatibility use int. In case you
        need miliseconds, redis and memory support float ttls
    :param dumps_fn: callable alternative to use as dumps function
    :param namespace: str alternative namespace to use
    :param timeout: int or float in seconds specifying maximum timeout
        for the operations to last
    :returns: True if key is inserted
    :raises:
        - ValueError if key already exists
        - :class:`asyncio.TimeoutError` if it lasts more than self.timeout
    """
    # Timed for the debug log line below.
    start = time.monotonic()
    # Fall back to the cache's configured serializer when none is given.
    dumps = dumps_fn or self._serializer.dumps
    ns_key = self.build_key(key, namespace=namespace)
    # The backend _add is expected to raise if the key already exists.
    await self._add(ns_key, dumps(value), ttl=self._get_ttl(ttl), _conn=_conn)
    logger.debug("ADD %s %s (%.4f)s", ns_key, True, time.monotonic() - start)
    return True
|
Stores the value in the given key with ttl if specified. Raises an error if the
key already exists.
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need miliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if key is inserted
:raises:
- ValueError if key already exists
- :class:`asyncio.TimeoutError` if it lasts more than self.timeout
|
def describe_enum_value(enum_value):
    """Build descriptor for Enum instance.

    Args:
        enum_value: Enum value to provide descriptor for.

    Returns:
        Initialized EnumValueDescriptor instance describing the Enum instance.
    """
    descriptor = EnumValueDescriptor()
    # The name must be text (unicode on Python 2); number is the wire value.
    descriptor.name = six.text_type(enum_value.name)
    descriptor.number = enum_value.number
    return descriptor
|
Build descriptor for Enum instance.
Args:
enum_value: Enum value to provide descriptor for.
Returns:
Initialized EnumValueDescriptor instance describing the Enum instance.
|
def set_description(self, vrf_name, description=None, default=False,
                    disable=False):
    """ Configures the VRF description

    Args:
        vrf_name (str): The VRF name to configure
        description(str): The string to set the vrf description to
        default (bool): Configures the vrf description to its default value
        disable (bool): Negates the vrf description

    Returns:
        True if the operation was successful otherwise False
    """
    # Build the "description ..." command (honouring default/disable
    # negation) and apply it under the VRF configuration context.
    commands = self.command_builder('description', value=description,
                                    default=default, disable=disable)
    return self.configure_vrf(vrf_name, commands)
|
Configures the VRF description
Args:
vrf_name (str): The VRF name to configure
description(str): The string to set the vrf description to
default (bool): Configures the vrf description to its default value
disable (bool): Negates the vrf description
Returns:
True if the operation was successful otherwise False
|
def register_custom_adapter(cls, target_class, adapter):
    """
    Register a (de)serialization adapter for the given class.

    :type target_class: type
    :type adapter: JsonAdapter|type
    :rtype: None
    """
    name = target_class.__name__
    # An adapter may support one or both directions; register each
    # capability it advertises under the class name.
    if adapter.can_serialize():
        cls._custom_serializers[name] = adapter
    if adapter.can_deserialize():
        cls._custom_deserializers[name] = adapter
|
:type target_class: type
:type adapter: JsonAdapter|type
:rtype: None
|
def sample(self, num_rows=1):
    """Create synthetic values statistically similar to the original dataset.

    Draws from a multivariate normal with the fitted covariance, then maps
    each column through the inverse CDF of its fitted marginal
    (Gaussian-copula sampling).

    Args:
        num_rows: `int` amount of samples to generate.

    Returns:
        pd.DataFrame: Sampled data, one column per fitted distribution.
    """
    self.check_fit()
    res = {}
    means = np.zeros(self.covariance.shape[0])
    size = (num_rows,)
    # NaNs in the fitted covariance would break the sampler; zero them out.
    clean_cov = np.nan_to_num(self.covariance)
    samples = np.random.multivariate_normal(means, clean_cov, size=size)
    for i, (label, distrib) in enumerate(self.distribs.items()):
        # Normal CDF maps each column to [0, 1]; the marginal's
        # percent-point function (inverse CDF) maps it back to data scale.
        cdf = stats.norm.cdf(samples[:, i])
        res[label] = distrib.percent_point(cdf)
    return pd.DataFrame(data=res)
|
Creates synthetic values statistically similar to the original dataset.
Args:
num_rows: `int` amount of samples to generate.
Returns:
np.ndarray: Sampled data.
|
def create_panel_of_normals(items, group_id, work_dir):
    """Create a panel of normals from one or more background read counts.

    Runs GATK CreateReadCountPanelOfNormals over each sample's binned read
    counts, writing an HDF5 panel named after the first sample and the
    group id. The result is cached: an existing output file is reused.
    """
    out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
    if not utils.file_exists(out_file):
        # Write via a transactional temp file so interrupted runs don't
        # leave a truncated panel behind.
        with file_transaction(items[0], out_file) as tx_out_file:
            params = ["-T", "CreateReadCountPanelOfNormals",
                      "-O", tx_out_file,
                      "--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
            for data in items:
                # One -I input per background sample's binned coverage.
                params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
            _run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
    return out_file
|
Create a panel of normals from one or more background read counts.
|
def restore(self):
    """Restore the saved value for the attribute of the object."""
    if self.proxy_object is not None:
        # A whole object was proxied: rebind the original object under
        # its own name in its defining module.
        setattr(sys.modules[self.orig_object.__module__],
                self.orig_object.__name__,
                self.orig_object)
        return
    if self.getter:
        # The attribute was a descriptor/property: reinstall it on the class.
        setattr(self.getter_class, self.attr_name, self.getter)
    elif self.is_local:
        # The attribute existed before patching: put the old value back.
        setattr(self.orig_object, self.attr_name, self.orig_value)
    else:
        # The attribute was added by the patch: remove it entirely.
        delattr(self.orig_object, self.attr_name)
|
Restore the saved value for the attribute of the object.
|
def render(self, size):
    """
    render identicon to PIL.Image

    @param size identicon patchsize. (image size is 3 * [size])
    @return PIL.Image
    """
    # decode the code into patch descriptors (type, inversion, rotation)
    # and the two colors. NOTE(review): exact tuple layout is defined by
    # self.decode; indices below assume [type, invert, turn] order.
    middle, corner, side, foreColor, backColor = self.decode(self.code)
    size = int(size)
    # make image: a 3x3 grid of patches, each `size` pixels square
    image = Image.new("RGB", (size * 3, size * 3))
    draw = ImageDraw.Draw(image)
    # fill background
    draw.rectangle((0, 0, image.size[0], image.size[1]), fill=0)
    kwds = {
        'draw': draw,
        'size': size,
        'foreColor': foreColor,
        'backColor': backColor}
    # middle patch at grid cell (1, 1)
    self.drawPatch((1, 1), middle[2], middle[1], middle[0], **kwds)
    # side patch: same patch repeated on the four edge cells, with the
    # rotation advanced by one quarter-turn per position
    kwds['type'] = side[0]
    for i in range(4):
        pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
        self.drawPatch(pos, side[2] + 1 + i, side[1], **kwds)
    # corner patch: same idea for the four corner cells
    kwds['type'] = corner[0]
    for i in range(4):
        pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
        self.drawPatch(pos, corner[2] + 1 + i, corner[1], **kwds)
    return image
|
render identicon to PIL.Image
@param size identicon patchsize. (image size is 3 * [size])
@return PIL.Image
|
def average(var, key, N):
    '''Average `var` over the last N reported values for `key`.

    Keeps a per-key sliding window of the N most recent values in the
    module-level ``average_data`` dict and returns their mean. The first
    call for a key seeds the window with N copies of `var`.
    '''
    global average_data
    try:
        average_data
    except NameError:
        # Allow use before (or without) module-level initialization.
        average_data = {}
    # PEP 8 idiom: "key not in" instead of "not key in".
    if key not in average_data:
        average_data[key] = [var] * N
        return var
    history = average_data[key]
    history.pop(0)
    history.append(var)
    return sum(history) / N
|
average over N points
|
def get_comments(self, project, work_item_id, top=None, continuation_token=None, include_deleted=None, expand=None, order=None):
    """GetComments.

    [Preview API] Returns a list of work item comments, pageable.

    :param str project: Project ID or project name
    :param int work_item_id: Id of a work item to get comments for.
    :param int top: Max number of comments to return.
    :param str continuation_token: Used to query for the next page of comments.
    :param bool include_deleted: Specify if the deleted comments should be retrieved.
    :param str expand: Specifies the additional data retrieval options for work item comments.
    :param str order: Order in which the comments should be returned.
    :rtype: :class:`<CommentList> <azure.devops.v5_1.work-item-tracking.models.CommentList>`
    """
    # URL route parameters for .../{project}/.../{workItemId}/comments.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if work_item_id is not None:
        route_values['workItemId'] = self._serialize.url('work_item_id', work_item_id, 'int')
    # Optional query-string parameters; only serialize what the caller set.
    query_parameters = {}
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if continuation_token is not None:
        query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if include_deleted is not None:
        query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    if order is not None:
        query_parameters['order'] = self._serialize.query('order', order, 'str')
    response = self._send(http_method='GET',
                          location_id='608aac0a-32e1-4493-a863-b9cf4566d257',
                          version='5.1-preview.3',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('CommentList', response)
|
GetComments.
[Preview API] Returns a list of work item comments, pageable.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item to get comments for.
:param int top: Max number of comments to return.
:param str continuation_token: Used to query for the next page of comments.
:param bool include_deleted: Specify if the deleted comments should be retrieved.
:param str expand: Specifies the additional data retrieval options for work item comments.
:param str order: Order in which the comments should be returned.
:rtype: :class:`<CommentList> <azure.devops.v5_1.work-item-tracking.models.CommentList>`
|
def get_small_image_url(self, page=1):
    """
    Returns the URL for the small sized image of a single page.

    The page kwarg specifies which page to return. One is the default.
    """
    # The resource template contains {page} and {size} placeholders.
    template = self.resources.page.get('image')
    url = template.replace("{page}", str(page))
    return url.replace("{size}", "small")
|
Returns the URL for the small sized image of a single page.
The page kwarg specifies which page to return. One is the default.
|
def make_relationship_aggregate(self, relationship):
    """
    Returns a new relationship aggregate for the given relationship.

    :param relationship: Instance of
        :class:`everest.entities.relationship.DomainRelationship`.
    """
    # If the session does not manage backreferences itself, clear the
    # REVERSE bit so the aggregate never writes the reverse direction.
    if not self._session.IS_MANAGING_BACKREFERENCES:
        relationship.direction &= ~RELATIONSHIP_DIRECTIONS.REVERSE
    return RelationshipAggregate(self, relationship)
|
Returns a new relationship aggregate for the given relationship.
:param relationship: Instance of
:class:`everest.entities.relationship.DomainRelationship`.
|
def put_account(self, headers=None, query=None, cdn=False, body=None):
    """
    PUTs the account and returns the results. This is usually
    done with the extract-archive bulk upload request and has no
    other use I know of (but the call is left open in case there
    ever is).

    :param headers: Additional headers to send with the request.
    :param query: Set to a dict of query values to send on the
        query string of the request.
    :param cdn: If set True, the CDN management interface will be
        used.
    :param body: Some account PUT requests, like the
        extract-archive bulk upload request, take a body.
    :returns: A tuple of (status, reason, headers, contents).

        :status: is an int for the HTTP status code.
        :reason: is the str for the HTTP status (ex: "Ok").
        :headers: is a dict with all lowercase keys of the HTTP
            headers; if a header has multiple values, it will be a
            list.
        :contents: is the str for the HTTP body.
    """
    # An account-level request uses an empty path; a missing body is
    # sent as an empty string.
    payload = body or ''
    return self.request('PUT', '', payload, headers, query=query, cdn=cdn)
|
PUTs the account and returns the results. This is usually
done with the extract-archive bulk upload request and has no
other use I know of (but the call is left open in case there
ever is).
:param headers: Additional headers to send with the request.
:param query: Set to a dict of query values to send on the
query string of the request.
:param cdn: If set True, the CDN management interface will be
used.
:param body: Some account PUT requests, like the
extract-archive bulk upload request, take a body.
:returns: A tuple of (status, reason, headers, contents).
:status: is an int for the HTTP status code.
:reason: is the str for the HTTP status (ex: "Ok").
:headers: is a dict with all lowercase keys of the HTTP
headers; if a header has multiple values, it will be a
list.
:contents: is the str for the HTTP body.
|
def PC_AC1_calc(P, TOP, POP):
    """
    Calculate percent chance agreement for Gwet's AC1.

    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP: dict
    :return: percent chance agreement as float
    """
    try:
        classes = list(P.keys())
        # Per-class mean marginal probability pi, folded into pi*(1-pi).
        acc = 0
        for class_name in classes:
            pi = (P[class_name] + TOP[class_name]) / (2 * POP[class_name])
            acc += pi * (1 - pi)
        return acc / (len(classes) - 1)
    except Exception:
        # Library convention: incomputable statistics are reported as "None".
        return "None"
|
Calculate percent chance agreement for Gwet's AC1.
:param P: condition positive
:type P : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP:dict
:return: percent chance agreement as float
|
def get_template(cls, message, messenger):
    """Get a template path to compile a message.

    1. `tpl` field of message context;
    2. `template` field of message class;
    3. deduced from message, messenger data and `template_ext` message type field
       (e.g. `sitemessage/messages/plain__smtp.txt` for `plain` message type).

    :param Message message: Message model
    :param MessengerBase messenger: a MessengerBase heir
    :return: str
    :rtype: str
    """
    context_template = message.context.get('tpl', None)
    if context_template:
        # Highest priority: template named explicitly in the message context.
        return context_template
    if cls.template is None:
        # Deduce the conventional path once and cache it on the class.
        cls.template = 'sitemessage/messages/%s__%s.%s' % (
            cls.get_alias(), messenger.get_alias(), cls.template_ext
        )
    return cls.template
|
Get a template path to compile a message.
1. `tpl` field of message context;
2. `template` field of message class;
3. deduced from message, messenger data and `template_ext` message type field
(e.g. `sitemessage/messages/plain__smtp.txt` for `plain` message type).
:param Message message: Message model
:param MessengerBase messenger: a MessengerBase heir
:return: str
:rtype: str
|
def remove_temp_copy(self):
    """
    Removes a temporary copy of the MAGICC version shipped with Pymagicc.
    """
    # Only delete directories we created ourselves (temporary copies),
    # and only if they have not already been removed.
    if not self.is_temp or self.root_dir is None:
        return
    shutil.rmtree(self.root_dir)
    self.root_dir = None
|
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
|
def decipher(self, string):
    """Decipher string using Foursquare cipher according to initialised key.

    Punctuation and whitespace are removed from the input. The ciphertext
    should be an even number of characters. If the input ciphertext is not
    an even number of characters, an 'X' will be appended.

    Example::

        plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext)

    :param string: The string to decipher.
    :returns: The deciphered string.
    """
    string = self.remove_punctuation(string)
    # Pad odd-length ciphertext so it splits cleanly into digraphs.
    if len(string) % 2 == 1:
        string = string + 'X'
    pieces = []
    for i in range(0, len(string), 2):
        a, b = self.decipher_pair(string[i], string[i + 1])
        pieces.append(a + b)
    return ''.join(pieces)
|
Decipher string using Foursquare cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string.
|
def linkify(self):
    """
    Check exclusion for each timeperiod

    :return: None
    """
    # Each timeperiod resolves its exclusions against this collection.
    for timeperiod in self.items.values():
        timeperiod.linkify(self)
|
Check exclusion for each timeperiod
:return: None
|
def push(self, kv):
    """ Adds a new item from the given (key, value)-tuple.
        If the key exists, pushes the updated item to the head of the dict.
    """
    key, value = kv
    if key in self:
        # Remove first so reinsertion moves the key to the fresh end.
        self.__delitem__(key)
    self.__setitem__(key, value)
|
Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict.
|
def run_command_on_marathon_leader(
        command,
        username=None,
        key_path=None,
        noisy=True
):
    """ Run a command on the Marathon leader
    """
    # Resolve the current leader's IP, then reuse the generic runner.
    leader_ip = shakedown.marathon_leader_ip()
    return run_command(leader_ip, command, username, key_path, noisy)
|
Run a command on the Marathon leader
|
def ToByteArray(self):
    """
    Serialize self and get the byte stream.

    Returns:
        bytes: serialized object.
    """
    ms = StreamManager.GetStream()
    try:
        writer = BinaryWriter(ms)
        self.Serialize(writer)
        return ms.ToArray()
    finally:
        # Always return the pooled stream, even if Serialize raises;
        # previously an exception leaked the stream from the pool.
        StreamManager.ReleaseStream(ms)
|
Serialize self and get the byte stream.
Returns:
bytes: serialized object.
|
def parse_sentence(self, string):
    """ Parses the given sentence. BASIC commands must be
    typed UPPERCASE and as SEEN in ZX BASIC. e.g. GO SUB for gosub, etc...

    :param string: sentence to tokenize, words separated by whitespace
    :returns: concatenated list of tokens produced by ``self.token``
        for each word
    """
    result = []

    def shift(string_):
        """ Returns first word of a string, and remaining
        """
        string_ = string_.strip()  # Remove spaces and tabs
        if not string_:  # Avoid empty strings
            return '', ''

        i = string_.find(' ')
        if i == -1:
            command_ = string_
            string_ = ''
        else:
            command_ = string_[:i]
            string_ = string_[i:]

        return command_, string_

    command, string = shift(string)
    while command != '':
        result += self.token(command)
        # Advance to the next word; without this the loop never terminated.
        command, string = shift(string)

    return result
|
Parses the given sentence. BASIC commands must be
typed UPPERCASE and as SEEN in ZX BASIC. e.g. GO SUB for gosub, etc...
|
def enterEvent(self, event):
    """
    Toggles the display for the tracker item.
    """
    tracker = self.trackerItem()
    # Only show the tracker when one actually exists.
    if tracker:
        tracker.setVisible(True)
Toggles the display for the tracker item.
|
def _check_std(self, paths, cmd_pieces):
"""
Run `cmd` as a check on `paths`.
"""
cmd_pieces.extend(paths)
process = Popen(cmd_pieces, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
lines = out.strip().splitlines() + err.strip().splitlines()
result = []
for line in lines:
match = self.tool_err_re.match(line)
if not match:
if self.break_on_tool_re_mismatch:
raise ValueError(
'Unexpected `%s` output: %r' % (
' '.join(cmd_pieces),
paths,
line))
continue
vals = match.groupdict()
# All tools should at least give us line numbers, but only
# some give column numbers.
vals['lineno'] = int(vals['lineno'])
vals['colno'] = \
int(vals['colno']) if vals['colno'] is not None else ''
result.append(vals)
return result
|
Run `cmd` as a check on `paths`.
|
def _get_pydot(self):
"""Return pydot package. Load pydot, if necessary."""
if self.pydot:
return self.pydot
self.pydot = __import__("pydot")
return self.pydot
|
Return pydot package. Load pydot, if necessary.
|
def add_response_headers(h):
    """
    Add HTTP-headers to response.

    Example:
        @add_response_headers({'Refresh': '10', 'X-Powered-By': 'Django'})
        def view(request):
            ....

    :param h: mapping of header name -> header value to set on the response.
    """
    from functools import wraps

    def headers_wrapper(fun):
        @wraps(fun)
        def wrapped_function(*args, **kwargs):
            response = fun(*args, **kwargs)
            # dict.items() works on both Python 2 and 3; the original
            # iteritems() call raised AttributeError under Python 3.
            for key, value in h.items():
                response[key] = value
            return response
        return wrapped_function
    return headers_wrapper
|
Add HTTP-headers to response.
Example:
@add_response_headers({'Refresh': '10', 'X-Powered-By': 'Django'})
def view(request):
....
|
def clear_jobs():
    '''Clear old jobs

    :param days: Jobs for how many days should be kept (default: 10)
    :type days: integer

    :statuscode 200: no error
    :statuscode 403: not authorized to delete jobs
    :statuscode 409: an error occurred
    '''
    # Authorization gate: reject unauthenticated callers with a JSON 403.
    if not is_authorized():
        return json.dumps({'error': 'not authorized'}), 403, headers
    # "days" comes from the request query string, e.g. ?days=10;
    # None lets _clear_jobs apply its own default.
    days = flask.request.args.get('days', None)
    return _clear_jobs(days)
|
Clear old jobs
:param days: Jobs for how many days should be kept (default: 10)
:type days: integer
:statuscode 200: no error
:statuscode 403: not authorized to delete jobs
:statuscode 409: an error occurred
|
def bootstrap_app():
    '''
    Grab the opts dict of the master config by trying to import Salt
    '''
    # Imported lazily so this module can be loaded without Salt installed.
    from salt.netapi.rest_cherrypy import app
    import salt.config

    # SALT_MASTER_CONFIG env var overrides the default master config path.
    __opts__ = salt.config.client_config(
        os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
    return app.get_app(__opts__)
|
Grab the opts dict of the master config by trying to import Salt
|
def show_text_glyphs(self, text, glyphs, clusters, cluster_flags=0):
    """This operation has rendering effects similar to :meth:`show_glyphs`
    but, if the target surface supports it
    (see :meth:`Surface.has_show_text_glyphs`),
    uses the provided text and cluster mapping
    to embed the text for the glyphs shown in the output.
    If the target does not support the extended attributes,
    this method acts like the basic :meth:`show_glyphs`
    as if it had been passed :obj:`glyphs`.

    The mapping between :obj:`text` and :obj:`glyphs`
    is provided by an list of clusters.
    Each cluster covers a number of UTF-8 text bytes and glyphs,
    and neighboring clusters cover neighboring areas
    of :obj:`text` and :obj:`glyphs`.
    The clusters should collectively cover :obj:`text` and :obj:`glyphs`
    in entirety.

    :param text:
        The text to show, as an Unicode or UTF-8 string.
        Because of how :obj:`clusters` work,
        using UTF-8 bytes might be more convenient.
    :param glyphs:
        A list of glyphs.
        Each glyph is a ``(glyph_id, x, y)`` tuple.
        :obj:`glyph_id` is an opaque integer.
        Its exact interpretation depends on the font technology being used.
        :obj:`x` and :obj:`y` are the float offsets
        in the X and Y direction
        between the origin used for drawing or measuring the string
        and the origin of this glyph.
        Note that the offsets are not cumulative.
        When drawing or measuring text,
        each glyph is individually positioned
        with respect to the overall origin.
    :param clusters:
        A list of clusters.
        A text cluster is a minimal mapping of some glyphs
        corresponding to some UTF-8 text,
        represented as a ``(num_bytes, num_glyphs)`` tuple of integers,
        the number of UTF-8 bytes and glyphs covered by the cluster.
        For a cluster to be valid,
        both :obj:`num_bytes` and :obj:`num_glyphs` should be non-negative,
        and at least one should be non-zero.
        Note that clusters with zero glyphs
        are not as well supported as normal clusters.
        For example, PDF rendering applications
        typically ignore those clusters when PDF text is being selected.
    :type cluster_flags: int
    :param cluster_flags:
        Flags (as a bit field) for the cluster mapping.
        The first cluster always covers bytes
        from the beginning of :obj:`text`.
        If :obj:`cluster_flags` does not have
        the :obj:`TEXT_CLUSTER_FLAG_BACKWARD` flag set,
        the first cluster also covers the beginning of :obj:`glyphs`,
        otherwise it covers the end of the :obj:`glyphs` list
        and following clusters move backward.
    """
    # Marshal the Python tuple lists into C arrays for the cairo call.
    glyphs = ffi.new('cairo_glyph_t[]', glyphs)
    clusters = ffi.new('cairo_text_cluster_t[]', clusters)
    # -1 tells cairo the encoded text is NUL-terminated, so no explicit
    # byte length is needed.
    cairo.cairo_show_text_glyphs(
        self._pointer, _encode_string(text), -1,
        glyphs, len(glyphs), clusters, len(clusters), cluster_flags)
    # Raise if cairo left the context in an error state.
    self._check_status()
|
This operation has rendering effects similar to :meth:`show_glyphs`
but, if the target surface supports it
(see :meth:`Surface.has_show_text_glyphs`),
uses the provided text and cluster mapping
to embed the text for the glyphs shown in the output.
If the target does not support the extended attributes,
this method acts like the basic :meth:`show_glyphs`
as if it had been passed :obj:`glyphs`.
The mapping between :obj:`text` and :obj:`glyphs`
is provided by an list of clusters.
Each cluster covers a number of UTF-8 text bytes and glyphs,
and neighboring clusters cover neighboring areas
of :obj:`text` and :obj:`glyphs`.
The clusters should collectively cover :obj:`text` and :obj:`glyphs`
in entirety.
:param text:
The text to show, as an Unicode or UTF-8 string.
Because of how :obj:`clusters` work,
using UTF-8 bytes might be more convenient.
:param glyphs:
A list of glyphs.
Each glyph is a ``(glyph_id, x, y)`` tuple.
:obj:`glyph_id` is an opaque integer.
Its exact interpretation depends on the font technology being used.
:obj:`x` and :obj:`y` are the float offsets
in the X and Y direction
between the origin used for drawing or measuring the string
and the origin of this glyph.
Note that the offsets are not cumulative.
When drawing or measuring text,
each glyph is individually positioned
with respect to the overall origin.
:param clusters:
A list of clusters.
A text cluster is a minimal mapping of some glyphs
corresponding to some UTF-8 text,
represented as a ``(num_bytes, num_glyphs)`` tuple of integers,
the number of UTF-8 bytes and glyphs covered by the cluster.
For a cluster to be valid,
both :obj:`num_bytes` and :obj:`num_glyphs` should be non-negative,
and at least one should be non-zero.
Note that clusters with zero glyphs
are not as well supported as normal clusters.
For example, PDF rendering applications
typically ignore those clusters when PDF text is being selected.
:type cluster_flags: int
:param cluster_flags:
Flags (as a bit field) for the cluster mapping.
The first cluster always covers bytes
from the beginning of :obj:`text`.
If :obj:`cluster_flags` does not have
the :obj:`TEXT_CLUSTER_FLAG_BACKWARD` flag set,
the first cluster also covers the beginning of :obj:`glyphs`,
otherwise it covers the end of the :obj:`glyphs` list
and following clusters move backward.
|
def orthogonal_basis(self):
    """Return an orthogonal basis to this direction.

    Note
    ----
    Only implemented in 3D.

    Returns
    -------
    :obj:`tuple` of :obj:`Direction`
        Two normalized Direction vectors spanning the plane orthogonal
        to this direction, expressed in the same frame.

    Raises
    ------
    NotImplementedError
        If the vector is not 3D.
    """
    if self.dim != 3:
        raise NotImplementedError('Orthogonal basis only supported for 3 dimensions')
    # Start with a vector perpendicular to the (x, y) projection.
    first = np.array([-self.data[1], self.data[0], 0])
    if np.linalg.norm(first) == 0:
        # Direction lies along the z axis; fall back to the x axis scaled by z.
        first = np.array([self.data[2], 0, 0])
    first = first / np.linalg.norm(first)
    # Cross product completes the right-handed orthogonal pair.
    second = np.cross(self.data, first)
    return Direction(first, frame=self.frame), Direction(second, frame=self.frame)
|
Return an orthogonal basis to this direction.
Note
----
Only implemented in 3D.
Returns
-------
:obj:`tuple` of :obj:`Direction`
The pair of normalized Direction vectors that form a basis of
this direction's orthogonal complement.
Raises
------
NotImplementedError
If the vector is not 3D
|
def get_name_addr(value):
    """ name-addr = [display-name] angle-addr

    Parse a name-addr (grammar above) from the start of *value*.

    :param value: the string to parse; must begin with a name-addr.
    :return: ``(name_addr, rest)`` — the parsed NameAddr token and the
        unconsumed remainder of *value*.
    :raises errors.HeaderParseError: if *value* does not start with a
        valid name-addr.
    """
    name_addr = NameAddr()
    # Both the optional display name and the angle-addr can start with cfws.
    leader = None
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(leader))
    if value[0] != '<':
        # No immediate '<', so a display-name must precede the angle-addr.
        if value[0] in PHRASE_ENDS:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(value))
        token, value = get_display_name(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(token))
        if leader is not None:
            # Fold the leading cfws into the display-name token.
            token[0][:0] = [leader]
            leader = None
        name_addr.append(token)
    token, value = get_angle_addr(value)
    if leader is not None:
        # No display-name consumed the leader; attach it to the angle-addr.
        token[:0] = [leader]
    name_addr.append(token)
    return name_addr, value
|
name-addr = [display-name] angle-addr
|
def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
    '''
    Build the list of authorisation matchers a user is eligible for.

    Combines the matchers assigned directly to ``name`` with the matchers
    of every group (entries ending in ``%``) the user belongs to. The
    ``'*'`` entry acts as a fallback for users with no direct match unless
    permissive ACLs are enabled.
    '''
    if auth_list is None:
        auth_list = []
    if permissive is None:
        permissive = self.opts.get('permissive_acl')
    matched_by_name = False
    for entry in auth_provider:
        # With permissive ACLs off, '*' is handled only as a fallback below.
        if entry == '*' and not permissive:
            continue
        if entry.endswith('%'):
            # Group entry: strip the trailing '%' marker(s) and compare.
            if entry.rstrip('%') in groups:
                auth_list.extend(auth_provider[entry])
            continue
        if salt.utils.stringutils.expr_match(entry, name):
            matched_by_name = True
            auth_list.extend(auth_provider[entry])
    if '*' in auth_provider and not permissive and not matched_by_name:
        auth_list.extend(auth_provider['*'])
    return auth_list
|
Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in.
|
def conformPadding(cls, chars):
    """
    Ensure alternate input padding formats are conformed
    to formats defined in PAD_MAP.

    Input already keyed by PAD_MAP (or empty) is returned untouched;
    anything else is converted via the class's padding helpers.

    Example::

        '#' -> '#'
        '@@@@' -> '@@@@'
        '%04d' -> '#'

    Args:
        chars (str): input padding chars

    Returns:
        str: conformed padding chars

    Raises:
        ValueError: If chars contains invalid padding characters
    """
    # Already-canonical (or empty) padding passes straight through.
    if not chars or chars[0] in PAD_MAP:
        return chars
    # Otherwise round-trip through the numeric width to canonical chars.
    return cls.getPaddingChars(cls.getPaddingNum(chars))
|
Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters
|
def port_create(request, network_id, **kwargs):
    """Create a port on a specified network.

    :param request: request context
    :param network_id: network id a subnet is created on
    :param device_id: (optional) device id attached to the port
    :param tenant_id: (optional) tenant id of the port created;
        defaults to the requesting user's project id
    :param name: (optional) name of the port created
    :returns: Port object
    """
    LOG.debug("port_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
              {'network_id': network_id, 'kwargs': kwargs})
    kwargs = unescape_port_kwargs(**kwargs)
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    # Assemble the request body: network id first, then caller overrides.
    port_attrs = {'network_id': network_id}
    port_attrs.update(kwargs)
    created = neutronclient(request).create_port(body={'port': port_attrs})
    return Port(created.get('port'))
|
Create a port on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
|
def snapshot(self):
    """
    Return a nested dictionary snapshot of all metrics and their
    values at this time. Example:

        {
            'category': {
                'metric1_name': 42.0,
                'metric2_name': 'foo'
            }
        }
    """
    result = {}
    # Copy the store's items first so concurrent mutation during
    # iteration does not break the snapshot.
    for category, metrics in list(self._store.items()):
        result[category] = {name: metric.value()
                            for name, metric in list(metrics.items())}
    return result
|
Return a nested dictionary snapshot of all metrics and their
values at this time. Example:
{
'category': {
'metric1_name': 42.0,
'metric2_name': 'foo'
}
}
|
def vn_info(call=None, kwargs=None):
    '''
    Retrieves information for the virtual network.

    .. versionadded:: 2016.3.0

    name
        The name of the virtual network for which to gather information. Can be
        used instead of ``vn_id``.

    vn_id
        The ID of the virtual network for which to gather information. Can be
        used instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f vn_info opennebula vn_id=3
        salt-cloud --function vn_info opennebula name=public
    '''
    # Salt Cloud sets call='function' when invoked with -f/--function;
    # reject any other invocation style.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The vn_info function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    vn_id = kwargs.get('vn_id', None)

    # 'vn_id' takes precedence when both are given; otherwise resolve it
    # from the name. One of the two must be present.
    if vn_id:
        if name:
            log.warning(
                'Both the \'vn_id\' and \'name\' arguments were provided. '
                '\'vn_id\' will take precedence.'
            )
    elif name:
        vn_id = get_vn_id(kwargs={'name': name})
    else:
        raise SaltCloudSystemExit(
            'The vn_info function requires either a \'name\' or a \'vn_id\' '
            'to be provided.'
        )

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.vn.info(auth, int(vn_id))

    # response[0] is the XML-RPC success flag; on failure response[1]
    # carries the error message, on success the XML payload.
    if response[0] is False:
        return response[1]
    else:
        info = {}
        tree = _get_xml(response[1])
        # Key the result by the network's NAME element.
        info[tree.find('NAME').text] = _xml_to_dict(tree)
        return info
|
Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public
|
def _render_headers(self):
"""
Write the headers row
"""
headers = getattr(self, 'headers', ())
for index, col in enumerate(headers):
# We write the headers
cell = self.worksheet.cell(row=1, column=index + 1)
cell.value = col['label']
index += 1
extra_headers = getattr(self, 'extra_headers', ())
for add_index, col in enumerate(extra_headers):
cell = self.worksheet.cell(row=1, column=add_index + index + 1)
cell.value = col['label']
|
Write the headers row
|
async def close(self) -> None:
    """Close underlying connector.

    Release all acquired resources. Does nothing if already closed;
    the connector itself is only closed when this object owns it.
    """
    if self.closed:
        return
    connector = self._connector
    if connector is not None and self._connector_owner:
        await connector.close()
    self._connector = None
|
Close underlying connector.
Release all acquired resources.
|
def write(self, obj):
    """Writes the given object to the cache file as pickle. The cache file with
    its path is created if needed.

    :param obj: the object to store under the current cache id.
    :return: the object as read back from the cache entry just written
        (so callers always receive the round-tripped value).
    """
    if self.verbose:
        # Reaching write() means the lookup missed the cache.
        self._warnings("cache miss for {0}", self._cache_id_desc())
    # Record how long computing the value took, if timing was started.
    if self._start_time is not None:
        elapsed = get_time() - self._start_time
    else:
        elapsed = None
    out = self._write(self._cache_id_obj, elapsed, obj)
    self._out = out
    # Spill to disk when the in-memory size exceeds the RAM quota.
    self.force_to_disk(self.get_size() > self._ram_quota)
    self._last_access = get_time()
    # Return the stored object by reading the record back (field index 2).
    return self._read(out)[2]
|
Writes the given object to the cache file as pickle. The cache file with
its path is created if needed.
|
def get_clan_tracking(self, *tags: crtag, **params: keys):
    r"""Returns if the clan is currently being tracked
    by the API by having either cr-api.com or royaleapi.com
    in the clan description.

    (Raw docstring: the ``\*`` markers below are literal backslash
    escapes for the docs renderer, not string escapes.)

    Parameters
    ----------
    \*tags: str
        Valid clan tags. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    \*\*keys: Optional[list] = None
        Filter which keys should be included in the
        response
    \*\*exclude: Optional[list] = None
        Filter which keys should be excluded from the
        response
    \*\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    # Multiple tags are joined into a single comma-separated path segment.
    url = self.api.CLAN + '/' + ','.join(tags) + '/tracking'
    return self._get_model(url, **params)
|
Returns if the clan is currently being tracked
by the API by having either cr-api.com or royaleapi.com
in the clan description
Parameters
----------
\*tags: str
Valid clan tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
|
def timed_rotating_file_handler(name, logname, filename, when='h',
                                interval=1, backupCount=0,
                                encoding=None, delay=False, utc=False):
    """
    A Bark logging handler logging output to a named file. At
    intervals specified by the 'when', the file will be rotated, under
    control of 'backupCount'.

    Similar to logging.handlers.TimedRotatingFileHandler.
    """
    # Build the stdlib handler first, then wrap it for Bark.
    handler = logging.handlers.TimedRotatingFileHandler(
        filename,
        when=when,
        interval=interval,
        backupCount=backupCount,
        encoding=encoding,
        delay=delay,
        utc=utc,
    )
    return wrap_log_handler(handler)
|
A Bark logging handler logging output to a named file. At
intervals specified by the 'when', the file will be rotated, under
control of 'backupCount'.
Similar to logging.handlers.TimedRotatingFileHandler.
|
def designator(self):
    """\
    Returns the version and error correction level as string `V-E` where
    `V` represents the version number and `E` the error level.

    When no error level is set, only the version string is returned.
    """
    parts = [str(self.version)]
    if self.error:
        parts.append(self.error)
    return '-'.join(parts)
|
\
Returns the version and error correction level as string `V-E` where
`V` represents the version number and `E` the error level.
|
def dump_yaml(data, Dumper=_Dumper, default_flow_style=False):
    """Serialize *data* to a YAML string, stripped of surrounding whitespace."""
    rendered = yaml.dump(data,
                         Dumper=Dumper,
                         default_flow_style=default_flow_style)
    return rendered.strip()
|
Returns data as yaml-formatted string.
|
def delete_refresh_token(self, refresh_token):
    """
    Deletes a refresh token after use.

    Looks up the access token associated with *refresh_token* and removes
    it from the store.

    :param refresh_token: The refresh token to delete.
    """
    token = self.fetch_by_refresh_token(refresh_token).token
    self.delete(token)
|
Deletes a refresh token after use
:param refresh_token: The refresh token to delete.
|
def is_none_or(self):
    """
    Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::

        Ensure(None).is_none_or.is_an(int)
    """
    # A non-None subject keeps the chain live; None short-circuits the
    # rest of the chain via a no-op inspector.
    if self._subject is not None:
        return self
    return NoOpInspector(subject=self._subject, error_factory=self._error_factory)
|
Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::
Ensure(None).is_none_or.is_an(int)
|
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    Also collects every qualified type name referenced anywhere in the
    signature.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    if " -> " not in signature:
        # Signature comment with no arrow: no parameters; the whole string
        # is treated as the return (variable) type.
        param_types = None
        return_type = signature.strip()
    else:
        lhs, return_type = [part.strip() for part in signature.split(" -> ")]
        # Strip the parentheses around the comma-separated parameter list.
        inner = lhs[1:-1].strip()
        param_types = split_parameter_types(inner)
    requires = set(_RE_QUALIFIED_TYPES.findall(signature))
    return param_types, return_type, requires
|
Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
|
def listar_por_equipamento(self, id_equipment):
    """List all Script related Equipment.

    :param id_equipment: Identifier of the Equipment. Integer value and greater than zero.

    :return: Dictionary with the following structure:

    ::

        {script': [{‘id’: < id >,
        ‘nome’: < nome >,
        ‘descricao’: < descricao >,
        ‘id_tipo_roteiro’: < id_tipo_roteiro >,
        ‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
        ‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}

    :raise InvalidParameterError: The identifier of Equipment is null and invalid.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate the identifier before issuing the request.
    if not is_valid_int_param(id_equipment):
        raise InvalidParameterError(
            u'The identifier of Equipment is invalid or was not informed.')

    url = 'script/equipment/' + str(id_equipment) + '/'

    code, map = self.submit(None, 'GET', url)

    key = 'script'
    # Normalize the response so the 'script' entry is always a list.
    return get_list_map(self.response(code, map, [key]), key)
|
List all Script related Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def prev_img_ws(self, ws, loop=True):
    """Go to the previous image in the focused channel in the workspace.

    Returns True on success, None when the workspace has no active
    channel.
    """
    # NOTE(review): `loop` is accepted but never used here — presumably
    # wrap-around is handled by the channel; confirm before removing.
    channel = self.get_active_channel_ws(ws)
    if channel is None:
        return None
    channel.prev_image()
    return True
|
Go to the previous image in the focused channel in the workspace.
|
def visualize_cloud_of_words(dictionary, image_path=None):
    """
    Renders the cloud of words representation for a given dictionary of frequencies.

    Displays the result with matplotlib (blocking ``plt.show()`` call);
    nothing is returned.

    :param dictionary: the dictionary object that contains key-frequency pairs
    :param image_path: the path to the image mask, None if no masking is needed
    """
    from PIL import Image
    if image_path is not None:
        # Use the image's pixels as a shape mask for the cloud.
        mask = np.array(Image.open(image_path))
        wc = WordCloud(mask=mask, background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
        wc = wc.generate_from_frequencies(dictionary)
    else:
        # Generate a word cloud image
        wc = WordCloud(background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
        wc = wc.generate_from_frequencies(dictionary)
    # Display the generated image:
    # the matplotlib way:
    import matplotlib.pyplot as plt
    plt.rcParams['figure.figsize'] = (15, 15)
    plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")
    plt.show()
|
Renders the cloud of words representation for a given dictionary of frequencies
:param dictionary: the dictionary object that contains key-frequency pairs
:param image_path: the path to the image mask, None if no masking is needed
|
def cause_effect_info(self, mechanism, purview):
    """Return the cause-effect information for a mechanism over a purview.

    This is the minimum of the cause and effect information.
    """
    cause = self.cause_info(mechanism, purview)
    effect = self.effect_info(mechanism, purview)
    return min(cause, effect)
|
Return the cause-effect information for a mechanism over a purview.
This is the minimum of the cause and effect information.
|
def combs(a, r):
    """NumPy implementation of ``itertools.combinations``.

    Return successive ``r``-length combinations of elements in the array ``a``.

    Args:
        a (np.ndarray): The array from which to get combinations.
        r (int): The length of the combinations.

    Returns:
        np.ndarray: An array of combinations of shape ``(n_combinations, r)``
        (an empty array when ``r == 0``).
    """
    # Special-case for 0-length combinations
    if r == 0:
        return np.asarray([])
    a = np.asarray(a)
    # Structured dtype with r fields lets np.fromiter consume the r-tuples
    # yielded by itertools.combinations. (The original guarded this with a
    # dead `r == 0` branch, unreachable after the early return above.)
    data_type = np.dtype([('', a.dtype)] * r)
    b = np.fromiter(combinations(a, r), data_type)
    # Reinterpret the structured rows as a plain (n, r) array.
    return b.view(a.dtype).reshape(-1, r)
|
NumPy implementation of ``itertools.combinations``.
Return successive ``r``-length combinations of elements in the array ``a``.
Args:
a (np.ndarray): The array from which to get combinations.
r (int): The length of the combinations.
Returns:
np.ndarray: An array of combinations.
|
def fit_linear(X, y):
    """
    Uses OLS to fit the regression.

    Returns the fitted ``LinearRegression`` estimator (``fit`` returns the
    estimator instance itself).
    """
    return linear_model.LinearRegression().fit(X, y)
|
Uses OLS to fit the regression.
|
def unpack_4to8(data):
    """ Promote 4-bit unsigned data into 8-bit unsigned data.

    Args:
        data: Numpy array with dtype == uint8; each byte packs two
            4-bit values.

    Notes:
        # The process is this:
        # ABCDEFGH [Bits of one 4+4-bit value]
        # 00000000ABCDEFGH [astype(uint16)]
        # 0000ABCDEFGH0000 [<< 4]
        # 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
        # 0000111100001111 [0x0F0F]
        # 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
        # ABCD0000EFGH0000 [<< 4]
        # which effectively pads the two 4-bit values with zeros on the right
        # Note: This technique assumes LSB-first ordering
    """
    # Widen to 16 bits; the 0x0F0F mask below keeps all values
    # non-negative, so the signed int16 never sets its sign bit.
    tmpdata = data.astype(np.int16)  # np.empty(upshape, dtype=np.int16)
    # Duplicate each byte into both halves of the 16-bit lane, then mask so
    # each half retains one 4-bit nibble.
    tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
    # tmpdata = tmpdata << 4  # Shift into high bits to avoid needing to sign extend
    # Swap byte order within each 16-bit word (see LSB-first note above).
    updata = tmpdata.byteswap()
    # Reinterpret the 16-bit words as pairs of bytes of the input dtype.
    return updata.view(data.dtype)
|
Promote 4-bit unsigned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
|
def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
    '''Evert dictionaries with tuples.

    Iterates over the list of dictionaries and everts them with their tuple
    values. For example:

    ``[ { 'a': ( 1, 2, ), }, ]``

    becomes

    ``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``

    The resulting iterable contains the same number of tuples as the
    initial iterable had tuple elements. The number of dictionaries is the same
    as the cartesian product of the initial iterable's tuple elements.

    Parameters
    ----------

    :``iterable``: list of dictionaries whose values are tuples

    Return Value(s)
    ---------------

    All combinations of the choices in the dictionaries.
    '''
    # Flatten all dictionary keys in order; one pool of choices per dict.
    all_keys = [key for mapping in iterable for key in mapping.keys()]
    pools = [list(*mapping.values()) for mapping in iterable]
    for combination in itertools.product(*pools):
        yield [{key: choice} for key, choice in zip(all_keys, combination)]
|
Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries.
|
def plot(self, entity):
    """
    Basic plot of a single binary sensor data.

    Renders (and shows) a horizontal timeline: blue where the sensor is
    off, red where it is on. Returns None.

    Parameters
    ----------
    entity : string
        The entity to plot
    """
    df = self._binary_df[[entity]]
    # Upsample to one-second resolution, forward-filling so each state
    # persists until the next recorded change.
    resampled = df.resample("s").ffill()  # Sample at seconds and ffill
    resampled.columns = ["value"]
    fig, ax = plt.subplots(1, 1, figsize=(16, 2))
    # Blue band as the "off" background; red overlay where value > 0.
    ax.fill_between(resampled.index, y1=0, y2=1, facecolor="royalblue", label="off")
    ax.fill_between(
        resampled.index,
        y1=0,
        y2=1,
        where=(resampled["value"] > 0),
        facecolor="red",
        label="on",
    )
    ax.set_title(entity)
    ax.set_xlabel("Date")
    ax.set_frame_on(False)
    ax.set_yticks([])
    plt.legend(loc=(1.01, 0.7))
    plt.show()
    return
|
Basic plot of a single binary sensor data.
Parameters
----------
entity : string
The entity to plot
|
def execute_command(working_dir, cmd, env_dict):
    """
    execute_command: run the command provided in the working dir
    specified adding the env_dict settings to the
    execution environment

    :param working_dir: path to directory to execute command
       also gets added to the PATH
    :param cmd: Shell command to execute
    :param env_dict: dictionary of additional env vars to
       be passed to the subprocess environment
    :raises RuntimeError: if the command exits with non-zero status
    """
    proc_env = os.environ.copy()
    # Make the working dir (and '.') resolvable as command locations.
    proc_env["PATH"] = "{}:{}:.".format(proc_env["PATH"], working_dir)
    proc_env.update(env_dict)
    proc = subprocess.Popen(
        cmd,
        cwd=working_dir,
        env=proc_env,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # communicate() drains stdout/stderr while waiting for exit. The
    # previous wait()-then-communicate() order could deadlock: wait()
    # blocks forever once the child fills a pipe buffer.
    stdout, stderr = proc.communicate()
    status = proc.returncode
    if status:
        msg = (
            "Non zero {} exit from command {}\n"
            "Stdout: {}\n"
            "Stderr: {}\n"
        ).format(status, cmd, stdout, stderr)
        LOGGER.error(msg)
        raise RuntimeError(msg)
    LOGGER.info(stdout)
|
execute_command: run the command provided in the working dir
specified adding the env_dict settings to the
execution environment
:param working_dir: path to directory to execute command
also gets added to the PATH
:param cmd: Shell command to execute
:param env_dict: dictionary of additional env vars to
be passed to the subprocess environment
|
def add_proxy_to(self, parent, name, multiplicity=Multiplicity.ONE_MANY, **kwargs):
    # type: (Any, AnyStr, Any, **Any) -> Part
    """Add this model as a proxy to another parent model.

    This will add the current model as a proxy model to another parent model. It ensures that it will copy the
    whole subassembly to the 'parent' model.

    In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
    additional keyword=value argument to this method. This will improve performance of the backend
    against a trade-off that someone looking at the frontend won't notice any changes unless the page
    is refreshed.

    :param name: Name of the new proxy model
    :type name: basestring
    :param parent: parent of the to be proxied model
    :type parent: :class:`Part`
    :param multiplicity: the multiplicity of the new proxy model (default ONE_MANY)
    :type multiplicity: basestring or None
    :param kwargs: (optional) additional kwargs that will be passed in during the edit/update request
    :type kwargs: dict or None
    :return: the new proxied :class:`Part`.
    :raises APIError: in case an Error occurs

    Examples
    --------
    >>> from pykechain.enums import Multiplicity
    >>> bike_model = project.model('Bike')
    # find the catalog model container, the highest parent to create catalog models under
    >>> catalog_model_container = project.model('Catalog container')
    >>> new_wheel_model = project.create_model(catalog_model_container, 'Wheel Catalog',
    ...                                        multiplicity=Multiplicity.ZERO_MANY)
    >>> new_wheel_model.add_proxy_to(bike_model, "Wheel", multiplicity=Multiplicity.ONE_MANY)
    """
    # Delegate to the client, which performs the actual API request.
    return self._client.create_proxy_model(self, parent, name, multiplicity, **kwargs)
|
Add this model as a proxy to another parent model.
This will add the current model as a proxy model to another parent model. It ensures that it will copy the
whole subassembly to the 'parent' model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param name: Name of the new proxy model
:type name: basestring
:param parent: parent of the to be proxied model
:type parent: :class:`Part`
:param multiplicity: the multiplicity of the new proxy model (default ONE_MANY)
:type multiplicity: basestring or None
:param kwargs: (optional) additional kwargs that will be passed in the during the edit/update request
:type kwargs: dict or None
:return: the new proxied :class:`Part`.
:raises APIError: in case an Error occurs
Examples
--------
>>> from pykechain.enums import Multiplicity
>>> bike_model = project.model('Bike')
# find the catalog model container, the highest parent to create catalog models under
>>> catalog_model_container = project.model('Catalog container')
>>> new_wheel_model = project.create_model(catalog_model_container, 'Wheel Catalog',
... multiplicity=Multiplicity.ZERO_MANY)
>>> new_wheel_model.add_proxy_to(bike_model, "Wheel", multiplicity=Multiplicity.ONE_MANY)
|
def p_arglist(p):
    '''
    arglist : arg
            | arglist COMMA arg
    '''
    # NOTE: the docstring above is the PLY grammar rule — it is consumed
    # at runtime by the parser generator and must not be reworded.
    if len(p) == 2:
        # Single argument: start a fresh list.
        p[0] = [p[1]]
    else:
        # arglist COMMA arg: extend the existing list in place.
        p[1].append(p[3])
        p[0] = p[1]
|
arglist : arg
| arglist COMMA arg
|
def _defaultdict(dct, fallback=_illegal_character):
    """Wrap *dct* so missing keys yield *fallback*.

    Returns a ``defaultdict`` pre-populated with the entries of *dct*;
    any absent key produces ``fallback`` instead of raising.
    """
    wrapped = defaultdict(lambda: fallback)
    wrapped.update(six.iteritems(dct))
    return wrapped
|
Wraps the given dictionary such that the given fallback function will be called when a nonexistent key is
accessed.
|
def merge(self, commit_message='', sha=None):
    """Merge this pull request.

    :param str commit_message: (optional), message to be used for the
        merge commit
    :returns: bool
    """
    payload = {'commit_message': commit_message}
    if sha:
        payload['sha'] = sha
    url = self._build_url('merge', base_url=self._api)
    result = self._json(self._put(url, data=dumps(payload)), 200)
    # Remember the resulting merge commit before reporting success.
    self.merge_commit_sha = result['sha']
    return result['merged']
|
Merge this pull request.
:param str commit_message: (optional), message to be used for the
merge commit
:returns: bool
|
def parse_type(defn, preprocess=True):
    """
    Parse a simple type expression into a SimType

    >>> parse_type('int *')

    :param defn: C type expression to parse.
    :param preprocess: run the C preprocessor over the definition first.
    :return: the parsed type object.
    :raises ImportError: if pycparser is not installed.
    :raises ValueError: if the parsed AST does not end in the expected typedef.
    """
    if pycparser is None:
        raise ImportError("Please install pycparser in order to parse C definitions")
    # Wrap the bare expression in a typedef so pycparser accepts it as a
    # complete declaration; 'QQQQ' is a throwaway placeholder name.
    defn = 'typedef ' + defn.strip('; \n\t\r') + ' QQQQ;'
    if preprocess:
        defn = do_preprocess(defn)
    node = pycparser.c_parser.CParser().parse(make_preamble()[0] + defn)
    # Sanity check: the last top-level declaration must be our typedef.
    if not isinstance(node, pycparser.c_ast.FileAST) or \
            not isinstance(node.ext[-1], pycparser.c_ast.Typedef):
        raise ValueError("Something went horribly wrong using pycparser")
    decl = node.ext[-1].type
    return _decl_to_type(decl)
|
Parse a simple type expression into a SimType
>>> parse_type('int *')
|
def classify_harmonic(self, partial_labels, use_CMN=True):
    '''Harmonic function method for semi-supervised classification,
    also known as the Gaussian Mean Fields algorithm.

    partial_labels: (n,) array of integer labels, -1 for unlabeled.
    use_CMN : when True, apply Class Mass Normalization

    Returns a copy of partial_labels with the -1 entries replaced by
    predicted class labels.

    From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions"
    by Zhu, Ghahramani, and Lafferty in 2003.
    Based on the matlab code at:
      http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m
    '''
    # prepare labels
    labels = np.array(partial_labels, copy=True)
    unlabeled = labels == -1
    # convert known labels to one-hot encoding
    fl, classes = _onehot(labels[~unlabeled])
    # Rows of the unnormalized graph Laplacian for the unlabeled points.
    L = self.laplacian(normed=False)
    if ss.issparse(L):
        L = L.tocsr()[unlabeled].toarray()
    else:
        L = L[unlabeled]
    Lul = L[:,~unlabeled]  # unlabeled-to-labeled sub-block
    Luu = L[:,unlabeled]   # unlabeled-to-unlabeled sub-block
    # Harmonic solution: f_u = -Luu^{-1} (Lul f_l)
    fu = -np.linalg.solve(Luu, Lul.dot(fl))
    if use_CMN:
        # Class Mass Normalization: rescale each class column so predicted
        # mass matches labeled class mass (with add-one smoothing).
        scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0)
        fu *= scale
    # assign new labels
    labels[unlabeled] = classes[fu.argmax(axis=1)]
    return labels
|
Harmonic function method for semi-supervised classification,
also known as the Gaussian Mean Fields algorithm.
partial_labels: (n,) array of integer labels, -1 for unlabeled.
use_CMN : when True, apply Class Mass Normalization
From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions"
by Zhu, Ghahramani, and Lafferty in 2003.
Based on the matlab code at:
http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.