| code | docstring |
|---|---|
def npz_generator(npz_path):
"""Generate data from an npz file."""
npz_data = np.load(npz_path)
X = npz_data['X']
# Y is a binary matrix with shape=(n, k); each y will have shape=(k,)
y = npz_data['Y']
n = X.shape[0]
while True:
i = np.random.randint(0, n)
yield {'X': X[i], 'Y': y[i]}
|
Generate data from an npz file.
|
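A minimal usage sketch for the row above, assuming numpy is importable as np; the file name sample.npz and the array shapes are illustrative only.
import numpy as np
np.savez('sample.npz', X=np.random.rand(10, 3), Y=np.eye(10)[:, :4])  # toy data
gen = npz_generator('sample.npz')
for _ in range(3):
    batch = next(gen)        # one randomly drawn (X, Y) pair per iteration
    print(batch['X'].shape, batch['Y'].shape)  # (3,) (4,)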
def version():
"""
Returns a human-readable version string.
For official releases, it will follow a semver style (e.g. ``1.2.7``).
For dev versions, it will have the semver style first, followed by
hyphenated qualifiers (e.g. ``1.2.7-dev``).
Returns a string.
"""
short = '.'.join([str(bit) for bit in __version__[:3]])
return '-'.join([short] + [str(bit) for bit in __version__[3:]])
|
Returns a human-readable version string.
For official releases, it will follow a semver style (e.g. ``1.2.7``).
For dev versions, it will have the semver style first, followed by
hyphenated qualifiers (e.g. ``1.2.7-dev``).
Returns a string.
|
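For illustration, a sketch of the tuple-to-string mapping assumed by the function above; the (major, minor, patch, qualifier) layout of __version__ is inferred from the docstring.
__version__ = (1, 2, 7, 'dev')  # hypothetical module-level version tuple
short = '.'.join(str(bit) for bit in __version__[:3])
print('-'.join([short] + [str(bit) for bit in __version__[3:]]))  # 1.2.7-dev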
def add_variables_from(self, linear, vartype=None):
"""Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an empty Ising model, adds two variables,
and subsequently adds to the bias of one of them while adding a new,
third variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0
"""
if isinstance(linear, abc.Mapping):
for v, bias in iteritems(linear):
self.add_variable(v, bias, vartype=vartype)
else:
try:
for v, bias in linear:
self.add_variable(v, bias, vartype=vartype)
except TypeError:
raise TypeError("expected 'linear' to be a dict or an iterable of 2-tuples.")
|
Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an empty Ising model, adds two variables,
and subsequently adds to the bias of one of them while adding a new,
third variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0
|
def remove_external_references_from_roles(self):
"""
Removes any external references on any of the roles from the predicate
"""
for node_role in self.node.findall('role'):
role = Crole(node_role)
role.remove_external_references()
|
Removes any external references on any of the roles from the predicate
|
def nearest_overlap(self, overlap, bins):
"""Return nearest overlap/crop factor based on number of bins"""
bins_overlap = overlap * bins
if bins_overlap % 2 != 0:
bins_overlap = math.ceil(bins_overlap / 2) * 2
overlap = bins_overlap / bins
logger.warning('number of overlapping FFT bins should be even, '
'changing overlap/crop factor to {:.5f}'.format(overlap))
return overlap
|
Return nearest overlap/crop factor based on number of bins
|
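A quick numeric check of the even-bin rounding above, using the math module only (the logger warning is omitted in this sketch):
import math
overlap, bins = 0.3, 50                  # 0.3 * 50 = 15 overlapping bins (odd)
bins_overlap = overlap * bins
if bins_overlap % 2 != 0:
    bins_overlap = math.ceil(bins_overlap / 2) * 2   # rounded up to 16
print(bins_overlap / bins)               # 0.32, the adjusted overlap factor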
def find_bled112_devices(cls):
"""Look for BLED112 dongles on this computer and start an instance on each one"""
found_devs = []
ports = serial.tools.list_ports.comports()
for port in ports:
if not hasattr(port, 'pid') or not hasattr(port, 'vid'):
continue
# Check if the device matches the BLED112's PID/VID combination
if port.pid == 1 and port.vid == 9304:
found_devs.append(port.device)
return found_devs
|
Look for BLED112 dongles on this computer and start an instance on each one
|
def vmdk_to_ami(args):
"""
Calls methods to perform vmdk import
:param args:
:return:
"""
aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket,
args.aws_regions, args.ami_name, args.vmdk_upload_file)
aws_importer.import_vmdk()
|
Calls methods to perform vmdk import
:param args:
:return:
|
def validate_api_call(schema, raw_request, raw_response):
"""
Validate the request/response cycle of an api call against a swagger
schema. Request/Response objects from the `requests` and `urllib` library
are supported.
"""
request = normalize_request(raw_request)
with ErrorDict() as errors:
try:
validate_request(
request=request,
schema=schema,
)
except ValidationError as err:
errors['request'].add_error(err.messages or getattr(err, 'detail'))
return
response = normalize_response(raw_response, raw_request)
try:
validate_response(
response=response,
request_method=request.method,
schema=schema
)
except ValidationError as err:
errors['response'].add_error(err.messages or getattr(err, 'detail'))
|
Validate the request/response cycle of an api call against a swagger
schema. Request/Response objects from the `requests` and `urllib` library
are supported.
|
def write_from_file(library, session, filename, count):
"""Take data from a file and write it out synchronously.
Corresponds to viWriteFromFile function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param filename: Name of file from which data will be read.
:param count: Number of bytes to be written.
:return: Number of bytes actually transferred, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
"""
return_count = ViUInt32()
ret = library.viWriteFromFile(session, filename, count, return_count)
return return_count, ret
|
Take data from a file and write it out synchronously.
Corresponds to viWriteFromFile function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param filename: Name of file from which data will be read.
:param count: Number of bytes to be written.
:return: Number of bytes actually transferred, return value of the library call.
:rtype: int, :class:`pyvisa.constants.StatusCode`
|
def _verify(
self,
request,
return_payload=False,
verify=True,
raise_missing=False,
request_args=None,
request_kwargs=None,
*args,
**kwargs
):
"""
If there is a "permakey", then we will verify the token by checking the
database. Otherwise, just do the normal verification.
Typically, any method that begins with an underscore in sanic-jwt should
not be touched. In this case, we are trying to break the rules a bit to handle
a unique use case: handle both expirable and non-expirable tokens.
"""
if "permakey" in request.headers:
# Extract the permakey from the headers
permakey = request.headers.get("permakey")
# In production, this should probably have some exception handling here
# in case the permakey is an empty string or some other bad value
payload = self._decode(permakey, verify=verify)
# Sometimes, the application will call _verify(...return_payload=True)
# So, let's make sure to handle this scenario.
if return_payload:
return payload
# Retrieve the user from the database
user_id = payload.get("user_id", None)
user = userid_table.get(user_id)
# If we cannot find a user, then this method should return
# is_valid == False
# reason == some text for why
# status == some status code, probably a 401
if not user_id or not user:
is_valid = False
reason = "No user found"
status = 401
else:
# After finding a user, make sure the permakey matches,
# or else return a bad status or some other error.
# In production, both this scenario, and the above "No user found"
# scenario should return an identical message and status code.
# This is to prevent your application accidentally
# leaking information about the existence or non-existence of users.
is_valid = user.permakey == permakey
reason = None if is_valid else "Permakey mismatch"
status = 200 if is_valid else 401
return is_valid, status, reason
else:
return super()._verify(
request=request,
return_payload=return_payload,
verify=verify,
raise_missing=raise_missing,
request_args=request_args,
request_kwargs=request_kwargs,
*args,
**kwargs
)
|
If there is a "permakey", then we will verify the token by checking the
database. Otherwise, just do the normal verification.
Typically, any method that begins with an underscore in sanic-jwt should
not be touched. In this case, we are trying to break the rules a bit to handle
a unique use case: handle both expirable and non-expirable tokens.
|
def get_next_work_day(self, division=None, date=None):
"""
Returns the next work day, skipping weekends and bank holidays
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: datetime.date; NB: get_next_holiday returns a dict
"""
date = date or datetime.date.today()
one_day = datetime.timedelta(days=1)
holidays = set(holiday['date'] for holiday in self.get_holidays(division=division))
while True:
date += one_day
if date.weekday() not in self.weekend and date not in holidays:
return date
|
Returns the next work day, skipping weekends and bank holidays
:param division: see division constants; defaults to common holidays
:param date: search starting from this date; defaults to today
:return: datetime.date; NB: get_next_holiday returns a dict
|
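A self-contained sketch of the same skip loop, with a hard-coded holiday set standing in for get_holidays(); the dates are illustrative.
import datetime
weekend = {5, 6}                            # Saturday, Sunday
holidays = {datetime.date(2024, 1, 1)}      # stand-in bank holiday
date = datetime.date(2023, 12, 29)          # a Friday
one_day = datetime.timedelta(days=1)
while True:
    date += one_day
    if date.weekday() not in weekend and date not in holidays:
        break
print(date)  # 2024-01-02: the weekend and New Year's Day are skipped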
def _lincomb(self, a, x, b, y, out):
"""Linear combination ``out = a*x + b*y``."""
for space, xp, yp, outp in zip(self.spaces, x.parts, y.parts,
out.parts):
space._lincomb(a, xp, b, yp, outp)
|
Linear combination ``out = a*x + b*y``.
|
def _read_para_r1_counter(self, code, cbit, clen, *, desc, length, version):
"""Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|             Type              |            Length             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                       Reserved, 4 bytes                       |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                R1 generation counter, 8 bytes                 |
|                                                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets      Bits    Name                    Description
  0           0     r1_counter.type         Parameter Type
  1          15     r1_counter.critical     Critical Bit
  2          16     r1_counter.length       Length of Contents
  4          32     -                       Reserved
  8          64     r1_counter.count        Generation of Valid Puzzles
"""
if clen != 12:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
if code == 128 and version != 1:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid parameter')
_resv = self._read_fileng(4)
_genc = self._read_unpack(8)
r1_counter = dict(
type=desc,
critical=cbit,
length=clen,
count=_genc,
)
return r1_counter
|
Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|             Type              |            Length             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                       Reserved, 4 bytes                       |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                R1 generation counter, 8 bytes                 |
|                                                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets      Bits    Name                    Description
  0           0     r1_counter.type         Parameter Type
  1          15     r1_counter.critical     Critical Bit
  2          16     r1_counter.length       Length of Contents
  4          32     -                       Reserved
  8          64     r1_counter.count        Generation of Valid Puzzles
|
def partial_normalize(self, axis: AxisIdentifier = 0, inplace: bool = False):
"""Normalize in rows or columns.
Parameters
----------
axis: int or str
Along which axis to sum (numpy-sense)
inplace: bool
Update the object itself
Returns
-------
hist : Histogram2D
"""
# TODO: Is this applicable for HistogramND?
axis = self._get_axis(axis)
if not inplace:
copy = self.copy()
copy.partial_normalize(axis, inplace=True)
return copy
else:
self._coerce_dtype(float)
if axis == 0:
divisor = self._frequencies.sum(axis=0)
else:
divisor = self._frequencies.sum(axis=1)[:, np.newaxis]
divisor[divisor == 0] = 1 # Prevent division errors
self._frequencies /= divisor
self._errors2 /= (divisor * divisor) # Has its limitations
return self
|
Normalize in rows or columns.
Parameters
----------
axis: int or str
Along which axis to sum (numpy-sense)
inplace: bool
Update the object itself
Returns
-------
hist : Histogram2D
|
def get_rgb_hex(self):
"""
Converts the RGB value to a hex value in the form of: #RRGGBB
:rtype: str
"""
rgb_r, rgb_g, rgb_b = self.get_upscaled_value_tuple()
return '#%02x%02x%02x' % (rgb_r, rgb_g, rgb_b)
|
Converts the RGB value to a hex value in the form of: #RRGGBB
:rtype: str
|
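The conversion is plain %-formatting; for example, with an upscaled tuple of (255, 128, 0):
print('#%02x%02x%02x' % (255, 128, 0))  # #ff8000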
def regen_keys():
'''
Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
salt '*' saltutil.regen_keys
'''
for fn_ in os.listdir(__opts__['pki_dir']):
path = os.path.join(__opts__['pki_dir'], fn_)
try:
os.remove(path)
except os.error:
pass
# TODO: move this into a channel function? Or auth?
# create a channel again, this will force the key regen
channel = salt.transport.client.ReqChannel.factory(__opts__)
channel.close()
|
Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
salt '*' saltutil.regen_keys
|
def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n')
|
Save items from list to the file.
|
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL
"""
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
field_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=field_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment
|
Convert descriptor to SQL
|
def get_results(self, *, block=False, timeout=None):
"""Get the results of each job in the group.
Parameters:
block(bool): Whether or not to block until the results are stored.
timeout(int): The maximum amount of time, in milliseconds,
to wait for results when block is True. Defaults to 10
seconds.
Raises:
ResultMissing: When block is False and the results aren't set.
ResultTimeout: When waiting for results times out.
Returns:
A result generator.
"""
deadline = None
if timeout:
deadline = time.monotonic() + timeout / 1000
for child in self.children:
if deadline:
timeout = max(0, int((deadline - time.monotonic()) * 1000))
if isinstance(child, group):
yield list(child.get_results(block=block, timeout=timeout))
else:
yield child.get_result(block=block, timeout=timeout)
|
Get the results of each job in the group.
Parameters:
block(bool): Whether or not to block until the results are stored.
timeout(int): The maximum amount of time, in milliseconds,
to wait for results when block is True. Defaults to 10
seconds.
Raises:
ResultMissing: When block is False and the results aren't set.
ResultTimeout: When waiting for results times out.
Returns:
A result generator.
|
def is_stationary(self):
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix. """
# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute
# it directly. Therefore we test whether the initial distribution is stationary.
return np.allclose(np.dot(self._Pi, self._Tij), self._Pi)
|
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix.
|
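The test is simply whether pi @ P equals pi; a standalone check with a two-state chain whose stationary distribution is known analytically (numpy only):
import numpy as np
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])                   # transition matrix
pi = np.array([2 / 3, 1 / 3])                # its stationary distribution
print(np.allclose(np.dot(pi, P), pi))        # True
print(np.allclose(np.dot([0.5, 0.5], P), [0.5, 0.5]))  # False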
def get_default_config():
"""
Produces a stock/out-of-the-box TidyPy configuration.
:rtype: dict
"""
config = {}
for name, cls in iteritems(get_tools()):
config[name] = cls.get_default_config()
try:
workers = multiprocessing.cpu_count() - 1
except NotImplementedError: # pragma: no cover
workers = 1
workers = max(1, min(4, workers))
config.update({
'exclude': [],
'merge-issues': True,
'workers': workers,
'requested_reports': [
{
'type': 'console',
},
],
'disabled': [],
'noqa': True,
'extends': [],
'ignore-missing-extends': False,
})
return config
|
Produces a stock/out-of-the-box TidyPy configuration.
:rtype: dict
|
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
Raz2= (R+self.a)**2+z**2
m= 4.*R*self.a/Raz2
return (3.*(R+self.a)/Raz2
-2.*((1.+m)/(1.-m)-special.ellipk(m)/special.ellipe(m))\
*self.a*(self.a2+z**2-R**2)/Raz2**2/m)*self._zforce(R,z)
|
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2018-08-04 - Written - Bovy (UofT)
|
def get_encodings_from_content(content):
"""
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from.
"""
if isinstance(content, bytes):
find_charset = re.compile(
br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return [encoding.decode('utf-8') for encoding in
find_charset(content) + find_xml(content)]
else:
find_charset = re.compile(
r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return find_charset(content) + find_xml(content)
|
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from.
|
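For example, calling the function above on typical markup (both branches return a list of encoding names):
print(get_encodings_from_content('<meta charset="utf-8">'))                   # ['utf-8']
print(get_encodings_from_content('<?xml version="1.0" encoding="utf-8"?>'))   # ['utf-8']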
def __recognize_user_class(self, node: yaml.Node,
expected_type: Type) -> RecResult:
"""Recognize a user-defined class in the node.
This tries to recognize only exactly the specified class. It \
recurses down into the class's attributes, but not to its \
subclasses. See also __recognize_user_classes().
Args:
node: The node to recognize.
expected_type: A user-defined class.
Returns:
A list containing the user-defined class, or an empty list.
"""
logger.debug('Recognizing as a user-defined class')
loc_str = '{}{}'.format(node.start_mark, os.linesep)
if hasattr(expected_type, 'yatiml_recognize'):
try:
unode = UnknownNode(self, node)
expected_type.yatiml_recognize(unode)
return [expected_type], ''
except RecognitionError as e:
if len(e.args) > 0:
message = ('Error recognizing a {}\n{}because of the'
' following error(s): {}').format(
expected_type.__class__, loc_str,
indent(e.args[0], ' '))
else:
message = 'Error recognizing a {}\n{}'.format(
expected_type.__class__, loc_str)
return [], message
else:
if issubclass(expected_type, enum.Enum):
if (not isinstance(node, yaml.ScalarNode)
or node.tag != 'tag:yaml.org,2002:str'):
message = 'Expected an enum value from {}\n{}'.format(
expected_type.__class__, loc_str)
return [], message
elif (issubclass(expected_type, UserString)
or issubclass(expected_type, str)):
if (not isinstance(node, yaml.ScalarNode)
or node.tag != 'tag:yaml.org,2002:str'):
message = 'Expected a string matching {}\n{}'.format(
expected_type.__class__, loc_str)
return [], message
else:
# auto-recognize based on constructor signature
if not isinstance(node, yaml.MappingNode):
message = 'Expected a dict/mapping here\n{}'.format(
loc_str)
return [], message
for attr_name, type_, required in class_subobjects(
expected_type):
cnode = Node(node)
# try exact match first, dashes if that doesn't match
for name in [attr_name, attr_name.replace('_', '-')]:
if cnode.has_attribute(name):
subnode = cnode.get_attribute(name)
recognized_types, message = self.recognize(
subnode.yaml_node, type_)
if len(recognized_types) == 0:
message = ('Failed when checking attribute'
' {}:\n{}').format(
name, indent(message, ' '))
return [], message
break
else:
if required:
message = (
'Error recognizing a {}\n{}because it'
' is missing an attribute named {}').format(
expected_type.__name__, loc_str, attr_name)
if '_' in attr_name:
message += ' or maybe {}.\n'.format(
attr_name.replace('_', '-'))
else:
message += '.\n'
return [], message
return [expected_type], ''
|
Recognize a user-defined class in the node.
This tries to recognize only exactly the specified class. It \
recurses down into the class's attributes, but not to its \
subclasses. See also __recognize_user_classes().
Args:
node: The node to recognize.
expected_type: A user-defined class.
Returns:
A list containing the user-defined class, or an empty list.
|
def attempt_sync_with_master(pr: PullRequestDetails
) -> Union[bool, CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/repos/merging/#perform-a-merge
"""
master_sha = get_master_sha(pr.repo)
remote = pr.remote_repo
url = ("https://api.github.com/repos/{}/{}/merges"
"?access_token={}".format(remote.organization,
remote.name,
remote.access_token))
data = {
'base': pr.branch_name,
'head': master_sha,
'commit_message': 'Update branch (automerge)'
}
response = requests.post(url, json=data)
if response.status_code == 201:
# Merge succeeded.
log('Synced #{} ({!r}) with master.'.format(pr.pull_id, pr.title))
return True
if response.status_code == 204:
# Already merged.
return False
if response.status_code == 409:
# Merge conflict.
return CannotAutomergeError("There's a merge conflict.")
if response.status_code == 403:
# Permission denied.
return CannotAutomergeError(
"Spurious failure. Github API requires me to be an admin on the "
"fork repository to merge master into the PR branch. Hit "
"'Update Branch' for me before trying again.")
raise RuntimeError('Sync with master failed for unknown reason. '
'Code: {}. Content: {}.'.format(response.status_code,
response.content))
|
References:
https://developer.github.com/v3/repos/merging/#perform-a-merge
|
def rating(self, **kwargs):
"""
This method lets users rate a movie. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
|
This method lets users rate a movie. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
A dict representation of the JSON returned from the API.
|
def twisted_absolute_path(path, request):
"""Hack to fix twisted not accepting absolute URIs"""
parsed = urlparse.urlparse(request.uri)
if parsed.scheme != '':
path_parts = parsed.path.lstrip('/').split('/')
request.prepath = path_parts[0:1]
request.postpath = path_parts[1:]
path = request.prepath[0]
return path, request
|
Hack to fix twisted not accepting absolute URIs
|
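The underlying split is ordinary URL parsing; a standalone illustration using the Python 3 spelling of urlparse and a stand-in request URI:
from urllib.parse import urlparse
parsed = urlparse('http://example.com/api/v1/items')   # absolute URI
path_parts = parsed.path.lstrip('/').split('/')
print(path_parts[0:1], path_parts[1:])                 # ['api'] ['v1', 'items']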
def download_file(url):
"""
Downloads a file from the specified URL.
:param str url: The URL to the file to be downloaded
:returns: the downloaded file's content
:rtype: str
"""
response = requests.get(url)
if response.status_code != 200:
return None
return response.text
|
Downloads a file from the specified URL.
:param str url: The URL to the file to be downloaded
:returns: the downloaded file's content
:rtype: str
|
def pdf(cls, mass, log_mode=True):
""" PDF for the Salpeter IMF.
Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun
"""
alpha = 2.35
a = 0.060285569480482866
dn_dm = a * mass**(-alpha)
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dm * (mass * np.log(10))
else:
# Number per linear mass range, i.e., dN/dM
return dn_dm
|
PDF for the Salpeter IMF.
Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun
|
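A quick numerical sanity check of the normalization claim, assuming scipy is available: integrating the linear-mass PDF from 0.1 to 100 Msun should give roughly 1.
from scipy.integrate import quad
alpha, a = 2.35, 0.060285569480482866
total, _ = quad(lambda m: a * m ** (-alpha), 0.1, 100)
print(total)  # ~1.0, up to the precision of the quoted constant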
def ecg_wave_detector(ecg, rpeaks):
"""
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
"""
q_waves = []
p_waves = []
q_waves_starts = []
s_waves = []
t_waves = []
t_waves_starts = []
t_waves_ends = []
for index, rpeak in enumerate(rpeaks[:-3]):
try:
epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
epoch_before = list(reversed(epoch_before))
q_wave_index = np.min(find_peaks(epoch_before))
q_wave = rpeak - q_wave_index
p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
p_wave = rpeak - p_wave_index
inter_pq = epoch_before[q_wave_index:p_wave_index]
inter_pq_derivative = np.gradient(inter_pq, 2)
q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
q_start = q_wave - q_start_index
q_waves.append(q_wave)
p_waves.append(p_wave)
q_waves_starts.append(q_start)
except ValueError:
pass
except IndexError:
pass
try:
epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
epoch_after = epoch_after[0:int(len(epoch_after)/2)]
s_wave_index = np.min(find_peaks(epoch_after))
s_wave = rpeak + s_wave_index
t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
t_wave = rpeak + t_wave_index
inter_st = epoch_after[s_wave_index:t_wave_index]
inter_st_derivative = np.gradient(inter_st, 2)
t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
t_start = s_wave + t_start_index
t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
t_end = t_wave + t_end
s_waves.append(s_wave)
t_waves.append(t_wave)
t_waves_starts.append(t_start)
t_waves_ends.append(t_end)
except ValueError:
pass
except IndexError:
pass
# pd.Series(epoch_before).plot()
# t_waves = []
# for index, rpeak in enumerate(rpeaks[0:-1]):
#
# epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
# pd.Series(epoch).plot()
#
# # T wave
# middle = (rpeaks[index+1] - rpeak) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]
#
# try:
# t_wave = int(rpeak+quarter) + np.argmax(epoch)
# t_waves.append(t_wave)
# except ValueError:
# pass
#
# p_waves = []
# for index, rpeak in enumerate(rpeaks[1:]):
# index += 1
# # Q wave
# middle = (rpeak - rpeaks[index-1]) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]
#
# try:
# p_wave = int(rpeak-quarter) + np.argmax(epoch)
# p_waves.append(p_wave)
# except ValueError:
# pass
#
# q_waves = []
# for index, p_wave in enumerate(p_waves):
# epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][0])]
#
# try:
# q_wave = p_wave + np.argmin(epoch)
# q_waves.append(q_wave)
# except ValueError:
# pass
#
# # TODO: manage to find the begininng of the Q and the end of the T wave so we can extract the QT interval
ecg_waves = {"T_Waves": t_waves,
"P_Waves": p_waves,
"Q_Waves": q_waves,
"S_Waves": s_waves,
"Q_Waves_Onsets": q_waves_starts,
"T_Waves_Onsets": t_waves_starts,
"T_Waves_Ends": t_waves_ends}
return(ecg_waves)
|
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
|
def _get_gc2_coordinates_for_rupture(self, edge_sets):
"""
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
"""
# Establish GC2 length - for use with Ry0
rup_gc2t, rup_gc2u = self.get_generalised_coordinates(
edge_sets[:, 0], edge_sets[:, 1])
# GC2 length should be the largest positive GC2 value of the edges
self.gc_length = numpy.max(rup_gc2u)
|
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
|
def _maybe_append_chunk(chunk_info, line_index, column, contents, chunks):
"""Append chunk_info to chunks if it is set."""
if chunk_info:
chunks.append(_chunk_from_ranges(contents,
chunk_info[0],
chunk_info[1],
line_index,
column))
|
Append chunk_info to chunks if it is set.
|
def parse_s2bins(s2bins):
"""
parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds
"""
s2b = {}
b2s = {}
for line in s2bins:
line = line.strip().split()
s, b = line[0], line[1]
if 'UNK' in b:
continue
if len(line) > 2:
g = ' '.join(line[2:])
else:
g = 'n/a'
b = '%s\t%s' % (b, g)
s2b[s] = b
if b not in b2s:
b2s[b] = []
b2s[b].append(s)
return s2b, b2s
|
parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds
|
def _sysv_services():
'''
Return list of sysv services.
'''
_services = []
output = __salt__['cmd.run'](['chkconfig', '--list'], python_shell=False)
for line in output.splitlines():
comps = line.split()
try:
if comps[1].startswith('0:'):
_services.append(comps[0])
except IndexError:
continue
# Return only the services that have an initscript present
return [x for x in _services if _service_is_sysv(x)]
|
Return list of sysv services.
|
def owners(self):
"""Legacy access to owner role.
DEPRECATED: use ``policy["roles/owners"]`` instead."""
result = set()
for role in self._OWNER_ROLES:
for member in self._bindings.get(role, ()):
result.add(member)
return frozenset(result)
|
Legacy access to owner role.
DEPRECATED: use ``policy["roles/owners"]`` instead.
|
def read_mrz(file, save_roi=False, extra_cmdline_params=''):
"""The main interface function to this module, encapsulating the recognition pipeline.
Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object.
:param file: A filename or a stream to read the file data from.
:param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from.
:param extra_cmdline_params: extra parameters passed to ocr.py
"""
p = MRZPipeline(file, extra_cmdline_params)
mrz = p.result
if mrz is not None:
mrz.aux['text'] = p['text']
if save_roi:
mrz.aux['roi'] = p['roi']
return mrz
|
The main interface function to this module, encapsulating the recognition pipeline.
Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object.
:param file: A filename or a stream to read the file data from.
:param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from.
:param extra_cmdline_params: extra parameters passed to ocr.py
|
def eci2geodetic(x, y, z, gmst=None, ellipsoid=None):
"""Converts the given ECI coordinates to Geodetic coordinates at the
given Greenwich Mean Sidereal Time (GMST) (defaults to now) and with
the given ellipsoid (defaults to WGS84).
This code was adapted from
`shashwatak/satellite-js <https://github.com/shashwatak/satellite-js/blob/master/src/coordinate-transforms.js>`_
and http://www.celestrak.com/columns/v02n03/
"""
if gmst is None:
gmst = dmc.toGMST()
if ellipsoid is None:
ellipsoid = WGS84
a = ellipsoid.a
b = ellipsoid.b
f = ellipsoid.f
r = math.sqrt((x * x) + (y * y))
e2 = (2 * f) - (f * f)
lon = math.atan2(y, x) - gmst
k = 0
kmax = 20
lat = math.atan2(z, r)
while (k < kmax):
slat = math.sin(lat)
C = 1 / math.sqrt( 1 - e2 * (slat * slat) )
lat = math.atan2(z + (a * C * e2 * slat), r)
k += 1
z = (r / math.cos(lat)) - (a * C)
return lat, lon, z
|
Converts the given ECI coordinates to Geodetic coordinates at the
given Greenwich Mean Sidereal Time (GMST) (defaults to now) and with
the given ellipsoid (defaults to WGS84).
This code was adapted from
`shashwatak/satellite-js <https://github.com/shashwatak/satellite-js/blob/master/src/coordinate-transforms.js>`_
and http://www.celestrak.com/columns/v02n03/
|
def handle(self, **options):
"""
Removes any entries in the AccessAttempt that are older
than your DEFENDER_ACCESS_ATTEMPT_EXPIRATION config, default 24 HOURS.
"""
print("Starting clean up of django-defender table")
now = timezone.now()
cleanup_delta = timedelta(hours=config.ACCESS_ATTEMPT_EXPIRATION)
min_attempt_time = now - cleanup_delta
attempts_to_clean = AccessAttempt.objects.filter(
attempt_time__lt=min_attempt_time,
)
attempts_to_clean_count = attempts_to_clean.count()
attempts_to_clean.delete()
print(
"Finished. Removed {0} AccessAttempt entries.".format(
attempts_to_clean_count)
)
|
Removes any entries in the AccessAttempt that are older
than your DEFENDER_ACCESS_ATTEMPT_EXPIRATION config, default 24 HOURS.
|
def set_cookie(cookies, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
'''Set a cookie key into the cookies dictionary *cookies*.'''
cookies[key] = value
if expires is not None:
if isinstance(expires, datetime):
now = (expires.now(expires.tzinfo) if expires.tzinfo else
expires.utcnow())
delta = expires - now
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
cookies[key]['expires'] = expires
if max_age is not None:
cookies[key]['max-age'] = max_age
# IE requires expires, so set it if it hasn't been set already.
if not expires:
cookies[key]['expires'] = http_date(time.time() + max_age)
if path is not None:
cookies[key]['path'] = path
if domain is not None:
cookies[key]['domain'] = domain
if secure:
cookies[key]['secure'] = True
if httponly:
cookies[key]['httponly'] = True
|
Set a cookie key into the cookies dictionary *cookies*.
|
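A sketch of calling it with the standard library's cookie container, assuming the http_date helper and imports used above are in scope; SimpleCookie provides the same key/morsel interface.
from http.cookies import SimpleCookie
cookies = SimpleCookie()
set_cookie(cookies, 'session', 'abc123', max_age=3600, httponly=True)
print(cookies.output())
# roughly: Set-Cookie: session=abc123; expires=...; HttpOnly; Max-Age=3600; Path=/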
def _add_current_quay_tag(repo, container_tags):
"""Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions.
"""
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags
|
Look up the current quay tag for the repository, adding it to the repo string.
Enables generation of CWL explicitly tied to revisions.
|
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
"""
if isinstance(string, basestring):
print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)]))
if isinstance(string, Text):
print("\n\n".join([table(sentence, fill=column) for sentence in string]))
if isinstance(string, Sentence):
print(table(string, fill=column))
|
Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
|
def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None:
"""
Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs
"""
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
_prune(dir_, epochs)
if subdirs:
_prune_subdirs(dir_)
|
Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs
|
def load_gene(ensembl, gene_id, de_novos=[]):
""" sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
"""
transcripts = minimise_transcripts(ensembl, gene_id, de_novos)
genes = []
for transcript_id in transcripts:
gene = construct_gene_object(ensembl, transcript_id)
genes.append(gene)
if len(genes) == 0:
raise IndexError("{0}: no suitable transcripts".format(gene_id))
return genes
|
sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
|
def FromReadings(cls, uuid, readings, sent_timestamp=0):
"""Generate a broadcast report from a list of readings and a uuid."""
header = struct.pack("<BBHLLL", cls.ReportType, 0, len(readings)*16, uuid, sent_timestamp, 0)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack("<HHLLL", reading.stream, 0, reading.reading_id,
reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading)
return BroadcastReport(bytearray(header) + packed_readings)
|
Generate a broadcast report from a list of readings and a uuid.
|
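The hard-coded sizes line up with the struct format strings; a quick standalone check:
import struct
print(struct.calcsize("<BBHLLL"))  # 16, the report header size in bytes
print(struct.calcsize("<HHLLL"))   # 16, one packed reading, matching len(readings)*16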
def update_or_create(self, location, contact_addresses, with_status=False,
overwrite_existing=False, **kw):
"""
Update or create a contact address and location pair. If the
location does not exist it will be automatically created. If the
server already has a location assigned with the same name, the
contact address specified will be added if it doesn't already
exist (Management and Log Server can have multiple address for a
single location).
:param list(str) contact_addresses: list of contact addresses for
the specified location
:param str location: location to place the contact address in
:param bool overwrite_existing: if you want to replace existing
location to address mappings set this to True. Otherwise if
the location exists, only new addresses are appended
:param bool with_status: if set to True, a 3-tuple is returned with
(Element, modified, created), where the second and third tuple
items are booleans indicating the status
:raises UpdateElementFailed: failed to update element with reason
:rtype: MultiContactAddress
"""
updated, created = False, False
location_ref = location_helper(location)
if location_ref in self:
for loc in self:
if loc.location_ref == location_ref:
if overwrite_existing:
loc['addresses'][:] = contact_addresses
updated = True
else:
for ca in contact_addresses:
if ca not in loc.addresses:
loc['addresses'].append(ca)
updated = True
else:
self.data.setdefault('multi_contact_addresses', []).append(
dict(addresses=contact_addresses, location_ref=location_ref))
created = True
if updated or created:
self.update()
if with_status:
return self, updated, created
return self
|
Update or create a contact address and location pair. If the
location does not exist it will be automatically created. If the
server already has a location assigned with the same name, the
contact address specified will be added if it doesn't already
exist (Management and Log Server can have multiple address for a
single location).
:param list(str) contact_addresses: list of contact addresses for
the specified location
:param str location: location to place the contact address in
:param bool overwrite_existing: if you want to replace existing
location to address mappings set this to True. Otherwise if
the location exists, only new addresses are appended
:param bool with_status: if set to True, a 3-tuple is returned with
(Element, modified, created), where the second and third tuple
items are booleans indicating the status
:raises UpdateElementFailed: failed to update element with reason
:rtype: MultiContactAddress
|
def child_count(self, only_direct=True):
"""Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node.
"""
if not only_direct:
count = 0
for _node in self.dfs_iter():
count += 1
return count
return len(self._children)
|
Returns how many children this node has, either only the direct
children of this node or inclusive of all children nodes of this node.
|
def get_filestore_instance(img_dir=None, data_dir=None):
"""Return an instance of FileStore."""
global _filestore_instances
key = "%s:%s" % (img_dir, data_dir)
try:
instance = _filestore_instances[key]
except KeyError:
instance = FileStore(
img_dir=img_dir, data_dir=data_dir
)
_filestore_instances[key] = instance
return instance
|
Return an instance of FileStore.
|
def _parse_ethtool_pppoe_opts(opts, iface):
'''
Filters given options and outputs valid settings for ETHTOOLS_PPPOE_OPTS
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting.
'''
config = {}
for opt in _DEB_CONFIG_PPPOE_OPTS:
if opt in opts:
config[opt] = opts[opt]
if 'provider' in opts and not opts['provider']:
_raise_error_iface(iface, 'provider', _CONFIG_TRUE + _CONFIG_FALSE)
valid = _CONFIG_TRUE + _CONFIG_FALSE
for option in ('noipdefault', 'usepeerdns', 'defaultroute', 'hide-password', 'noauth', 'persist', 'noaccomp'):
if option in opts:
if opts[option] in _CONFIG_TRUE:
config.update({option: 'True'})
elif opts[option] in _CONFIG_FALSE:
config.update({option: 'False'})
else:
_raise_error_iface(iface, option, valid)
return config
|
Filters given options and outputs valid settings for ETHTOOLS_PPPOE_OPTS
If an option has a value that is not expected, this
function will log what the Interface, Setting and what it was
expecting.
|
def _save_to_database(url, property_name, data):
"""
Store `data` under `property_name` in the `url` key in REST API DB.
Args:
url (obj): URL of the resource to which `property_name` will be stored.
property_name (str): Name of the property under which the `data` will
be stored.
data (obj): Any object.
"""
data = json.dumps([
d.to_dict() if hasattr(d, "to_dict") else d
for d in data
])
logger.debug("_save_to_database() data: %s" % repr(data))
requests.post(
_WEB_URL + _REQUEST_DB_SAVE,
timeout=REQUEST_TIMEOUT,
allow_redirects=True,
verify=False,
data={
"url": url,
"value": data,
"property_name": property_name,
}
)
logger.info(
"`%s` for `%s` sent to REST DB." % (
property_name,
url,
)
)
|
Store `data` under `property_name` in the `url` key in REST API DB.
Args:
url (obj): URL of the resource to which `property_name` will be stored.
property_name (str): Name of the property under which the `data` will
be stored.
data (obj): Any object.
|
def _access_through_cftimeindex(values, name):
"""Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
"""
from ..coding.cftimeindex import CFTimeIndex
values_as_cftimeindex = CFTimeIndex(values.ravel())
if name == 'season':
months = values_as_cftimeindex.month
field_values = _season_from_months(months)
else:
field_values = getattr(values_as_cftimeindex, name)
return field_values.reshape(values.shape)
|
Coerce an array of datetime-like values to a CFTimeIndex
and access requested datetime component
|
def Column(self, column_name):
"""Iterates over values of a given column.
Args:
column_name: The name of the column to retrieve the values for.
Yields:
Values of the specified column.
Raises:
KeyError: If given column is not present in the table.
"""
column_idx = None
for idx, column in enumerate(self.header.columns):
if column.name == column_name:
column_idx = idx
break
if column_idx is None:
raise KeyError("Column '{}' not found".format(column_name))
for row in self.rows:
yield row.values[column_idx]
|
Iterates over values of a given column.
Args:
column_name: The name of the column to retrieve the values for.
Yields:
Values of the specified column.
Raises:
KeyError: If given column is not present in the table.
|
def unmatched_quotes_in_line(text):
"""Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License.
"""
# We check " first, then ', so complex cases with nested quotes will
# get the " to take precedence.
text = text.replace("\\'", "")
text = text.replace('\\"', '')
if text.count('"') % 2:
return '"'
elif text.count("'") % 2:
return "'"
else:
return ''
|
Return whether a string has open quotes.
This simply counts whether the number of quote characters of either
type in the string is odd.
Taken from the IPython project (in IPython/core/completer.py in v0.13)
Spyder team: Add some changes to deal with escaped quotes
- Copyright (C) 2008-2011 IPython Development Team
- Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
- Copyright (C) 2001 Python Software Foundation, www.python.org
Distributed under the terms of the BSD License.
|
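For example, calling the function above (note that escaped quotes are stripped before counting):
print(unmatched_quotes_in_line('print("hello'))   # '"'  one unmatched double quote
print(unmatched_quotes_in_line("print('hi')"))    # ''   quotes are balanced
print(unmatched_quotes_in_line(r"s = '\''"))      # ''   the escaped quote is ignored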
def _get_metrics_to_collect(self, instance_key, additional_metrics):
"""
Return and cache the list of metrics to collect.
"""
if instance_key not in self.metrics_to_collect_by_instance:
self.metrics_to_collect_by_instance[instance_key] = self._build_metric_list_to_collect(additional_metrics)
return self.metrics_to_collect_by_instance[instance_key]
|
Return and cache the list of metrics to collect.
|
def add_template_events_to_network(self, columns, vectors):
""" Add a vector indexed """
# Just call through to the standard function
self.template_events = self.template_event_dict['network']
self.add_template_network_events(columns, vectors)
self.template_event_dict['network'] = self.template_events
self.template_events = None
|
Add vector-indexed template events to the network.
|
def press_keys(self, characters=()):
"""Press and then release each of the given character keys."""
for character in characters:
self.press_key(character)
for character in characters:
self.release_key(character)
|
Press and then release each of the given character keys.
|
def y(self, y):
"""Project y as x"""
if y is None:
return None
if self._force_vertical:
return super(HorizontalView, self).y(y)
return super(HorizontalView, self).x(y)
|
Project y as x
|
def insertUnorderedList(self):
"""
Inserts an unordered list into the editor.
"""
cursor = self.editor().textCursor()
currlist = cursor.currentList()
new_style = QTextListFormat.ListDisc
indent = 1
if currlist:
format = currlist.format()
indent = format.indent() + 1
style = format.style()
if style == QTextListFormat.ListDisc:
new_style = QTextListFormat.ListCircle
elif style == QTextListFormat.ListCircle:
new_style = QTextListFormat.ListSquare
new_format = QTextListFormat()
new_format.setStyle(new_style)
new_format.setIndent(indent)
new_list = cursor.createList(new_format)
self.editor().setFocus()
return new_list
|
Inserts an unordered list into the editor.
|
def parse_link_header(link):
"""takes the link header as a string and returns a dictionary with rel values as keys and urls as values
:param link: link header as a string
:rtype: dictionary {rel_name: rel_value}
"""
rel_dict = {}
for rels in link.split(','):
rel_break = quoted_split(rels, ';')
try:
rel_url = re.search('<(.+?)>', rel_break[0]).group(1)
rel_names = quoted_split(rel_break[1], '=')[-1]
if rel_names.startswith('"') and rel_names.endswith('"'):
rel_names = rel_names[1:-1]
for name in rel_names.split():
rel_dict[name] = rel_url
except (AttributeError, IndexError):
pass
return rel_dict
|
Takes the link header as a string and returns a dictionary with rel names as keys and URLs as values.
:param link: link header as a string
:rtype: dictionary {rel_name: rel_value}
|
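For instance, on a GitHub-style pagination header (assuming the module's quoted_split helper is available, since the function depends on it):
link = ('<https://api.example.com/items?page=2>; rel="next", '
        '<https://api.example.com/items?page=5>; rel="last"')
print(parse_link_header(link))
# {'next': 'https://api.example.com/items?page=2',
#  'last': 'https://api.example.com/items?page=5'}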
def is_trusted_subject(request):
"""Determine if calling subject is fully trusted."""
logging.debug('Active subjects: {}'.format(', '.join(request.all_subjects_set)))
logging.debug('Trusted subjects: {}'.format(', '.join(get_trusted_subjects())))
return not request.all_subjects_set.isdisjoint(get_trusted_subjects())
|
Determine if calling subject is fully trusted.
|
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Transformer fast decoding."""
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
"k":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
key_channels]), hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
value_channels]), hparams.num_heads),
} for layer in range(num_layers)
})
# If `ffn_layer` is in ["dense_relu_dense", "conv_hidden_relu"], then the
# cache key "f" won't be used, which means that the shape of cache["f"]
# won't be changed to
# [beam_size*batch_size, decode_length, hparams.hidden_size] and may cause
# an error when applying the `nest.map` reshape function to it.
if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
for layer in range(num_layers):
cache["layer_%d" % layer]["f"] = tf.zeros(
[batch_size, 0, hparams.hidden_size])
if encoder_output is not None:
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(
"%sdecoder/%s/encdec_attention/multihead_attention" %
(scope_prefix, layer_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name]["k_encdec"] = k_encdec
cache[layer_name]["v_encdec"] = v_encdec
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache
|
Create the initial cache for Transformer fast decoding.
|
def features_tags_parse_str_to_dict(obj):
"""
Parse tag strings of all features in the collection into a Python
dictionary, if possible.
"""
features = obj['features']
for i in tqdm(range(len(features))):
tags = features[i]['properties'].get('tags')
if tags is not None:
try:
tags = json.loads("{" + tags.replace("=>", ":") + "}")
except (TypeError, ValueError):
try:
tags = eval("{" + tags.replace("=>", ":") + "}")
except Exception:
tags = None
if isinstance(tags, dict):
features[i]['properties']['tags'] = dict(tags)
elif tags is None and 'tags' in features[i]['properties']:
del features[i]['properties']['tags']
return obj
|
Parse tag strings of all features in the collection into a Python
dictionary, if possible.
|
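A minimal worked example of the conversion above, using a hypothetical hstore-style tag string (assumes `json` and `tqdm` are imported, as the function requires):
collection = {
    'features': [
        {'properties': {'tags': '"amenity"=>"pub", "name"=>"The Crown"'}},
    ]
}
result = features_tags_parse_str_to_dict(collection)
# The "=>" pairs are rewritten to ":" and parsed as JSON:
# result['features'][0]['properties']['tags']
# == {'amenity': 'pub', 'name': 'The Crown'}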
def infer_id(self, ident, diagnostic=None):
"""
Infer the type of an ID.
- check if the ID is declared in the scope
- if not, the ID has a polymorphic type
"""
# check if ID is declared
#defined = self.type_node.get_by_symbol_name(ident)
defined = self.infer_node.scope_node.get_by_symbol_name(ident)
if len(defined) > 0:
# set from matchings declarations
#self.type_node.update(defined)
self.infer_node.scope_node.update(defined)
else:
diagnostic.notify(
Severity.ERROR,
"%s never declared" % self.value,
self.info
)
|
Infer the type of an ID.
- check if the ID is declared in the scope
- if not, the ID has a polymorphic type
|
def patch(self, pid, record, **kwargs):
"""Modify a record.
Permissions: ``update_permission_factory``
The data should be a JSON-patch, which will be applied to the record.
Requires header ``Content-Type: application/json-patch+json``.
Procedure description:
#. The record is deserialized using the proper loader.
#. The ETag is checked.
#. The record is patched.
#. The HTTP response is built with the help of the link factory.
:param pid: Persistent identifier for record.
:param record: Record object.
:returns: The modified record.
"""
data = self.loaders[request.mimetype]()
if data is None:
raise InvalidDataRESTError()
self.check_etag(str(record.revision_id))
try:
record = record.patch(data)
except (JsonPatchException, JsonPointerException):
raise PatchJSONFailureRESTError()
record.commit()
db.session.commit()
if self.indexer_class:
self.indexer_class().index(record)
return self.make_response(
pid, record, links_factory=self.links_factory)
|
Modify a record.
Permissions: ``update_permission_factory``
The data should be a JSON-patch, which will be applied to the record.
Requires header ``Content-Type: application/json-patch+json``.
Procedure description:
#. The record is deserialized using the proper loader.
#. The ETag is checked.
#. The record is patched.
#. The HTTP response is built with the help of the link factory.
:param pid: Persistent identifier for record.
:param record: Record object.
:returns: The modified record.
|
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
|
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
|
def _check_branch(self, revision, branch):
'''
Used to find out if the revision is in the given branch.
:param revision: Revision to check.
:param branch: Branch to check revision on.
:return: True/False - Found it/Didn't find it
'''
# Get a changelog
clog_url = self.hg_url / branch / 'json-log' / revision
try:
Log.note("Searching through changelog {{url}}", url=clog_url)
clog_obj = http.get_json(clog_url, retry=RETRY)
if isinstance(clog_obj, (text_type, str)):
Log.note(
"Revision {{cset}} does not exist in the {{branch}} branch",
cset=revision, branch=branch
)
return False
except Exception as e:
Log.note("Unexpected error getting changset-log for {{url}}: {{error}}", url=clog_url, error=e)
return False
return True
|
Used to find out if the revision is in the given branch.
:param revision: Revision to check.
:param branch: Branch to check revision on.
:return: True/False - Found it/Didn't find it
|
def get_help(self):
"""
Get context help, depending on the current step. If no help for current step
was specified in scenario description file, default one will be returned.
"""
current_state = self.get_current_state()
if current_state is None:
return statement(INTERNAL_ERROR_MSG)
else:
try:
return choice(self._scenario_steps[current_state]['help'])
except KeyError:
return choice(self._default_help)
|
Get context help, depending on the current step. If no help for current step
was specified in scenario description file, default one will be returned.
|
def _to_map_job_config(cls,
mr_spec,
# TODO(user): Remove this parameter after it can be
# read from mr_spec.
queue_name):
"""Converts model.MapreduceSpec back to JobConfig.
This method allows our internal methods to use JobConfig directly.
This method also allows us to expose JobConfig as an API during execution,
despite that it is not saved into datastore.
Args:
mr_spec: model.MapreduceSpec.
queue_name: queue name.
Returns:
The JobConfig object for this job.
"""
mapper_spec = mr_spec.mapper
# 0 means all the old APIs before api_version is introduced.
api_version = mr_spec.params.get("api_version", 0)
old_api = api_version == 0
# Deserialize params from json if input_reader/output_writer are new API.
input_reader_cls = mapper_spec.input_reader_class()
input_reader_params = input_readers._get_params(mapper_spec)
if issubclass(input_reader_cls, input_reader.InputReader):
input_reader_params = input_reader_cls.params_from_json(
input_reader_params)
output_writer_cls = mapper_spec.output_writer_class()
output_writer_params = output_writers._get_params(mapper_spec)
# TODO(user): Call json (de)serialization for writer.
# if (output_writer_cls and
# issubclass(output_writer_cls, output_writer.OutputWriter)):
# output_writer_params = output_writer_cls.params_from_json(
# output_writer_params)
# We can not always convert MapreduceSpec generated by older API
# to JobConfig. Thus, mr framework should use/expose the returned JobConfig
# object with caution when a job is started with an old API.
# In this case, this method only tries not to blow up and assemble a
# JobConfig object as accurate as possible.
return cls(_lenient=old_api,
job_name=mr_spec.name,
job_id=mr_spec.mapreduce_id,
# handler_spec from older API may not have map_job.Mapper type.
mapper=util.for_name(mapper_spec.handler_spec),
input_reader_cls=input_reader_cls,
input_reader_params=input_reader_params,
output_writer_cls=output_writer_cls,
output_writer_params=output_writer_params,
shard_count=mapper_spec.shard_count,
queue_name=queue_name,
user_params=mr_spec.params.get("user_params"),
shard_max_attempts=mr_spec.params.get("shard_max_attempts"),
done_callback_url=mr_spec.params.get("done_callback"),
_force_writes=mr_spec.params.get("force_writes"),
_base_path=mr_spec.params["base_path"],
_task_max_attempts=mr_spec.params.get("task_max_attempts"),
_task_max_data_processing_attempts=(
mr_spec.params.get("task_max_data_processing_attempts")),
_hooks_cls=util.for_name(mr_spec.hooks_class_name),
_app=mr_spec.params.get("app_id"),
_api_version=api_version)
|
Converts model.MapreduceSpec back to JobConfig.
This method allows our internal methods to use JobConfig directly.
This method also allows us to expose JobConfig as an API during execution,
despite that it is not saved into datastore.
Args:
mr_spec: model.MapreduceSpec.
queue_name: queue name.
Returns:
The JobConfig object for this job.
|
def get_generator(self, path, *args, **kw_args):
"""
Get a generator that allows convenient access to the streamed data.
Elements from the dataset are returned from the generator one row at a time.
Unlike the direct access queue, this generator also returns the remainder elements.
Additional arguments are forwarded to get_queue.
See the get_queue method for documentation of these parameters.
:param path: The path of the dataset; see get_queue.
:return: A generator that iterates over the rows in the dataset.
"""
q = self.get_queue(path=path, *args, **kw_args)
try:
# This generator just implements a standard access pattern for the direct access queue.
for guard in q.iter():
with guard as batch:
batch_copy = batch.copy()
for row in batch_copy:
yield row
last_batch = self.get_remainder(path, q.block_size)
for row in last_batch:
yield row
finally:
q.close()
|
Get a generator that allows convenient access to the streamed data.
Elements from the dataset are returned from the generator one row at a time.
Unlike the direct access queue, this generator also returns the remainder elements.
Additional arguments are forwarded to get_queue.
See the get_queue method for documentation of these parameters.
:param path: The path of the dataset; see get_queue.
:return: A generator that iterates over the rows in the dataset.
|
def default_should_trace_hook(frame, filename):
'''
Return True if this frame should be traced, False if tracing should be blocked.
'''
# First, check whether this code object has a cached value
ignored_lines = _filename_to_ignored_lines.get(filename)
if ignored_lines is None:
# Now, look up that line of code and check for a @DontTrace
# preceding or on the same line as the method.
# E.g.:
# #@DontTrace
# def test():
# pass
# ... or ...
# def test(): #@DontTrace
# pass
ignored_lines = {}
lines = linecache.getlines(filename)
for i_line, line in enumerate(lines):
j = line.find('#')
if j >= 0:
comment = line[j:]
if DONT_TRACE_TAG in comment:
ignored_lines[i_line] = 1
# Note: when it's found in the comment, mark it up and down for the decorator lines found.
k = i_line - 1
while k >= 0:
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k -= 1
else:
break
k = i_line + 1
while k < len(lines):
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k += 1
else:
break
_filename_to_ignored_lines[filename] = ignored_lines
func_line = frame.f_code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed
return not (
func_line - 1 in ignored_lines or  # -1 to get line before method
func_line in ignored_lines)
|
Return True if this frame should be traced, False if tracing should be blocked.
|
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
octets = []
for _ in xrange(4):
octets.insert(0, str(ip_int & 0xFF))
ip_int >>= 8
return '.'.join(octets)
|
Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
|
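A standalone Python 3 sketch of the same conversion (the method above relies on Python 2's xrange), with a worked example:
def string_from_ip_int(ip_int):
    octets = []
    for _ in range(4):
        octets.insert(0, str(ip_int & 0xFF))  # peel off the lowest byte
        ip_int >>= 8
    return '.'.join(octets)

assert string_from_ip_int(0xC0A80001) == '192.168.0.1'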
def create(self, dataset_name, query, index_by, display_name):
""" Create a Cached Dataset for a Project. Master key must be set.
"""
url = "{0}/{1}".format(self._cached_datasets_url, dataset_name)
payload = {
"query": query,
"index_by": index_by,
"display_name": display_name
}
return self._get_json(HTTPMethods.PUT, url, self._get_master_key(), json=payload)
|
Create a Cached Dataset for a Project. Master key must be set.
|
def github_gfonts_ttFont(ttFont, license):
"""Get a TTFont object of a font downloaded
from Google Fonts git repository.
"""
if not license:
return
from fontbakery.utils import download_file
from fontTools.ttLib import TTFont
from urllib.error import HTTPError
LICENSE_DIRECTORY = {
"OFL.txt": "ofl",
"UFL.txt": "ufl",
"LICENSE.txt": "apache"
}
filename = os.path.basename(ttFont.reader.file.name)
fontname = filename.split('-')[0].lower()
url = ("https://github.com/google/fonts/raw/master"
"/{}/{}/{}").format(LICENSE_DIRECTORY[license],
fontname,
filename)
try:
fontfile = download_file(url)
return TTFont(fontfile)
except HTTPError:
return None
|
Get a TTFont object of a font downloaded
from Google Fonts git repository.
|
def authenticated_request(self, endpoint, method='GET', params=None, data=None):
'''
Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type
Params:
endpoint -- API endpoint to send request to
Keyword Args:
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
'''
headers = {
'X-Access-Token' : self.access_token,
'X-Client-ID' : self.client_id
}
return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)
|
Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type
Params:
endpoint -- API endpoint to send request to
Keyword Args:
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
|
def yaml_load(source, defaultdata=NO_DEFAULT):
"""merge YAML data from files found in source
Always returns a dict. The YAML files are expected to contain some kind of
key:value structures, possibly deeply nested. When merging, lists are
appended and dict keys are replaced. The YAML files are read with the
yaml.safe_load function.
source can be a file, a dir, a list/tuple of files or a string containing
a glob expression (with ?*[]).
For a directory, all *.yaml files will be read in alphabetical order.
defaultdata can be used to initialize the data.
"""
logger = logging.getLogger(__name__)
logger.debug("initialized with source=%s, defaultdata=%s", source, defaultdata)
if defaultdata is NO_DEFAULT:
data = None
else:
data = defaultdata
files = []
if type(source) is not str and len(source) == 1:
# when called from __main__, source is always a list, even if it contains only one item;
# turn it into a string in that case to support our different call modes
source = source[0]
if type(source) is list or type(source) is tuple:
# got a list, assume to be files
files = source
elif os.path.isdir(source):
# got a dir, read all *.yaml files
files = sorted(glob.glob(os.path.join(source, "*.yaml")))
elif os.path.isfile(source):
# got a single file, turn it into list to use the same code
files = [source]
else:
# try to use the source as a glob
files = sorted(glob.glob(source))
if files:
logger.debug("Reading %s\n", ", ".join(files))
for yaml_file in files:
try:
with open(yaml_file) as f:
new_data = safe_load(f)
logger.debug("YAML LOAD: %s", new_data)
except MarkedYAMLError as e:
logger.error("YAML Error: %s", e)
raise YamlReaderError("YAML Error: %s" % str(e))
if new_data is not None:
data = data_merge(data, new_data)
else:
if defaultdata is NO_DEFAULT:
logger.error("No YAML data found in %s and no default data given", source)
raise YamlReaderError("No YAML data found in %s" % source)
return data
|
merge YAML data from files found in source
Always returns a dict. The YAML files are expected to contain some kind of
key:value structures, possibly deeply nested. When merging, lists are
appended and dict keys are replaced. The YAML files are read with the
yaml.safe_load function.
source can be a file, a dir, a list/tuple of files or a string containing
a glob expression (with ?*[]).
For a directory, all *.yaml files will be read in alphabetical order.
defaultdata can be used to initialize the data.
|
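A hypothetical usage sketch for the loader above; the directory and file names are placeholders:
# All *.yaml files in conf.d/ are merged in alphabetical order; later files
# replace dict keys and extend lists, per data_merge.
settings = yaml_load("conf.d", defaultdata={})

# Other accepted source types:
# yaml_load("base.yaml")                     # a single file
# yaml_load(["base.yaml", "override.yaml"])  # an explicit file list
# yaml_load("conf.d/*.yaml")                 # a glob expression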
def get_path_url(path, relative=False):
""" Returns an absolute or relative path url given a path
"""
if relative:
return os.path.relpath(path)
else:
return 'file://%s' % os.path.abspath(path)
|
Return a file:// URL for the given path, or the plain relative path when relative=True
|
def distance(p1, p2):
"""
Cartesian distance between two PoseStamped or PoseLists
:param p1: point 1 (list, Pose or PoseStamped)
:param p2: point 2 (list, Pose or PoseStamped)
:return: cartesian distance (float)
"""
def xyz(some_pose):
if isinstance(some_pose, PoseStamped):
return some_pose.pose.position.x, some_pose.pose.position.y, some_pose.pose.position.z
elif isinstance(some_pose, Pose):
return some_pose.position.x, some_pose.position.y, some_pose.position.z
elif _is_indexable(some_pose[0]):
return some_pose[0][0], some_pose[0][1], some_pose[0][2]
else:
return some_pose[0], some_pose[1], some_pose[2]
x1, y1, z1 = xyz(p1)
x2, y2, z2 = xyz(p2)
x = x1 - x2
y = y1 - y2
z = z1 - z2
return sqrt(x * x + y * y + z * z)
|
Cartesian distance between two PoseStamped or PoseLists
:param p1: point 1 (list, Pose or PoseStamped)
:param p2: point 2 (list, Pose or PoseStamped)
:return: cartesian distance (float)
|
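A worked example with plain xyz lists, so no ROS message types are needed; the offset (1, 2, 2) has length sqrt(1 + 4 + 4) = 3:
assert distance([0.0, 0.0, 0.0], [1.0, 2.0, 2.0]) == 3.0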
def visit_break(self, node, parent):
"""visit a Break node by returning a fresh instance of it"""
return nodes.Break(
getattr(node, "lineno", None), getattr(node, "col_offset", None), parent
)
|
visit a Break node by returning a fresh instance of it
|
def lm_tfinal(damping_times, modes):
"""Return the maximum t_final of the modes given, with t_final the time
at which the amplitude falls to 1/1000 of the peak amplitude
"""
t_max = {}
for lmn in modes:
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
for n in range(nmodes):
t_max['%d%d%d' %(l,m,n)] = \
qnm_time_decay(damping_times['%d%d%d' %(l,m,n)], 1./1000)
return max(t_max.values())
|
Return the maximum t_final of the modes given, with t_final the time
at which the amplitude falls to 1/1000 of the peak amplitude
|
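A minimal sketch of what `qnm_time_decay` is assumed to compute: for an exponentially damped amplitude A(t) = A0 * exp(-t / tau), the amplitude reaches a fraction f of its peak at t = -tau * ln(f):
import math

def qnm_time_decay_sketch(damping_time, fraction):
    # Solve A0 * exp(-t / damping_time) == fraction * A0 for t.
    return -damping_time * math.log(fraction)

# With fraction = 1/1000 the decay time is damping_time * ln(1000), about
# 6.9 damping times (~27.6 ms for a 4 ms damping time).
print(qnm_time_decay_sketch(0.004, 1.0 / 1000))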
def nvrtcGetLoweredName(self, prog, name_expression):
"""
Retrieves the lowered (mangled) name of the __global__ function or
function template instantiation denoted by the given name expression.
"""
lowered_name = c_char_p()
code = self._lib.nvrtcGetLoweredName(prog,
c_char_p(encode_str(name_expression)),
byref(lowered_name))
self._throw_on_error(code)
return lowered_name.value.decode('utf-8')
|
Retrieves the lowered (mangled) name of the __global__ function or
function template instantiation denoted by the given name expression.
|
def host2id(self, hostname):
"""return member id by hostname"""
for key, value in self.server_map.items():
if value == hostname:
return key
|
return member id by hostname
|
def cat_acc(y_true, y_pred):
"""Categorical accuracy
"""
return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))
|
Categorical accuracy
|
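A small worked example for the metric above (one-hot truth vs. predicted class probabilities; accuracy is the fraction of rows whose argmax agrees):
import numpy as np

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.7, 0.2, 0.1],   # correct: class 0
                   [0.1, 0.3, 0.6],   # wrong:   predicts class 2
                   [0.2, 0.2, 0.6]])  # correct: class 2
assert np.isclose(cat_acc(y_true, y_pred), 2 / 3)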
def parse_latitude(latitude, hemisphere):
"""Parse a NMEA-formatted latitude pair.
Args:
latitude (str): Latitude in DDMM.MMMM
hemisphere (str): North or South
Returns:
float: Decimal representation of latitude
"""
latitude = int(latitude[:2]) + float(latitude[2:]) / 60
if hemisphere == 'S':
latitude = -latitude
elif not hemisphere == 'N':
raise ValueError('Incorrect North/South value %r' % hemisphere)
return latitude
|
Parse a NMEA-formatted latitude pair.
Args:
latitude (str): Latitude in DDMM.MMMM
hemisphere (str): North or South
Returns:
float: Decimal representation of latitude
|
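A worked example with the classic NMEA sample value 4807.038,N, i.e. 48 degrees 7.038 minutes North:
lat = parse_latitude('4807.038', 'N')
assert abs(lat - (48 + 7.038 / 60)) < 1e-9  # ~48.1173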
def _arguments_repr(self):
"""Representation of the arguments used to create this object."""
document_class_repr = (
'dict' if self.document_class is dict
else repr(self.document_class))
uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
self.uuid_representation)
return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
'unicode_decode_error_handler=%r, tzinfo=%r, '
'type_registry=%r' %
(document_class_repr, self.tz_aware, uuid_rep_repr,
self.unicode_decode_error_handler, self.tzinfo,
self.type_registry))
|
Representation of the arguments used to create this object.
|
def format_decimal(decimal):
"""
Formats a decimal number
:param decimal: the decimal value
:return: the formatted string value
"""
# strip trailing fractional zeros
normalized = decimal.normalize()
sign, digits, exponent = normalized.as_tuple()
if exponent >= 1:
normalized = normalized.quantize(1)
return str(normalized)
|
Formats a decimal number
:param decimal: the decimal value
:return: the formatted string value
|
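Two worked examples for the formatter above: trailing fractional zeros are stripped, while integral values are kept in plain notation rather than the exponent form that normalize() alone would produce:
from decimal import Decimal

assert format_decimal(Decimal('1.2300')) == '1.23'
# Decimal('100').normalize() is Decimal('1E+2'); quantize(1) restores '100'.
assert format_decimal(Decimal('100')) == '100'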
def _del_cached_value(self, xblock):
"""Remove a value from the xblock's cache, if the cache exists."""
# pylint: disable=protected-access
if hasattr(xblock, '_field_data_cache') and self.name in xblock._field_data_cache:
del xblock._field_data_cache[self.name]
|
Remove a value from the xblock's cache, if the cache exists.
|
def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None):
"""A stack of separable convolution blocks with residual connections."""
with tf.variable_scope(name):
padding_bias = None
if mask is not None:
padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
if padding == "LEFT": # Do not mask anything when left-padding.
mask = None
if (hparams.kernel_scheme in _KERNEL_SCHEMES and
hparams.dilation_scheme in _DILATION_SCHEMES):
kernels = _KERNEL_SCHEMES[hparams.kernel_scheme]
dilations = _DILATION_SCHEMES[hparams.dilation_scheme]
dilations_and_kernels = list(zip(dilations, kernels))
dilations_and_kernels1 = dilations_and_kernels[:2]
dilations_and_kernels2 = dilations_and_kernels[2:]
else:
k = (hparams.kernel_height, hparams.kernel_width)
k2 = (hparams.large_kernel_size, 1)
dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
separabilities1 = [hparams.separability, hparams.separability]
separabilities2 = [hparams.separability] * len(dilations_and_kernels2)
if hparams.separability < 0:
separabilities1 = [hparams.separability - 1, hparams.separability]
separabilities2 = [
hparams.separability - i
for i in reversed(range(len(dilations_and_kernels2)))
]
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(
x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)
for layer in range(layers):
with tf.variable_scope("layer_%d" % layer):
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels1,
normalizer_fn=norm_fn,
padding=padding,
mask=mask,
separabilities=separabilities1,
name="residual1")
x += common_layers.subseparable_conv_block(
x + y,
hparams.hidden_size,
dilations_and_kernels2,
normalizer_fn=norm_fn,
padding=padding,
mask=mask,
separabilities=separabilities2,
name="residual2") + y
if source is not None and hparams.attention_type != "none":
x += attention(x, source, norm_fn, hparams, bias=padding_bias)
if mask is not None:
x *= mask
return tf.nn.dropout(x, 1.0 - hparams.dropout)
|
A stack of separable convolution blocks with residual connections.
|
def is_Type(tp):
"""Python version independent check if an object is a type.
For Python 3.7 onwards(?) this is not equivalent to
``isinstance(tp, type)`` any more, as that call would return
``False`` for PEP 484 types.
Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
"""
if isinstance(tp, type):
return True
try:
typing._type_check(tp, '')
return True
except TypeError:
return False
|
Python version independent check if an object is a type.
For Python 3.7 onwards(?) this is not equivalent to
``isinstance(tp, type)`` any more, as that call would return
``False`` for PEP 484 types.
Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
|
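A short usage sketch for the check above (behavior of typing internals varies slightly across Python versions, so treat this as illustrative):
import typing

assert is_Type(int)               # a plain class passes the isinstance check
assert is_Type(typing.List[int])  # a PEP 484 construct passes via _type_check
assert not is_Type(42)            # a value raises TypeError in _type_check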
def get_ip_address_info(ip_address, cache=None, nameservers=None,
timeout=2.0, parallel=False):
"""
Returns reverse DNS and country information for the given IP address
Args:
ip_address (str): The IP address to check
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): parallel processing
Returns:
OrderedDict: ``ip_address``, ``country``, ``reverse_dns``, ``base_domain``
"""
ip_address = ip_address.lower()
if cache:
info = cache.get(ip_address, None)
if info:
return info
info = OrderedDict()
info["ip_address"] = ip_address
reverse_dns = get_reverse_dns(ip_address,
nameservers=nameservers,
timeout=timeout)
country = get_ip_address_country(ip_address, parallel=parallel)
info["country"] = country
info["reverse_dns"] = reverse_dns
info["base_domain"] = None
if reverse_dns is not None:
base_domain = get_base_domain(reverse_dns)
info["base_domain"] = base_domain
return info
|
Returns reverse DNS and country information for the given IP address
Args:
ip_address (str): The IP address to check
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS timeout in seconds
parallel (bool): parallel processing
Returns:
OrderedDict: ``ip_address``, ``country``, ``reverse_dns``, ``base_domain``
|
def __general(self):
"""Level-0 parser and main loop.
Look for a token that matches a level-1 parser and hand over control."""
while 1: # main loop
try:
tok = self.__peek() # only peek, apply_parser() will consume
except DXParserNoTokens:
# save previous DXInitObject
# (kludge in here as the last level-2 parser usually does not return
# via the object parser)
if self.currentobject and self.currentobject not in self.objects:
self.objects.append(self.currentobject)
return # stop parsing and finish
# decision branches for all level-1 parsers:
# (the only way to get out of the lower level parsers!)
if tok.iscode('COMMENT'):
self.set_parser('comment') # switch the state
elif tok.iscode('WORD') and tok.equals('object'):
self.set_parser('object') # switch the state
elif self.__parser is self.__general:
# Either a level-2 parser screwed up or some level-1
# construct is not implemented. (Note: this elif can
# be only reached at the beginning or after comments;
# later we never formally switch back to __general
# (would create infinite loop)
raise DXParseError('Unknown level-1 construct at '+str(tok))
self.apply_parser()
|
Level-0 parser and main loop.
Look for a token that matches a level-1 parser and hand over control.
|
def _compare_strings(cls, source, target):
"""
Compares a source string to a target string,
and addresses the condition in which the source string
includes unquoted special characters.
It performs a simple regular expression match,
with the assumption that (as required) unquoted special characters
appear only at the beginning and/or the end of the source string.
It also properly differentiates between unquoted and quoted
special characters.
:param string source: First string value
:param string target: Second string value
:returns: The comparison relation among input strings.
:rtype: int
"""
start = 0
end = len(source)
begins = 0
ends = 0
# Reading of initial wildcard in source
if source.startswith(CPEComponent2_3_WFN.WILDCARD_MULTI):
# Source starts with "*"
start = 1
begins = -1
else:
while ((start < len(source)) and
source.startswith(CPEComponent2_3_WFN.WILDCARD_ONE,
start, start + 1)):
# Source starts with one or more "?"
start += 1
begins += 1
# Reading of final wildcard in source
if (source.endswith(CPEComponent2_3_WFN.WILDCARD_MULTI) and
CPESet2_3._is_even_wildcards(source, end - 1)):
# Source ends in "*"
end -= 1
ends = -1
else:
while ((end > 0) and
source.endswith(CPEComponent2_3_WFN.WILDCARD_ONE, end - 1, end) and
CPESet2_3._is_even_wildcards(source, end - 1)):
# Source ends in "?"
end -= 1
ends += 1
source = source[start: end]
index = -1
leftover = len(target)
while (leftover > 0):
index = target.find(source, index + 1)
if (index == -1):
break
escapes = target.count("\\", 0, index)
if ((index > 0) and (begins != -1) and
(begins < (index - escapes))):
break
escapes = target.count("\\", index + 1, len(target))
leftover = len(target) - index - escapes - len(source)
if ((leftover > 0) and ((ends != -1) and (leftover > ends))):
continue
return CPESet2_3.LOGICAL_VALUE_SUPERSET
return CPESet2_3.LOGICAL_VALUE_DISJOINT
|
Compares a source string to a target string,
and addresses the condition in which the source string
includes unquoted special characters.
It performs a simple regular expression match,
with the assumption that (as required) unquoted special characters
appear only at the beginning and/or the end of the source string.
It also properly differentiates between unquoted and quoted
special characters.
:param string source: First string value
:param string target: Second string value
:returns: The comparison relation among input strings.
:rtype: int
|
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:param str jumpkind: The jumpkind.
:return: Whether the jump was resolved, and the resolved targets alongside it
:rtype: tuple
"""
project = self.project
b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,
ignored_regs=('gp',)
)
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
if not sources:
return False, []
source = sources[0]
source_addr = source[0]
annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)
annotated_cfg.from_digraph(b.slice)
state = project.factory.blank_state(addr=source_addr, mode="fastpath",
remove_options=options.refs
)
func = cfg.kb.functions.function(addr=func_addr)
gp_offset = project.arch.registers['gp'][0]
if 'gp' not in func.info:
sec = project.loader.find_section_containing(func.addr)
if sec is None or sec.name != '.plt':
# this might be a special case: gp is only used once in this function, and it can be initialized right before
# its use site.
# TODO: handle this case
l.debug('Failed to determine value of register gp for function %#x.', func.addr)
return False, [ ]
else:
state.regs.gp = func.info['gp']
def overwrite_tmp_value(state):
state.inspect.tmp_write_expr = state.solver.BVV(func.info['gp'], state.arch.bits)
# Special handling for cases where `gp` is stored on the stack
got_gp_stack_store = False
for block_addr_in_slice in set(slice_node[0] for slice_node in b.slice.nodes()):
for stmt in project.factory.block(block_addr_in_slice).vex.statements:
if isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset and \
isinstance(stmt.data, pyvex.IRExpr.RdTmp):
tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop
# we must make sure value of that temporary variable equals to the correct gp value
state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,
condition=lambda s, bbl_addr_=block_addr_in_slice,
tmp_offset_=tmp_offset:
s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,
action=overwrite_tmp_value
)
got_gp_stack_store = True
break
if got_gp_stack_store:
break
simgr = self.project.factory.simulation_manager(state)
simgr.use_technique(Slicecutor(annotated_cfg))
simgr.run()
if simgr.cut:
target = simgr.cut[0].addr
if self._is_target_valid(cfg, target):
l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target)
return True, [ target ]
l.debug("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target)
return False, [ ]
l.debug("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
return False, [ ]
|
Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:param str jumpkind: The jumpkind.
:return: Whether the jump was resolved, and the resolved targets alongside it
:rtype: tuple
|
def register_resources(klass, registry, resource_class):
""" meta model subscriber on resource registration.
We watch for new resource types being registered and if they
support aws config, automatically register the jsondiff filter.
"""
config_type = getattr(resource_class.resource_type, 'config_type', None)
if config_type is None:
return
resource_class.filter_registry.register('json-diff', klass)
|
meta model subscriber on resource registration.
We watch for new resource types being registered and if they
support aws config, automatically register the jsondiff filter.
|
def create_service(self, name, **kwargs):
"""
Creates a service with a name. All other parameters are optional. They
are: `note`, `hourly_rate`, `billable`, and `archived`.
"""
data = self._wrap_dict("service", kwargs)
data["customer"]["name"] = name
return self.post("/services.json", data=data)
|
Creates a service with a name. All other parameters are optional. They
are: `note`, `hourly_rate`, `billable`, and `archived`.
|
def sync_user_email_addresses(user):
"""
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
"""
from .models import EmailAddress
email = user_email(user)
if email and not EmailAddress.objects.filter(user=user,
email__iexact=email).exists():
if app_settings.UNIQUE_EMAIL \
and EmailAddress.objects.filter(email__iexact=email).exists():
# Bail out
return
EmailAddress.objects.create(user=user,
email=email,
primary=False,
verified=False)
|
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
|
def get_item_at(self, *args, **kwargs):
""" Return the items at the given position """
return self.proxy.get_item_at(coerce_point(*args, **kwargs))
|
Return the items at the given position
|
def flag(name=None):
"""
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
:param name: name for the field
:return: grammar for the flag field
"""
if name is None:
name = 'Flag Field'
# Basic field
field = pp.Regex('[YNU]')
# Name
field.setName(name)
field.leaveWhitespace()
return field
|
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
:param name: name for the field
:return: grammar for the flag field
|
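A usage sketch for the grammar above (assumes pyparsing is imported as `pp`, as in the function; the field name is a made-up example):
flag_field = flag('Sample Flag')
assert flag_field.parseString('Y')[0] == 'Y'
# Any character outside Y/N/U fails to parse:
# flag_field.parseString('X') raises pp.ParseException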
def help(file=None):
"""
Print out syntax help for running ``astrodrizzle``
Parameters
----------
file : str (Default = None)
If given, write out help to the filename specified by this parameter.
Any previously existing file with this name will be deleted before
writing out the help.
"""
helpstr = getHelpAsString(docstring=True, show_ver=True)
if file is None:
print(helpstr)
else:
with open(file, mode='w') as f:
f.write(helpstr)
|
Print out syntax help for running ``astrodrizzle``
Parameters
----------
file : str (Default = None)
If given, write out help to the filename specified by this parameter.
Any previously existing file with this name will be deleted before
writing out the help.
|
def artist_top_tracks(self, spotify_id, country):
"""Get an artists top tracks per country with their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
country : COUNTRY_TP
COUNTRY
"""
route = Route('GET', '/artists/{spotify_id}/top-tracks', spotify_id=spotify_id)
payload = {'country': country}
return self.request(route, params=payload)
|
Get an artist's top tracks in a given country by their Spotify ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
country : COUNTRY_TP
COUNTRY
|
def _make_cookie(self):
"""
Return a string encoding the ID of the process, instance and thread.
This disambiguates legitimate wake-ups, accidental writes to the FD,
and buggy internal FD sharing.
"""
return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
os.getpid(), id(self), thread.get_ident())
|
Return a string encoding the ID of the process, instance and thread.
This disambiguates legitimate wake-ups, accidental writes to the FD,
and buggy internal FD sharing.
|
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
|
Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
|
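A standalone sketch of the same idea over a plain {value: probability} mapping (the suite's Items() is assumed to yield such pairs):
def max_like_interval(pmf, percentage=90):
    interval, total = [], 0.0
    # Greedily take values in order of decreasing probability until the
    # accumulated mass reaches the requested percentage.
    for val, prob in sorted(pmf.items(), key=lambda kv: kv[1], reverse=True):
        interval.append(val)
        total += prob
        if total >= percentage / 100.0:
            break
    return interval

pmf = {1: 0.1, 2: 0.5, 3: 0.3, 4: 0.1}
print(max_like_interval(pmf))  # e.g. [2, 3, 1] (ties broken arbitrarily)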