def del_repo(repo, **kwargs):
'''
Remove an XBPS repository from the system.
repo
url of repo to remove (persistent).
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo <repo url>
'''
try:
_locate_repo_files(repo, rewrite=True)
except IOError:
return False
else:
return True
|
def isoncurve(self, p):
"""
verifies if a point is on the curve
"""
return p.iszero() or p.y ** 2 == p.x ** 3 + self.a * p.x + self.b
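A quick standalone check of the same Weierstrass equation with plain floats (the curve parameters and the point are illustrative, not this class's point type):
# (1, 1) lies on y^2 = x^3 - x + 1, mirroring the non-zero branch of isoncurve
a, b = -1.0, 1.0
x, y = 1.0, 1.0
assert y ** 2 == x ** 3 + a * x + b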
|
def _addRawResult(self, resid, values=None, override=False):
""" Structure of values dict (dict entry for each analysis/field):
{'ALC': {'ALC': '13.55',
'DefaultResult': 'ALC',
'Remarks': ''},
'CO2': {'CO2': '0.66',
'DefaultResult': 'CO2',
'Remarks': ''},
'Date': {'Date': '21/11/2013',
'DefaultResult': 'Date',
'Remarks': ''},
'Malo': {'DefaultResult': 'Malo',
'Malo': '0.26',
'Remarks': ''},
'Meth': {'DefaultResult': 'Meth',
'Meth': '0.58',
'Remarks': ''},
'Rep #': {'DefaultResult': 'Rep #',
'Remarks': '',
'Rep #': '1'}
}
"""
values = values or {}  # avoid mutating a shared default dict
dateTime = ''
if 'Date' in values and 'Time' in values:
try:
dtstr = '%s %s' % (values.get('Date')['Date'], values.get('Time')['Time'])
# 2/11/2005 13:33 PM
from datetime import datetime
dtobj = datetime.strptime(dtstr, '%d/%m/%Y %H:%M %p')
dateTime = dtobj.strftime("%Y%m%d %H:%M:%S")
except (KeyError, TypeError, ValueError):
pass
del values['Date']
del values['Time']
# Adding the date, time and calibration inside each analysis service result.
# I'm adding the calibration number here because it is the way we can avoid
# WINE-76 easily
for keyword in values.keys():
values[keyword]['DateTime'] = dateTime
values[keyword]['Calibration'] = self._calibration
# First, we must find if already exists a row with results for
# the same date, in order to take into account replicas, Mean
# and Standard Deviation
dtidx = values.get('Calibration',{}).get('Calibration',0)
rows = self.getRawResults().get(resid, [])
row, rows = self._extractrowbycalibration(rows, self._calibration)
is_std = values.get('Rep #',{}).get('Rep #','') == 'Sd'
is_mean = values.get('Rep #',{}).get('Rep #','') == 'Mean'
if is_std:
# Add the results of Standard Deviation. For each acode, add
# the Standard Result
del values['Rep #']
for key, value in values.iteritems():
row['Sd-%s' % key] = value
elif is_mean:
# Remove the # item and override with new values
row = values
del row['Rep #']
else:
# Override with new values
row = values
rows.append(row)
isfirst = True
for row in rows:
WinescanCSVParser._addRawResult(self, resid, row, isfirst)
isfirst = False
|
def delete_server(self, datacenter_id, server_id):
"""
Removes the server from your data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/servers/%s' % (
datacenter_id,
server_id),
method='DELETE')
return response
|
def add_to_dict_val_set(dict_obj, key, val):
"""Adds the given val to the set mapped by the given key.
If the key is missing from the dict, the given mapping is added.
Example
-------
>>> dict_obj = {'a': set([1, 2])}
>>> add_to_dict_val_set(dict_obj, 'a', 2)
>>> print(dict_obj['a'])
{1, 2}
>>> add_to_dict_val_set(dict_obj, 'a', 3)
>>> print(dict_obj['a'])
{1, 2, 3}
"""
try:
dict_obj[key].add(val)
except KeyError:
dict_obj[key] = set([val])
|
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
|
def render_error(category, error_message, error_codes, exception=None):
""" Render an error page.
Arguments:
category -- The category of the request
error_message -- The message to provide to the error template
error_codes -- The applicable HTTP error code(s). Will usually be an
integer or a list of integers; the HTTP error response will always
be the first error code in the list, and the others are alternates
for looking up the error template to use.
exception -- Any exception that led to this error page
"""
if isinstance(error_codes, int):
error_codes = [error_codes]
error_code = error_codes[0]
template_list = [str(code) for code in error_codes]
template_list.append(str(int(error_code / 100) * 100))
template_list.append('error')
template = map_template(category, template_list)
if template:
return render_publ_template(
template,
_url_root=request.url_root,
category=Category(category),
error={'code': error_code, 'message': error_message},
exception=exception)[0], error_code
# no template found, so fall back to default Flask handler
return flask.abort(error_code)
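A standalone illustration of how the template fallback list above is assembled for error_codes=[404, 410] (values are illustrative):
error_codes = [404, 410]
error_code = error_codes[0]
template_list = [str(code) for code in error_codes]
template_list.append(str(int(error_code / 100) * 100))  # the 4xx family template
template_list.append('error')                           # the generic fallback
print(template_list)  # ['404', '410', '400', 'error']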
|
def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls)
|
def get_next_types(self, n=None):
"""Gets the next set of ``Types`` in this list.
The specified amount must be less than or equal to the return
from ``available()``.
arg: n (cardinal): the number of ``Type`` elements requested
which must be less than or equal to ``available()``
return: (osid.type.Type) - an array of ``Type`` elements.The
length of the array is less than or equal to the number
specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
from ..osid.osid_errors import IllegalState, OperationFailed
if n > self.available():
# !!! This is not quite as specified (see method docs) !!!
raise IllegalState('not enough elements available in this list')
else:
next_list = []
x = 0
while x < n:
try:
next_list.append(self.next())
except Exception:  # Need to specify exceptions here
raise OperationFailed()
x = x + 1
return next_list
|
def get_template_as_json(template_id, **kwargs):
"""
Get a template (including attribute and dataset definitions) as a JSON
string. This is just a wrapper around the get_template_as_dict function.
"""
user_id = kwargs['user_id']
return json.dumps(get_template_as_dict(template_id, user_id=user_id))
|
def add_cli_write_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``write_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.option('-d', '--directory', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),
help='output directory')
@click.pass_obj
def write(manager: BELNamespaceManagerMixin, directory: str):
"""Write a BEL namespace names/identifiers to terminology store."""
manager.write_directory(directory)
return main
|
def module(self, value):
"""
Setter for **self.__module** attribute.
:param value: Attribute value.
:type value: ModuleType
"""
if value is not None:
assert type(value) is type(sys), "'{0}' attribute: '{1}' type is not 'module'!".format("module", value)
self.__module = value
|
def all(klass, client, **kwargs):
"""Returns a Cursor instance for a given resource."""
resource = klass.RESOURCE_COLLECTION
request = Request(client, 'get', resource, params=kwargs)
return Cursor(klass, request, init_with=[client])
|
def get_hashed_rule_name(event, function, lambda_name):
"""
Returns an AWS-valid CloudWatch rule name using a digest of the event name, lambda name, and function.
This allows support for rule names that may be longer than the 64 char limit.
"""
event_name = event.get('name', function)
name_hash = hashlib.sha1('{}-{}'.format(lambda_name, event_name).encode('UTF-8')).hexdigest()
return Zappa.get_event_name(name_hash, function)
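A minimal sketch of the digest step in isolation (the lambda and event names are hypothetical):
import hashlib
name_hash = hashlib.sha1('my-lambda-my-event'.encode('UTF-8')).hexdigest()
print(len(name_hash))  # 40 hex characters, safely under the 64-char CloudWatch rule-name limit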
|
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform.startswith('linux'):
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
|
def fetch(self):
"""
Retrieves the content of the current security document from the remote
database and populates the locally cached SecurityDocument object with
that content. A call to fetch will overwrite any dictionary content
currently in the locally cached SecurityDocument object.
"""
resp = self.r_session.get(self.document_url)
resp.raise_for_status()
self.clear()
self.update(response_to_json_dict(resp))
|
def version(self):
"""Return version of the TR DWE."""
res = self.client.service.Version()
return '.'.join([ustr(x) for x in res[0]])
|
def sendall_stderr(self, s):
"""
Send data to the channel's "stderr" stream, without allowing partial
results. Unlike L{send_stderr}, this method continues to send data
from the given string until all data has been sent or an error occurs.
Nothing is returned.
@param s: data to send to the client as "stderr" output.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@since: 1.1
"""
while s:
if self.closed:
raise socket.error('Socket is closed')
sent = self.send_stderr(s)
s = s[sent:]
return None
|
def insert(self, dct, toa=None, comment=""):
"""Create a document
:param dict dct:
:param toa toa: Optional time of action, triggers this to be handled as a future insert action for a new document
:param str comment: A comment
:rtype: str
:returns: string bson id
"""
if self.schema:
jsonschema.validate(dct, self.schema)
bson_obj = yield self.collection.insert(dct)
raise Return(bson_obj.__str__())
|
def constraint_join(cfg_nodes):
"""Looks up all cfg_nodes and joins the bitvectors by using logical or."""
r = 0
for e in cfg_nodes:
r = r | constraint_table[e]
return r
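The same logical-or join, demonstrated with plain integer bitvectors standing in for constraint_table entries (values are illustrative):
constraint_table = {'n1': 0b0101, 'n2': 0b0011}
r = 0
for e in ('n1', 'n2'):
    r = r | constraint_table[e]
print(bin(r))  # 0b111 -- the union of both constraint sets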
|
def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
if format == 'CSV' and csv_delimiter is None:
csv_delimiter = ','
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e)))
|
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
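For reference, a dict side_spec maps straight onto openpyxl's Side; a minimal sketch (the color string is illustrative, and in the method above strings pass through _convert_to_color first):
from openpyxl.styles import Side
side = Side(style='thin', color='FF0000')  # equivalent to side_spec={'border_style': 'thin', 'color': 'FF0000'}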
|
def search_for(self, query, include_draft=False):
"""
Search for a query text.
:param query: keyword to query
:param include_draft: return draft posts/pages or not
:return: an iterable object of posts and pages (if allowed).
"""
query = query.lower()
if not query:
return []
def contains_query_keyword(post_or_page):
contains = query in post_or_page.title.lower() \
or query in Markup(
get_parser(post_or_page.format).parse_whole(
post_or_page.raw_content)
).striptags().lower()
return contains
return filter(contains_query_keyword,
chain(self.get_posts(include_draft=include_draft),
self.get_pages(include_draft=include_draft)
if current_app.config[
'ALLOW_SEARCH_PAGES'] else []))
|
def handle_get(self):
"""Handle a single HTTP GET request.
Default implementation indicates an error because
XML-RPC uses the POST method.
"""
code = 400
message, explain = \
BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % { #@UndefinedVariable
'code' : code,
'message' : message,
'explain' : explain
}
sys.stdout.write('Status: %d %s\n' % (code, message))
sys.stdout.write('Content-Type: text/html\n')
sys.stdout.write('Content-Length: %d\n' % len(response))
sys.stdout.write('\n')
sys.stdout.write(response)
|
def child(self, subkey):
"""
Retrieves a subkey for this Registry key, given its name.
@type subkey: str
@param subkey: Name of the subkey.
@rtype: L{RegistryKey}
@return: Subkey.
"""
path = self._path + '\\' + subkey
handle = win32.RegOpenKey(self.handle, subkey)
return RegistryKey(path, handle)
|
def create_domainalias(self, domainid, data):
"""Create a domain alias"""
return self.api_call(
ENDPOINTS['domainaliases']['new'],
dict(domainid=domainid),
body=data)
|
def deploy(cls, remote_name, branch):
"""Deploy a PaaS instance."""
def get_remote_url(remote):
return 'git config --local --get remote.%s.url' % (remote)
remote_url = cls.exec_output(get_remote_url(remote_name)) \
.replace('\n', '')
if not remote_url or not re.search('gpaas.net|gandi.net', remote_url):
remote_name = ('$(git config --local --get branch.%s.remote)' %
branch)
remote_url = cls.exec_output(get_remote_url(remote_name)) \
.replace('\n', '')
error = None
if not remote_url:
error = True
cls.echo('Error: Could not find git remote '
'to extract deploy url from.')
elif not re.search('gpaas.net|gandi.net', remote_url):
error = True
cls.echo('Error: %s is not a valid Simple Hosting git remote.'
% (remote_url))
if error:
cls.echo("""This usually happens when:
- the current directory has no Simple Hosting git remote attached,
in this case, please see $ gandi paas attach --help
- the local branch being deployed hasn't been pushed to the \
remote repository yet,
in this case, please try $ git push <remote> %s
""" % (branch))
cls.echo('Otherwise, it\'s recommended to use'
' the --remote and/or --branch options:\n'
'$ gandi deploy --remote <remote> [--branch <branch>]')
sys.exit(2)
remote_url_no_protocol = remote_url.split('://')[1]
splitted_url = remote_url_no_protocol.split('/')
paas_access = splitted_url[0]
deploy_git_host = splitted_url[1]
command = "ssh %s 'deploy %s %s'" \
% (paas_access, deploy_git_host, branch)
cls.execute(command)
|
def pdf(self, d, n=None):
r'''Computes the probability density function of a
continuous particle size distribution at a specified particle diameter,
and optionally in a specified basis. The evaluation function varies with
the distribution chosen. The interconversion between distribution
orders is performed using the following formula [1]_:
.. math::
q_s(d) = \frac{d^{(s-r)} q_r(d)}
{\int_0^\infty d^{(s-r)} q_r(d)\, dd}
Parameters
----------
d : float
Particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
pdf : float
The probability density function at the specified diameter and
order, [-]
Notes
-----
The pdf order conversions are typically available analytically after
some work. They have been verified numerically. See the various
functions with names ending with 'basis_integral' for the formulations.
The distributions normally do not have analytical limits for diameters
of 0 or infinity, but large values suffice to capture the area of the
integral.
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.pdf(1e-5)
30522.765209509154
>>> psd.pdf(1e-5, n=3)
30522.765209509154
>>> psd.pdf(1e-5, n=0)
1238.661379483343
References
----------
.. [1] Masuda, Hiroaki, Ko Higashitani, and Hideto Yoshida. Powder
Technology: Fundamentals of Particles, Powder Beds, and Particle
Generation. CRC Press, 2006.
'''
ans = self._pdf(d=d)
if n is not None and n != self.order:
power = n - self.order
numerator = d**power*ans
denominator = self._pdf_basis_integral_definite(d_min=0.0, d_max=self.d_excessive, n=power)
ans = numerator/denominator
# Handle splines which might go below zero
ans = max(ans, 0.0)
if self.truncated:
if d < self.d_min or d > self.d_max:
return 0.0
ans = (ans)/(self._cdf_d_max - self._cdf_d_min)
return ans
|
def parse_char(self, c):
'''input some data bytes, possibly returning a new message'''
self.buf.extend(c)
self.total_bytes_received += len(c)
if self.native:
if native_testing:
self.test_buf.extend(c)
m = self.__parse_char_native(self.test_buf)
m2 = self.__parse_char_legacy()
if m2 != m:
print("Native: %s\nLegacy: %s\n" % (m, m2))
raise Exception('Native vs. Legacy mismatch')
else:
m = self.__parse_char_native(self.buf)
else:
m = self.__parse_char_legacy()
if m is not None:
self.total_packets_received += 1
self.__callbacks(m)
else:
# XXX The idea here is if we've read something and there's nothing left in
# the buffer, reset it to 0 which frees the memory
if self.buf_len() == 0 and self.buf_index != 0:
self.buf = bytearray()
self.buf_index = 0
return m
|
def _construct(self):
"""
Construct a control dependence graph.
This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment
Form by Ron Cytron, etc.
"""
self._acyclic_cfg = self._cfg.copy()
# TODO: Cycle-removing is not needed - confirm it later
# The CFG we use should be acyclic!
#self._acyclic_cfg.remove_cycles()
# Pre-process the acyclic CFG
self._pre_process_cfg()
# Construct post-dominator tree
self._pd_construct()
self._graph = networkx.DiGraph()
# Construct the reversed dominance frontier mapping
rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)
for y in self._cfg.graph.nodes():
if y not in rdf:
continue
for x in rdf[y]:
self._graph.add_edge(x, y)
|
def remove_all_servers(self):
"""
Remove all registered WBEM servers from the subscription manager. This
also unregisters listeners from these servers and removes all owned
indication subscriptions, owned indication filters, and owned listener
destinations.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
"""
for server_id in list(self._servers.keys()):
self.remove_server(server_id)
|
def _generate_create_callable(name, display_name, arguments, regex, doc, supported, post_arguments, is_action):
"""
Returns a callable which conjures the URL for the resource and POSTs data
"""
def f(self, *args, **kwargs):
for key, value in args[-1].items():
if type(value) == file:
return self._put_or_post_multipart('POST', self._generate_url(regex, args[:-1]), args[-1])
return self._put_or_post_json('POST', self._generate_url(regex, args[:-1]), args[-1])
if is_action:
f.__name__ = str(name)
else:
f.__name__ = str('create_%s' % name)
f.__doc__ = doc
f._resource_uri = regex
f._get_args = arguments
f._put_or_post_args = post_arguments
f.resource_name = display_name
f.is_api_call = True
f.is_supported_api = supported
return f
|
def _year_expand(s):
""" Parses a year or dash-delimeted year range
"""
regex = r"^((?:19|20)\d{2})?(\s*-\s*)?((?:19|20)\d{2})?$"
try:
start, dash, end = match(regex, ustr(s)).groups()
start = start or 1900
end = end or 2099
except AttributeError:
return 1900, 2099
return (int(start), int(end)) if dash else (int(start), int(start))
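A few illustrative inputs and the tuples the parser yields (assuming ustr accepts plain strings):
# _year_expand('1999-2010')   -> (1999, 2010)
# _year_expand('2005')        -> (2005, 2005)
# _year_expand('-2010')       -> (1900, 2010)
# _year_expand('not a year')  -> (1900, 2099)  via the AttributeError fallback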
|
def array_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in both arrays
"""
arr1, arr2 = as_like_arrays(arr1, arr2)
if arr1.shape != arr2.shape:
return False
with warnings.catch_warnings():
warnings.filterwarnings('ignore', "In the future, 'NAT == x'")
flag_array = (arr1 == arr2)
flag_array |= (isnull(arr1) & isnull(arr2))
return bool(flag_array.all())
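A short demonstration of the NaN behaviour that plain np.array_equal rejects (this calls the function above, so as_like_arrays and isnull must be available from the same module):
import numpy as np
a = np.array([1.0, np.nan])
b = np.array([1.0, np.nan])
print(np.array_equal(a, b))  # False: NaN != NaN under ordinary comparison
print(array_equiv(a, b))     # True: NaNs in matching positions count as equal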
|
def _reregister_tree_admin():
"""Forces unregistration of tree admin class with following re-registration."""
try:
admin.site.unregister(MODEL_TREE_CLASS)
except NotRegistered:
pass
admin.site.register(MODEL_TREE_CLASS, _TREE_ADMIN())
|
def electric_field_amplitude_intensity(s0, Isat=16.6889462814,
Omega=1e6, units="ad-hoc"):
"""Return the amplitude of the electric field for saturation parameter.
This is at a given saturation parameter s0=I/Isat, where by default \
Isat=16.6889462814 W/m^2 is the saturation intensity of the D2 line of \
rubidium for circularly polarized light. Optionally, a frequency scale \
`Omega` can be provided.
>>> print(electric_field_amplitude_intensity(1.0, units="ad-hoc"))
9.0152984553
>>> print(electric_field_amplitude_intensity(1.0, Omega=1.0, units="SI"))
112.135917207
>>> print(electric_field_amplitude_intensity(1.0, units="SI"))
0.000112135917207
"""
E0_sat = sqrt(2*mu0*c*Isat)/Omega
if units == "ad-hoc":
e0 = hbar/(e*a0) # This is the electric field scale.
E0_sat = E0_sat/e0
return E0_sat*sqrt(s0)
|
def _attrib_to_transform(attrib):
"""
Extract a homogeneous transform from a dictionary.
Parameters
------------
attrib: dict, optionally containing 'transform'
Returns
------------
transform: (4, 4) float, homogeneous transformation
"""
transform = np.eye(4, dtype=np.float64)
if 'transform' in attrib:
# wangle their transform format
values = np.array(
attrib['transform'].split(),
dtype=np.float64).reshape((4, 3)).T
transform[:3, :4] = values
return transform
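A sketch of the expected attrib format: twelve floats read column-major into the upper 3x4 block (the translation values are illustrative):
attrib = {'transform': '1 0 0  0 1 0  0 0 1  2.0 3.0 4.0'}  # identity rotation, translation (2, 3, 4)
T = _attrib_to_transform(attrib)
print(T[:3, 3])  # [2. 3. 4.]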
|
def expand_template(template, value):
"""
:param template: A UNICODE STRING WITH VARIABLE NAMES IN MOUSTACHES `{{.}}`
:param value: Data HOLDING THE PARAMETER VALUES
:return: UNICODE STRING WITH VARIABLES EXPANDED
"""
value = wrap(value)
if is_text(template):
return _simple_expand(template, (value,))
return _expand(template, (value,))
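A hypothetical call, sketched as comments since the exact moustache semantics live in _simple_expand and _expand (the dotted-path lookup is an assumption based on the `{{.}}` notation):
# expand_template(u"Hello {{name}}!", {"name": "World"})  ->  u"Hello World!"
# expand_template(u"{{a.b}}", {"a": {"b": 42}})           ->  u"42"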
|
def show_doc(elt, doc_string:bool=True, full_name:str=None, arg_comments:dict=None, title_level=None, alt_doc_string:str='',
ignore_warn:bool=False, markdown=True, show_tests=True):
"Show documentation for element `elt`. Supported types: class, Callable, and enum."
arg_comments = ifnone(arg_comments, {})
anchor_id = get_anchor(elt)
elt = getattr(elt, '__func__', elt)
full_name = full_name or fn_name(elt)
if inspect.isclass(elt):
if is_enum(elt.__class__): name,args = get_enum_doc(elt, full_name)
else: name,args = get_cls_doc(elt, full_name)
elif isinstance(elt, Callable): name,args = format_ft_def(elt, full_name)
else: raise Exception(f'doc definition not supported for {full_name}')
source_link = get_function_source(elt) if is_fastai_class(elt) else ""
test_link, test_modal = get_pytest_html(elt, anchor_id=anchor_id) if show_tests else ('', '')
title_level = ifnone(title_level, 2 if inspect.isclass(elt) else 4)
doc = f'<h{title_level} id="{anchor_id}" class="doc_header">{name}{source_link}{test_link}</h{title_level}>'
doc += f'\n\n> {args}\n\n'
doc += f'{test_modal}'
if doc_string and (inspect.getdoc(elt) or arg_comments):
doc += format_docstring(elt, arg_comments, alt_doc_string, ignore_warn) + ' '
if markdown: display(Markdown(doc))
else: return doc
|
def observable(
_method_or_viewset=None, poll_interval=None, primary_key=None, dependencies=None
):
"""Make ViewSet or ViewSet method observable.
Decorating a ViewSet class is the same as decorating its `list` method.
If decorated method returns a response containing a list of items, it must
use the provided `LimitOffsetPagination` for any pagination. In case a
non-list response is returned, the resulting item will be wrapped into a
list.
When multiple decorators are used, `observable` must be the first one to be
applied as it needs access to the method name.
:param poll_interval: Configure given observable as a polling observable
:param primary_key: Primary key for tracking observable items
:param dependencies: List of ORM to register as dependencies for
orm_notify. If None the observer will subscribe to notifications from
the queryset model.
"""
if poll_interval and dependencies:
raise ValueError('Only one of poll_interval and dependencies arguments allowed')
def decorator_observable(method_or_viewset):
if inspect.isclass(method_or_viewset):
list_method = getattr(method_or_viewset, 'list', None)
if list_method is not None:
method_or_viewset.list = observable(list_method)
return method_or_viewset
# Do not decorate an already observable method twice.
if getattr(method_or_viewset, 'is_observable', False):
return method_or_viewset
@functools.wraps(method_or_viewset)
def wrapper(self, request, *args, **kwargs):
if observer_request.OBSERVABLE_QUERY_PARAMETER in request.query_params:
# TODO: Validate the session identifier.
session_id = request.query_params[
observer_request.OBSERVABLE_QUERY_PARAMETER
]
# Create request and subscribe the session to given observer.
request = observer_request.Request(
self.__class__, method_or_viewset.__name__, request, args, kwargs
)
# Initialize observer and subscribe.
instance = observer.QueryObserver(request)
data = instance.subscribe(session_id, dependencies)
return response.Response({'observer': instance.id, 'items': data})
else:
# Non-reactive API.
return method_or_viewset(self, request, *args, **kwargs)
wrapper.is_observable = True
if poll_interval is not None:
wrapper.observable_change_detection = observer.Options.CHANGE_DETECTION_POLL
wrapper.observable_poll_interval = poll_interval
if primary_key is not None:
wrapper.observable_primary_key = primary_key
return wrapper
if _method_or_viewset is None:
return decorator_observable
else:
return decorator_observable(_method_or_viewset)
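A hypothetical Django REST framework ViewSet using the decorator, sketched as comments (all names are illustrative; note that observable sits outermost, as the docstring requires):
# class TaskViewSet(viewsets.ModelViewSet):
#     queryset = Task.objects.all()
#
#     @observable(poll_interval=30)
#     def list(self, request, *args, **kwargs):
#         return super().list(request, *args, **kwargs)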
|
def create_ml_configuration_from_datasets(self, dataset_ids):
"""
Creates an ml configuration from the given dataset_ids
:param dataset_ids: Array of dataset identifiers to make search template from
:return: An identifier used to request the status of the builder job (get_ml_configuration_status)
"""
available_columns = self.search_template_client.get_available_columns(dataset_ids)
# Create a search template from dataset ids
search_template = self.search_template_client.create(dataset_ids, available_columns)
return self.create_ml_configuration(search_template, available_columns, dataset_ids)
|
def logout():
"""
Remove the authenticated user's ID from the request.
"""
from uliweb import request
delete_user_session()
request.session.delete()
request.user = None
return True
|
def do_class(self, element, decl, pseudo):
"""Implement class declaration - pre-match."""
step = self.state[self.state['current_step']]
actions = step['actions']
strval = self.eval_string_value(element, decl.value)
actions.append(('attrib', ('class', strval)))
|
def _purge_expired(self):
"""
Remove all expired entries from the cache.
"""
time_horizon = time.time() - self._keep_time
new_cache = {}
for (k, v) in self._cache.items():
if v.timestamp > time_horizon:
new_cache[k] = v
self._cache = new_cache
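For comparison, the same purge can be written as a single dict comprehension (equivalent behaviour, assuming cache entries carry a .timestamp attribute as above):
# time_horizon = time.time() - self._keep_time
# self._cache = {k: v for k, v in self._cache.items() if v.timestamp > time_horizon}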
|
def repeat(self, n=2, oscillate=False, callback=None):
"""
Returns a list that is a repetition of the given list.
When oscillate is True,
moves from the end back to the beginning,
and then from the beginning to the end, and so on.
"""
colorlist = ColorList()
colors = ColorList.copy(self)
for i in _range(n):
colorlist.extend(colors)
if oscillate: colors = colors[::-1]  # list.reverse() returns None; use a reversed copy
if callback: colors = callback(colors)
return colorlist
|
def tofile(self, f):
"""Serialize this ScalableBloomFilter into the file-object
`f'."""
f.write(pack(self.FILE_FMT, self.scale, self.ratio,
self.initial_capacity, self.error_rate))
# Write #-of-filters
f.write(pack(b'<l', len(self.filters)))
if len(self.filters) > 0:
# Then each filter directly, with a header describing
# their lengths.
headerpos = f.tell()
headerfmt = b'<' + b'Q'*(len(self.filters))
f.write(b'.' * calcsize(headerfmt))
filter_sizes = []
for filter in self.filters:
begin = f.tell()
filter.tofile(f)
filter_sizes.append(f.tell() - begin)
f.seek(headerpos)
f.write(pack(headerfmt, *filter_sizes))
|
def fix_empty_methods(source):
"""
Appends 'pass' to empty methods/functions (i.e. where there was nothing but
a docstring before we removed it =).
Example::
# Note: This triple-single-quote inside a triple-double-quote is also a
# pyminifier self-test
def myfunc():
'''This is just a placeholder function.'''
Will become::
def myfunc(): pass
"""
def_indentation_level = 0
output = ""
just_matched = False
previous_line = None
method = re.compile(r'^\s*def\s*.*\(.*\):.*$')
for line in source.split('\n'):
if len(line.strip()) > 0: # Don't look at blank lines
if just_matched == True:
this_indentation_level = len(line.rstrip()) - len(line.strip())
if def_indentation_level == this_indentation_level:
# This method is empty, insert a 'pass' statement
indent = " " * (def_indentation_level + 1)
output += "%s\n%spass\n%s\n" % (previous_line, indent, line)
else:
output += "%s\n%s\n" % (previous_line, line)
just_matched = False
elif method.match(line):
def_indentation_level = len(line) - len(line.strip())
just_matched = True
previous_line = line
else:
output += "%s\n" % line # Another self-test
else:
output += "\n"
return output
|
def call(command, working_directory=config.BASE_DIR):
"""
Executes a shell command in a given working_directory.
Command is a command-line string (it is passed through the shell, since shell=True).
Returns a tuple of two byte strings: (stdout, stderr)
"""
LOG.info(command)
proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, cwd=working_directory, shell=True)
out, err = proc.communicate()
return (out, err)
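A hypothetical invocation (the command string is illustrative):
# out, err = call('ls -la')
# print(out.decode())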
|
def new_cast_status(self, status):
""" Called when a new status received from the Chromecast. """
self.status = status
if status:
self.status_event.set()
|
def get_all_children(self, include_self=False):
"""
Return all subsidiaries of this company.
"""
ownership = Ownership.objects.filter(parent=self)
subsidiaries = Company.objects.filter(child__in=ownership)
for sub in subsidiaries:
subsidiaries = subsidiaries | sub.get_all_children()
if include_self is True:
self_company = Company.objects.filter(id=self.id)
subsidiaries = subsidiaries | self_company
return subsidiaries
|
def fetch_action_restriction(self, reftrack, action):
"""Return wheter the given action is restricted for the given reftrack
available actions are:
``reference``, ``load``, ``unload``, ``replace``, ``import_reference``, ``import_taskfile``, ``delete``
If action is not available, True is returned.
:param reftrack: the reftrack to query
:type reftrack: :class:`Reftrack`
:param action: the action to check.
:type action: str
:returns: True, if the action is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_typ_interface(reftrack.get_typ())
d = {'reference': inter.is_reference_restricted, 'load': inter.is_load_restricted,
'unload': inter.is_unload_restricted, 'replace': inter.is_replace_restricted,
'import_reference': inter.is_import_ref_restricted, 'import_taskfile': inter.is_import_f_restricted,
'delete': inter.is_delete_restricted,}
f = d.get(action, None)
if not f:
return True
else:
return f(reftrack)
|
def getSignature(self, signatureKey, serialized):
"""
:type signatureKey: ECPrivateKey
:type serialized: bytearray
"""
try:
return Curve.calculateSignature(signatureKey, serialized)
except InvalidKeyException as e:
raise AssertionError(e)
|
def _constructClassificationRecord(self, inputs):
"""
Construct a _HTMClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TM active cells in learn state
2 - SP columns concatenated with error from TM column predictions and SP
"""
# Count the number of unpredicted columns
allSPColumns = inputs["spBottomUpOut"]
activeSPColumns = allSPColumns.nonzero()[0]
score = anomaly.computeRawAnomalyScore(activeSPColumns,
self._prevPredictedColumns)
spSize = len(allSPColumns)
allTPCells = inputs['tpTopDownOut']
tpSize = len(inputs['tpLrnActiveStateT'])
classificationVector = numpy.array([])
if self.classificationVectorType == 1:
# Classification Vector: [---TM Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self.classificationVectorType == 2:
# Classification Vector: [---SP---|---(TM-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeSPColumns.shape[0] > 0:
classificationVector[activeSPColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
activeSPColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self.classificationVectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = allTPCells.nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=self._iteration, #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result
|
def assert_condition_md5(self):
"""If the ``Content-MD5`` request header is present in the request
it's verified against the MD5 hash of the request body. If they don't
match, a 400 HTTP response is returned.
:raises: :class:`webob.exceptions.ResponseException` of status 400 if
the MD5 hash does not match the body.
"""
if 'Content-MD5' in self.request.headers:
body_md5 = hashlib.md5(self.request.body).hexdigest()
if body_md5 != self.request.headers['Content-MD5']:
raise_400(self, msg='Invalid Content-MD5 request header.')
|
def predict_condition_models(self, model_names,
input_columns,
metadata_cols,
data_mode="forecast",
):
"""
Apply condition models to forecast data.
Args:
model_names: List of names associated with each condition model used for prediction
input_columns: List of columns in data used as input into the model
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Which data subset to pull from for the predictions, "forecast" by default
Returns:
A dictionary of data frames containing probabilities of the event and specified metadata
"""
groups = self.condition_models.keys()
predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
for group in groups:
print(group)
print(self.condition_models[group])
g_idxs = self.data[data_mode]["combo"][self.group_col] == group
group_count = np.count_nonzero(g_idxs)
if group_count > 0:
for m, model_name in enumerate(model_names):
mn = model_name.replace(" ", "-")
predictions.loc[g_idxs, mn + "_conditionprob"] = self.condition_models[group][
model_name].predict_proba(
self.data[data_mode]["combo"].loc[g_idxs, input_columns])[:, 1]
predictions.loc[g_idxs,
mn + "_conditionthresh"] = np.where(predictions.loc[g_idxs, mn + "_conditionprob"]
>= self.condition_models[group][
model_name + "_condition_threshold"], 1, 0)
return predictions
|
def set_state(self, entity_id, new_state, **kwargs):
"Updates or creates the current state of an entity."
return remote.set_state(self.api, entity_id, new_state, **kwargs)
|
def database_remove_tags(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
"""
return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs)
|
def refresh_db(**kwargs):
'''
Update list of available packages from installed repos
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
cmd = 'xbps-install -Sy'
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
raise CommandExecutionError(comment)
return True
|
def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
If no `sk_file` is defined return a keypair.
If only the `sk_file` is defined `pk_file` will use the same name with a postfix `.pub`.
If the `sk_file` already exists but the `pk_file` does not, the `pk_file` will be generated
using the `sk_file`.
CLI Examples:
.. code-block:: bash
salt-call nacl.keygen
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
kwargs['opts'] = __opts__
return salt.utils.nacl.keygen(sk_file, pk_file, **kwargs)
|
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
cmd = ['pacman', '-Q']
if 'root' in kwargs:
cmd.extend(('-r', kwargs['root']))
ret = {}
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
if not line:
continue
try:
name, version_num = line.split()[0:2]
except ValueError:
log.error('Problem parsing pacman -Q: Unexpected formatting in '
'line: \'%s\'', line)
else:
__salt__['pkg_resource.add_pkg'](ret, name, version_num)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
|
def old_unpad(s):
"""
Removes padding from an input string based on a given block size.
:param s: string
:returns: The unpadded string.
"""
if not s:
return s
try:
return Padding.removePadding(s, blocksize=OLD_BLOCK_SIZE)
except AssertionError:
# if there's an error while removing padding, just return s.
return s
|
def monitor_deletion():
"""
Function for checking for correct deletion of weakref-able objects.
Example usage::
monitor, is_alive = monitor_deletion()
obj = set()
monitor(obj, "obj")
assert is_alive("obj") # True because there is still a live ref to `obj`
del obj
assert not is_alive("obj") # True because `obj` has been deleted
"""
monitors = {}
def set_deleted(x):
def _(weakref):
del monitors[x]
return _
def monitor(item, name):
monitors[name] = ref(item, set_deleted(name))
def is_alive(name):
return monitors.get(name, None) is not None
return monitor, is_alive
|
def remove_bad_sequence(codon_list, bad_seq, bad_seqs):
"""
Make a silent mutation to the given codon list to remove the first instance
of the given bad sequence found in the gene sequence. If the bad sequence
isn't found, nothing happens and the function returns false. Otherwise the
function returns true. You can use these return values to easily write a
loop that totally purges the bad sequence from the codon list. Both the
specific bad sequence in question and the list of all bad sequences are
expected to be regular expressions.
"""
gene_seq = ''.join(codon_list)
problem = bad_seq.search(gene_seq)
if not problem:
return False
bs_start_codon = problem.start() // 3
bs_end_codon = problem.end() // 3
for i in range(bs_start_codon, bs_end_codon):
problem_codon = codon_list[i]
amino_acid = translate_dna(problem_codon)
alternate_codons = [
codon
for codon in dna.ecoli_reverse_translate[amino_acid]
if codon != problem_codon]
for alternate_codon in alternate_codons:
codon_list[i] = alternate_codon
if problem_with_codon(i, codon_list, bad_seqs):
codon_list[i] = problem_codon
else:
return True
raise RuntimeError("Could not remove bad sequence '{}' from gene.".format(bad_seq.pattern))
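The purge loop the docstring describes, as a sketch (codon_list, bad_seq and bad_seqs are placeholders for the caller's values):
# while remove_bad_sequence(codon_list, bad_seq, bad_seqs):
#     pass  # keep mutating until no instance of bad_seq remains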
|
def create_border(self, border_style_type):
"""
Create a new MenuBorderStyle instance based on the given border style type.
Args:
border_style_type (int): an integer value from :obj:`MenuBorderStyleType`.
Returns:
:obj:`MenuBorderStyle`: a new MenuBorderStyle instance of the specified style.
"""
if border_style_type == MenuBorderStyleType.ASCII_BORDER:
return self.create_ascii_border()
elif border_style_type == MenuBorderStyleType.LIGHT_BORDER:
return self.create_light_border()
elif border_style_type == MenuBorderStyleType.HEAVY_BORDER:
return self.create_heavy_border()
elif border_style_type == MenuBorderStyleType.DOUBLE_LINE_BORDER:
return self.create_doubleline_border()
elif border_style_type == MenuBorderStyleType.HEAVY_OUTER_LIGHT_INNER_BORDER:
return self.create_heavy_outer_light_inner_border()
elif border_style_type == MenuBorderStyleType.DOUBLE_LINE_OUTER_LIGHT_INNER_BORDER:
return self.create_doubleline_outer_light_inner_border()
else:
# Use ASCII if we don't recognize the type
self.logger.info('Unrecognized border style type: {}. Defaulting to ASCII.'.format(border_style_type))
return self.create_ascii_border()
|
Create a new MenuBorderStyle instance based on the given border style type.
Args:
border_style_type (int): an integer value from :obj:`MenuBorderStyleType`.
Returns:
:obj:`MenuBorderStyle`: a new MenuBorderStyle instance of the specified style.
|
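A brief usage sketch; the factory instance below is hypothetical, since the enclosing class is not shown in this excerpt:

# Hypothetical factory object exposing create_border().
border = factory.create_border(MenuBorderStyleType.LIGHT_BORDER)
# Unrecognized style types fall back to the ASCII border with an info log.
fallback = factory.create_border(999)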
def list_kubernetes_roles(self, mount_point='kubernetes'):
"""GET /auth/<mount_point>/role?list=true
:param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
:type mount_point: str.
:return: Parsed JSON response from the list roles GET request.
:rtype: dict.
"""
url = 'v1/auth/{0}/role?list=true'.format(mount_point)
return self._adapter.get(url).json()
|
GET /auth/<mount_point>/role?list=true
:param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
:type mount_point: str.
:return: Parsed JSON response from the list roles GET request.
:rtype: dict.
|
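A short usage sketch, assuming an hvac-style client object exposing this method; Vault LIST responses nest the names under data/keys:

# Hypothetical client; construction details depend on the surrounding library.
response = client.list_kubernetes_roles(mount_point='kubernetes')
print(response['data']['keys'])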
def lookup(
name,
rdtype,
method=None,
servers=None,
timeout=None,
walk=False,
walk_tld=False,
secure=None
):
'''
Lookup DNS records and return their data
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the final domain in the walk
:param secure: return only DNSSEC secured responses
:return: [] of record data
'''
# opts = __opts__.get('dns', {})
opts = {}
method = method or opts.get('method', 'auto')
secure = secure or opts.get('secure', None)
servers = servers or opts.get('servers', None)
timeout = timeout or opts.get('timeout', False)
rdtype = rdtype.upper()
# pylint: disable=bad-whitespace,multiple-spaces-before-keyword
query_methods = (
('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))),
('dnspython', _lookup_dnspython, HAS_DNSPYTHON),
('dig', _lookup_dig, HAS_DIG),
('drill', _lookup_drill, HAS_DRILL),
('host', _lookup_host, HAS_HOST and not secure),
('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure),
)
# pylint: enable=bad-whitespace,multiple-spaces-before-keyword
try:
if method == 'auto':
# The first one not to bork on the conditions becomes the function
method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest))
else:
# The first one not to bork on the conditions becomes the function. And the name must match.
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest))
except StopIteration:
log.error(
'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '
'or unable to perform query', method, rdtype, name
)
return False
res_kwargs = {
'rdtype': rdtype,
}
if servers:
if not isinstance(servers, (list, tuple)):
servers = [servers]
if method in ('dnspython', 'dig', 'drill'):
res_kwargs['servers'] = servers
else:
if timeout:
timeout /= len(servers)
# Inject a wrapper for multi-server behaviour
def _multi_srvr(resolv_func):
@functools.wraps(resolv_func)
def _wrapper(**res_kwargs):
for server in servers:
s_res = resolv_func(server=server, **res_kwargs)
if s_res:
return s_res
return _wrapper
resolver = _multi_srvr(resolver)
if not walk:
name = [name]
else:
idx = 0
if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components
idx = name.find('.') + 1
idx = name.find('.', idx) + 1
domain = name[idx:]
rname = name[0:idx]
name = _tree(domain, walk_tld)
if walk == 'name':
name = [rname + domain for domain in name]
if timeout:
timeout /= len(name)
if secure:
res_kwargs['secure'] = secure
if timeout:
res_kwargs['timeout'] = timeout
for rname in name:
res = resolver(name=rname, **res_kwargs)
if res:
return res
return res
|
Lookup DNS records and return their data
:param name: name to lookup
:param rdtype: DNS record type
:param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
:param servers: (list of) server(s) to try in-order
:param timeout: query timeout or a valiant approximation of that
:param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
:param walk_tld: Include the final domain in the walk
:param secure: return only DNSSEC secured responses
:return: [] of record data
|
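A hedged usage sketch of `lookup`; the domain and server address below are illustrative:

# Plain A-record query with automatic resolver selection.
addrs = lookup('example.com', 'A')

# Force dig against a specific server and walk the DNS tree upwards
# until a TXT record is found.
txt = lookup('host.sub.example.com', 'TXT', method='dig',
             servers=['198.51.100.1'], walk=True)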
def read_hier_references(jams_file, annotation_id=0, exclude_levels=[]):
"""Reads hierarchical references from a jams file.
Parameters
----------
jams_file : str
Path to the jams file.
    annotation_id : int >= 0
Identifier of the annotator to read from.
exclude_levels: list
List of levels to exclude. Empty list to include all levels.
Returns
-------
hier_bounds : list
List of the segment boundary times in seconds for each level.
hier_labels : list
List of the segment labels for each level.
hier_levels : list
List of strings for the level identifiers.
"""
hier_bounds = []
hier_labels = []
hier_levels = []
jam = jams.load(jams_file)
namespaces = ["segment_salami_upper", "segment_salami_function",
"segment_open", "segment_tut", "segment_salami_lower"]
# Remove levels if needed
for exclude in exclude_levels:
if exclude in namespaces:
namespaces.remove(exclude)
# Build hierarchy references
for ns in namespaces:
ann = jam.search(namespace=ns)
if not ann:
continue
ref_inters, ref_labels = ann[annotation_id].to_interval_values()
hier_bounds.append(utils.intervals_to_times(ref_inters))
hier_labels.append(ref_labels)
hier_levels.append(ns)
return hier_bounds, hier_labels, hier_levels
|
Reads hierarchical references from a jams file.
Parameters
----------
jams_file : str
Path to the jams file.
annotation_id : int >= 0
Identifier of the annotator to read from.
exclude_levels: list
List of levels to exclude. Empty list to include all levels.
Returns
-------
hier_bounds : list
List of the segment boundary times in seconds for each level.
hier_labels : list
List of the segment labels for each level.
hier_levels : list
List of strings for the level identifiers.
|
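A short usage sketch; the jams path and excluded level below are illustrative:

# Read all hierarchy levels except the lower SALAMI layer.
bounds, labels, levels = read_hier_references(
    'annotations/track.jams', annotation_id=0,
    exclude_levels=['segment_salami_lower'])
for level, b in zip(levels, bounds):
    print(level, len(b), 'boundary times')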
def getWidget(self):
"""Some ideas for your widget:
- Textual information (alert, license place number)
- Check boxes : if checked, send e-mail to your mom when the analyzer spots something
- .. or send an sms to yourself
- You can include the cv2.imshow window to the widget to see how the analyzer proceeds
"""
widget = QtWidgets.QLabel("NO MOVEMENT YET")
widget.setStyleSheet(style.detector_test)
self.signals.start_move.connect(lambda : widget.setText("MOVEMENT START"))
        self.signals.stop_move.connect(lambda : widget.setText("MOVEMENT STOP"))
return widget
|
Some ideas for your widget:
- Textual information (alert, license plate number)
- Check boxes : if checked, send e-mail to your mom when the analyzer spots something
- .. or send an sms to yourself
- You can include the cv2.imshow window to the widget to see how the analyzer proceeds
|
def get_subfields(self, datafield, subfield, i1=None, i2=None,
exception=False):
"""
Return content of given `subfield` in `datafield`.
Args:
datafield (str): Section name (for example "001", "100", "700").
subfield (str): Subfield name (for example "a", "1", etc..).
i1 (str, default None): Optional i1/ind1 parameter value, which
will be used for search.
i2 (str, default None): Optional i2/ind2 parameter value, which
will be used for search.
            exception (bool): If ``True``, :exc:`~exceptions.KeyError` is
                raised when the method couldn't find the given `datafield` /
                `subfield`. If ``False``, an empty list ``[]`` is returned.
Returns:
list: of :class:`.MARCSubrecord`.
Raises:
KeyError: If the subfield or datafield couldn't be found.
Note:
            MARCSubrecord is practically the same thing as a string, but it
            also defines the :meth:`.MARCSubrecord.i1` and
            :attr:`.MARCSubrecord.i2` accessors.
            You may need these, because MARC XML depends on the i/ind
            parameters from time to time (author names, for example).
"""
if len(datafield) != 3:
            raise ValueError(
                "`datafield` parameter has to be exactly 3 chars long!"
            )
if len(subfield) != 1:
            raise ValueError(
                "Bad subfield specification - subfield has to be 1 char long!"
            )
# if datafield not found, return or raise exception
if datafield not in self.datafields:
if exception:
raise KeyError(datafield + " is not in datafields!")
return []
# look for subfield defined by `subfield`, `i1` and `i2` parameters
output = []
for datafield in self.datafields[datafield]:
if subfield not in datafield:
continue
# records are not returned just like plain string, but like
# MARCSubrecord, because you will need ind1/ind2 values
for sfield in datafield[subfield]:
if i1 and sfield.i1 != i1:
continue
if i2 and sfield.i2 != i2:
continue
output.append(sfield)
if not output and exception:
raise KeyError(subfield + " couldn't be found in subfields!")
return output
|
Return content of given `subfield` in `datafield`.
Args:
datafield (str): Section name (for example "001", "100", "700").
subfield (str): Subfield name (for example "a", "1", etc..).
i1 (str, default None): Optional i1/ind1 parameter value, which
will be used for search.
i2 (str, default None): Optional i2/ind2 parameter value, which
will be used for search.
exception (bool): If ``True``, :exc:`~exceptions.KeyError` is
    raised when the method couldn't find the given `datafield` /
    `subfield`. If ``False``, an empty list ``[]`` is returned.
Returns:
list: of :class:`.MARCSubrecord`.
Raises:
KeyError: If the subfield or datafield couldn't be found.
Note:
MARCSubrecord is practically the same thing as a string, but it
also defines the :meth:`.MARCSubrecord.i1` and
:attr:`.MARCSubrecord.i2` accessors.
You may need these, because MARC XML depends on the i/ind
parameters from time to time (author names, for example).
|
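A hedged usage sketch, assuming `record` is a parsed MARC record exposing this method:

# Main author names live in datafield 100, subfield "a".
for author in record.get_subfields('100', 'a'):
    # Each item is a MARCSubrecord: string-like, with the i1/i2
    # indicator values also attached.
    print(author)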
def search(self, **kwargs):
"""
Method to search ipv6's based on extends search.
:param search: Dict containing QuerySets to find ipv6's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv6's
"""
return super(ApiNetworkIPv6, self).get(self.prepare_url('api/v3/networkv6/',
kwargs))
|
Method to search ipv6's based on extended search.
:param search: Dict containing QuerySets to find ipv6's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv6's
|
def receive_callback(request):
"""
Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url.
"""
logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5]))
# make sure request has required parameters
code = request.GET.get('code', None)
state = request.GET.get('state', None)
try:
assert code
assert state
except AssertionError:
logger.debug("Missing parameters for code exchange.")
return HttpResponseBadRequest()
callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key)
token = Token.objects.create_from_request(request)
callback.token = token
callback.save()
logger.debug(
"Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url))
return redirect(callback.url)
|
Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url.
|
def read_gene2acc(file_path, logger):
"""Extracts Entrez ID -> gene symbol mapping from gene2accession.gz file.
Parameters
----------
    file_path: str
        The path of the gene2accession.gz file (or a filtered version thereof).
        The file may be gzip'ed.
    logger: logging.Logger
        Logger used to report how many IDs and symbols were found.
Returns
-------
dict
A mapping of Entrez IDs to gene symbols.
"""
gene2acc = {}
with misc.smart_open_read(file_path, mode='rb', try_gzip=True) as fh:
reader = csv.reader(fh, dialect='excel-tab')
next(reader) # skip header
for i, l in enumerate(reader):
id_ = int(l[1])
symbol = l[15]
try:
gene2acc[id_].append(symbol)
except KeyError:
gene2acc[id_] = [symbol]
# print (l[0],l[15])
# make sure all EntrezIDs map to a unique gene symbol
n = len(gene2acc)
for k, v in gene2acc.items():
symbols = sorted(set(v))
assert len(symbols) == 1
gene2acc[k] = symbols[0]
all_symbols = sorted(set(gene2acc.values()))
m = len(all_symbols)
logger.info('Found %d Entrez Gene IDs associated with %d gene symbols.',
n, m)
return gene2acc
|
Extracts Entrez ID -> gene symbol mapping from gene2accession.gz file.
Parameters
----------
file_path: str
    The path of the gene2accession.gz file (or a filtered version thereof).
    The file may be gzip'ed.
logger: logging.Logger
    Logger used to report how many IDs and symbols were found.
Returns
-------
dict
A mapping of Entrez IDs to gene symbols.
|
def merge_duplicates(self):
"""Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
"""
if len(self.entries) == 0:
self.log.error("WARNING: `entries` is empty, loading stubs")
if self.args.update:
self.log.warning(
"No sources changed, entry files unchanged in update."
" Skipping merge.")
return
self.entries = self.load_stubs()
task_str = self.get_current_task_str()
keys = list(sorted(self.entries.keys()))
n1 = 0
mainpbar = tqdm(total=len(keys), desc=task_str)
while n1 < len(keys):
name1 = keys[n1]
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
n1 = n1 + 1
mainpbar.update(1)
continue
allnames1 = set(self.entries[name1].get_aliases() + self.entries[
name1].extra_aliases())
# Search all later names
for name2 in keys[n1 + 1:]:
if name1 == name2:
continue
if name1 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name1))
continue
if name2 not in self.entries:
self.log.info("Entry for {} not found, likely already "
"deleted in merging process.".format(name2))
continue
allnames2 = set(self.entries[name2].get_aliases() +
self.entries[name2].extra_aliases())
# If there are any common names or aliases, merge
if len(allnames1 & allnames2):
self.log.warning("Found two entries with common aliases "
"('{}' and '{}'), merging.".format(name1,
name2))
load1 = self.proto.init_from_file(self, name=name1)
load2 = self.proto.init_from_file(self, name=name2)
if load1 is not None and load2 is not None:
# Delete old files
self._delete_entry_file(entry=load1)
self._delete_entry_file(entry=load2)
self.entries[name1] = load1
self.entries[name2] = load2
priority1 = 0
priority2 = 0
for an in allnames1:
if an.startswith(self.entries[name1]
.priority_prefixes()):
priority1 += 1
for an in allnames2:
if an.startswith(self.entries[name2]
.priority_prefixes()):
priority2 += 1
if priority1 > priority2:
self.copy_to_entry_in_catalog(name2, name1)
keys.append(name1)
del self.entries[name2]
else:
self.copy_to_entry_in_catalog(name1, name2)
keys.append(name2)
del self.entries[name1]
else:
self.log.warning('Duplicate already deleted')
# if len(self.entries) != 1:
# self.log.error(
# "WARNING: len(entries) = {}, expected 1. "
# "Still journaling...".format(len(self.entries)))
self.journal_entries()
if self.args.travis and n1 > self.TRAVIS_QUERY_LIMIT:
break
n1 = n1 + 1
mainpbar.update(1)
mainpbar.close()
|
Merge and remove duplicate entries.
Compares each entry ('name') in `stubs` to all later entries to check
for duplicates in name or alias. If a duplicate is found, they are
merged and written to file.
|
def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
'''
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
    If host_names is not provided, VSAN-eligible disks will be added to the host's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {}
for host_name, value in six.iteritems(response):
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \
'VSAN configuration cannot be changed without a configured ' \
'VSAN System.'.format(host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
else:
eligible = value.get('Eligible')
error = value.get('Error')
if eligible and isinstance(eligible, list):
# If we have eligible, matching disks, add them to VSAN.
try:
task = vsan_system.AddDisks(eligible)
salt.utils.vmware.wait_for_task(task, host_name, 'Adding disks to VSAN', sleep_seconds=3)
except vim.fault.InsufficientDisks as err:
log.debug(err.msg)
ret.update({host_name: {'Error': err.msg}})
continue
except Exception as err:
msg = '\'vsphere.vsan_add_disks\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
log.debug(
'Successfully added disks to the VSAN system for host \'%s\'.',
host_name
)
# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
disk_names = []
for disk in eligible:
disk_names.append(disk.canonicalName)
ret.update({host_name: {'Disks Added': disk_names}})
elif eligible and isinstance(eligible, six.string_types):
# If we have a string type in the eligible value, we don't
# have any VSAN-eligible disks. Pull the message through.
ret.update({host_name: {'Disks Added': eligible}})
elif error:
# If we hit an error, populate the Error return dict for state functions.
ret.update({host_name: {'Error': error}})
else:
# If we made it this far, we somehow have eligible disks, but they didn't
# match the disk list and just got an empty list of matching disks.
ret.update({host_name: {'Disks Added': 'No new VSAN-eligible disks were found to add.'}})
return ret
|
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
If host_names is not provided, VSAN-eligible disks will be added to the host's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
|
def transform(self, data):
"""
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
        Returns
        -------
        out: SFrame
            A transformed SFrame.
See Also
--------
fit, fit_transform
Examples
--------
.. sourcecode:: python
            >>> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
            >>> transformed_sf = my_tr.transform(sf)
"""
transformed_data = _copy(data)
for name, step in self._transformers:
transformed_data = step.transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise TypeError("The transform function in step '%s' did not return"
" an SFrame." % name)
return transformed_data
|
Transform the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
out: SFrame
    A transformed SFrame.
See Also
--------
fit, fit_transform
Examples
--------
.. sourcecode:: python
>>> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
>>> transformed_sf = my_tr.transform(sf)
|
def issubset(self, other):
"""Report whether another set contains this RangeSet."""
self._binary_sanity_check(other)
return set.issubset(self, other)
|
Report whether another set contains this RangeSet.
|
def old_status(self, old_status):
"""
Sets the old_status of this BuildSetStatusChangedEvent.
:param old_status: The old_status of this BuildSetStatusChangedEvent.
:type: str
"""
allowed_values = ["NEW", "DONE", "REJECTED"]
if old_status not in allowed_values:
raise ValueError(
"Invalid value for `old_status` ({0}), must be one of {1}"
.format(old_status, allowed_values)
)
self._old_status = old_status
|
Sets the old_status of this BuildSetStatusChangedEvent.
:param old_status: The old_status of this BuildSetStatusChangedEvent.
:type: str
|
def delete_service(self, name):
"""
Delete a service by name.
@param name: Service name
@return: The deleted ApiService object
"""
return services.delete_service(self._get_resource_root(), name, self.name)
|
Delete a service by name.
@param name: Service name
@return: The deleted ApiService object
|
def dirs(self, *args, **kwargs):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
Accepts parameters to :meth:`listdir`.
"""
return [p for p in self.listdir(*args, **kwargs) if p.isdir()]
|
D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
Accepts parameters to :meth:`listdir`.
|
def _build_ip_constraints(roles, ips, constraints):
"""Generate the constraints at the ip/device level.
Those constraints are those used by ansible to enforce tc/netem rules.
"""
local_ips = copy.deepcopy(ips)
for constraint in constraints:
gsrc = constraint['src']
gdst = constraint['dst']
gdelay = constraint['delay']
grate = constraint['rate']
gloss = constraint['loss']
for s in roles[gsrc]:
# one possible source
# Get all the active devices for this source
active_devices = filter(lambda x: x["active"],
local_ips[s.alias]['devices'])
# Get only the devices specified in the network constraint
if 'network' in constraint:
active_devices = filter(
lambda x:
x['device'] == s.extra[constraint['network']],
active_devices)
# Get only the name of the active devices
sdevices = map(lambda x: x['device'], active_devices)
for sdevice in sdevices:
# one possible device
for d in roles[gdst]:
# one possible destination
dallips = local_ips[d.alias]['all_ipv4_addresses']
# Let's keep docker bridge out of this
dallips = filter(lambda x: x != '172.17.0.1', dallips)
for dip in dallips:
local_ips[s.alias].setdefault('tc', []).append({
'source': s.alias,
'target': dip,
'device': sdevice,
'delay': gdelay,
'rate': grate,
'loss': gloss
})
return local_ips
|
Generate the constraints at the ip/device level.
Those constraints are those used by ansible to enforce tc/netem rules.
|
def _node_add_with_peer_leaflist(self, child_self, child_other):
'''_node_add_with_peer_leaflist
Low-level api: Apply delta child_other to child_self when child_self is
the peer of child_other. Element child_self and child_other are
leaf-list nodes. Element child_self will be modified during the process.
RFC6020 section 7.7.7 is a reference of this method.
Parameters
----------
child_self : `Element`
A child of a config node in a config tree.
child_other : `Element`
A child of a config node in another config tree. child_self is
the peer of child_other.
Returns
-------
None
There is no return of this method.
'''
parent_self = child_self.getparent()
s_node = self.device.get_schema_node(child_self)
if child_other.get(operation_tag) is None or \
child_other.get(operation_tag) == 'merge' or \
child_other.get(operation_tag) == 'replace':
if s_node.get('ordered-by') == 'user' and \
child_other.get(insert_tag) is not None:
if child_other.get(insert_tag) == 'first':
scope = parent_self.getchildren()
siblings = self._get_sequence(scope, child_other.tag,
parent_self)
if siblings[0] != child_self:
siblings[0].addprevious(child_self)
elif child_other.get(insert_tag) == 'last':
scope = parent_self.getchildren()
siblings = self._get_sequence(scope, child_other.tag,
parent_self)
if siblings[-1] != child_self:
siblings[-1].addnext(child_self)
elif child_other.get(insert_tag) == 'before':
if child_other.get(value_tag) is None:
_inserterror('before',
self.device.get_xpath(child_other),
'value')
siblings = parent_self.findall(child_other.tag)
sibling = [s for s in siblings
if s.text == child_other.get(value_tag)]
if not sibling:
path = self.device.get_xpath(child_other)
value = child_other.get(value_tag)
_inserterror('before', path, 'value', value)
if sibling[0] != child_self:
sibling[0].addprevious(child_self)
elif child_other.get(insert_tag) == 'after':
if child_other.get(value_tag) is None:
_inserterror('after',
self.device.get_xpath(child_other),
'value')
siblings = parent_self.findall(child_other.tag)
sibling = [s for s in siblings
if s.text == child_other.get(value_tag)]
if not sibling:
path = self.device.get_xpath(child_other)
value = child_other.get(value_tag)
_inserterror('after', path, 'value', value)
if sibling[0] != child_self:
sibling[0].addnext(child_self)
elif child_other.get(operation_tag) == 'create':
raise ConfigDeltaError('data-exists: try to create node {} but ' \
'it already exists' \
.format(self.device.get_xpath(child_other)))
elif child_other.get(operation_tag) == 'delete' or \
child_other.get(operation_tag) == 'remove':
parent_self.remove(child_self)
else:
raise ConfigDeltaError("unknown operation: node {} contains " \
"operation '{}'" \
.format(self.device.get_xpath(child_other),
child_other.get(operation_tag)))
|
_node_add_with_peer_leaflist
Low-level api: Apply delta child_other to child_self when child_self is
the peer of child_other. Element child_self and child_other are
leaf-list nodes. Element child_self will be modified during the process.
RFC6020 section 7.7.7 is a reference of this method.
Parameters
----------
child_self : `Element`
A child of a config node in a config tree.
child_other : `Element`
A child of a config node in another config tree. child_self is
the peer of child_other.
Returns
-------
None
There is no return of this method.
|
def all_tamil( word_in ):
""" predicate checks if all letters of the input word are Tamil letters """
if isinstance(word_in,list):
word = word_in
else:
word = get_letters( word_in )
return all( [(letter in tamil_letters) for letter in word] )
|
predicate checks if all letters of the input word are Tamil letters
|
def check_permissions(self, request):
"""Call the predicate(s) associated with the RPC method, to check if the current request
can actually call the method.
Return a boolean indicating if the method should be executed (True) or not (False)"""
if not self.predicates:
return True
# All registered authentication predicates must return True
return all(
predicate(request, *self.predicates_params[i])
for i, predicate in enumerate(self.predicates)
)
|
Call the predicate(s) associated with the RPC method, to check if the current request
can actually call the method.
Return a boolean indicating if the method should be executed (True) or not (False)
|
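A minimal sketch of the predicate contract this method expects; `method` below stands in for the RPC method wrapper object (a hypothetical name), `request` is a Django-style request, and the predicates follow the positional layout `predicate(request, *predicates_params[i])` used above:

def is_authenticated(request):
    # Predicate with no extra parameters.
    return request.user.is_authenticated

def in_group(request, group_name):
    # Predicate taking one extra parameter.
    return request.user.groups.filter(name=group_name).exists()

# predicates[i] receives *predicates_params[i] after the request.
method.predicates = [is_authenticated, in_group]
method.predicates_params = [(), ('rpc-users',)]
allowed = method.check_permissions(request)  # True only if every predicate passes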
def size(self, t=None):
"""Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None will be returned the size of the flattened graph.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3
"""
s = sum(self.degree(t=t).values()) / 2
return int(s)
|
Return the number of edges at time t.
Parameters
----------
t : snapshot id (default=None)
If None will be returned the size of the flattened graph.
Returns
-------
nedges : int
The number of edges
See Also
--------
number_of_edges
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.size(t=0)
3
|
def SegmentCollection(mode="agg-fast", *args, **kwargs):
"""
mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: slower, size: medium, output: perfect, no dash)
"""
if mode == "raw":
return RawSegmentCollection(*args, **kwargs)
return AggSegmentCollection(*args, **kwargs)
|
mode: string
- "raw" (speed: fastest, size: small, output: ugly, no dash,
no thickness)
- "agg" (speed: slower, size: medium, output: perfect, no dash)
|
def build_accumulate(function: Callable[[Any, Any], Tuple[Any, Any]] = None, *,
init: Any = NONE):
""" Decorator to wrap a function to return an Accumulate operator.
:param function: function to be wrapped
:param init: optional initialization for state
"""
_init = init
def _build_accumulate(function: Callable[[Any, Any], Tuple[Any, Any]]):
@wraps(function)
def _wrapper(init=NONE) -> Accumulate:
init = _init if init is NONE else init
if init is NONE:
raise TypeError('"init" argument has to be defined')
return Accumulate(function, init=init)
return _wrapper
if function:
return _build_accumulate(function)
return _build_accumulate
|
Decorator to wrap a function to return an Accumulate operator.
:param function: function to be wrapped
:param init: optional initialization for state
|
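A hedged usage sketch of the decorator; it assumes `Accumulate` calls the wrapped function as `function(state, value) -> (new_state, output)`, matching the annotated signature above:

@build_accumulate(init=0)
def running_sum(state, value):
    # State is the sum so far; emit it alongside the updated state.
    new_state = state + value
    return new_state, new_state

acc = running_sum()                  # uses init=0 baked in by the decorator
acc_from_five = running_sum(init=5)  # per-instance override of the initial state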
def publish(self, value):
"""
Accepts: list of tuples in the format (ip, port)
Returns: unicode
"""
if not isinstance(value, list):
raise ValueError(value)
slaves = ['%s:%d' % x for x in value]
return unicode(", ".join(slaves))
|
Accepts: list of tuples in the format (ip, port)
Returns: unicode
|
def nullify(function):
"Decorator. If empty list, returns None, else list."
def wrapper(*args, **kwargs):
value = function(*args, **kwargs)
        if type(value) == list and len(value) == 0:
return None
return value
return wrapper
|
Decorator. If the wrapped function returns an empty list, return None; otherwise return the value unchanged.
|
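A quick sketch of the decorator in use:

@nullify
def find_matches(items):
    # Returns a possibly-empty list of even numbers.
    return [i for i in items if i % 2 == 0]

print(find_matches([1, 3]))  # None instead of []
print(find_matches([2, 4]))  # [2, 4]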
def visit_module(self, node):
"""
        An interface that will be called when visiting a module.
@param node: node of current module
"""
if not node.file_stream:
# failed to open the module
return
text = node.file_stream.read()
self._checkCopyright(text, node)
if not isTestModule(node.name) and moduleNeedsTests:
self._checkTestReference(text, node)
|
An interface that will be called when visiting a module.
@param node: node of current module
|
def _compute_stacksize(self):
'''
Given this object's code list, compute its maximal stack usage.
This is done by scanning the code, and computing for each opcode
the stack state at the opcode.
'''
# get local access to code, save some attribute lookups later
code = self.code
# A mapping from labels to their positions in the code list
label_pos = { op : pos
for pos, (op, arg) in enumerate(code)
if isinstance(op, Label)
}
# sf_targets are the targets of SETUP_FINALLY opcodes. They are
# recorded because they have special stack behaviour. If an exception
# was raised in the block pushed by a SETUP_FINALLY opcode, the block
# is popped and 3 objects are pushed. On return or continue, the
# block is popped and 2 objects are pushed. If nothing happened, the
# block is popped by a POP_BLOCK opcode and 1 object is pushed by a
# (LOAD_CONST, None) operation.
#
# In Python 3, the targets of SETUP_WITH have similar behavior,
# complicated by the fact that they also have an __exit__ method
# stacked and what it returns determines what they pop. So their
# stack depth is one greater, a fact we are going to ignore for the
# time being :-/
#
# Our solution is to record the stack state of SETUP_FINALLY targets
# as having 3 objects pushed, which is the maximum. However, to make
# stack recording consistent, the get_next_stacks function will always
# yield the stack state of the target as if 1 object was pushed, but
# this will be corrected in the actual stack recording.
sf_targets = set( label_pos[arg]
for op, arg in code
if op == SETUP_FINALLY or op == SETUP_WITH
)
# What we compute - for each opcode, its stack state, as an n-tuple.
# n is the number of blocks pushed. For each block, we record the number
# of objects pushed.
stacks = [None] * len(code)
def get_next_stacks(pos, curstack):
"""
Get a code position and the stack state before the operation
was done, and yield pairs (pos, curstack) for the next positions
to be explored - those are the positions to which you can get
from the given (pos, curstack).
If the given position was already explored, nothing will be yielded.
"""
op, arg = code[pos]
if isinstance(op, Label):
# We should check if we already reached a node only if it is
# a label.
if pos in sf_targets:
# Adjust a SETUP_FINALLY from 1 to 3 stack entries.
curstack = curstack[:-1] + (curstack[-1] + 2,)
if stacks[pos] is None:
stacks[pos] = curstack
else:
if stacks[pos] != curstack:
raise ValueError("Inconsistent code")
return
def newstack(n):
# Return a new stack, modified by adding n elements to the last
# block
if curstack[-1] + n < 0:
raise ValueError("Popped a non-existing element")
return curstack[:-1] + (curstack[-1]+n,)
if not isopcode(op):
# label or SetLineno - just continue to next line
yield pos+1, curstack
elif op in ( RETURN_VALUE, RAISE_VARARGS ):
# No place in particular to continue to
pass
elif op in (JUMP_FORWARD, JUMP_ABSOLUTE):
# One possibility for a jump
yield label_pos[arg], curstack
elif op in (POP_JUMP_IF_FALSE, POP_JUMP_IF_TRUE):
# Two possibilities for a jump
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(-1)
elif op in (JUMP_IF_TRUE_OR_POP, JUMP_IF_FALSE_OR_POP):
# Two possibilities for a jump
yield label_pos[arg], curstack
yield pos+1, newstack(-1)
elif op == FOR_ITER:
# FOR_ITER pushes next(TOS) on success, and pops TOS and jumps
# on failure
yield label_pos[arg], newstack(-1)
yield pos+1, newstack(1)
elif op == BREAK_LOOP:
# BREAK_LOOP goes to the end of a loop and pops a block
# but like RETURN_VALUE we have no instruction position
# to give. For now treat like RETURN_VALUE
pass
elif op == CONTINUE_LOOP:
# CONTINUE_LOOP jumps to the beginning of a loop which should
# already have been discovered. It does not change the stack
# state nor does it create or pop a block.
#yield label_pos[arg], curstack
#yield label_pos[arg], curstack[:-1]
pass
elif op == SETUP_LOOP:
# We continue with a new block.
# On break, we jump to the label and return to current stack
# state.
yield label_pos[arg], curstack
yield pos+1, curstack + (0,)
elif op == SETUP_EXCEPT:
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack
yield label_pos[arg], newstack(3)
yield pos+1, curstack + (0,)
elif op == SETUP_FINALLY or op == SETUP_WITH :
# We continue with a new block.
# On exception, we jump to the label with 3 extra objects on
# stack, but to keep stack recording consistent, we behave as
# if we add only 1 object. Extra 2 will be added to the actual
# recording.
yield label_pos[arg], newstack(1)
yield pos+1, curstack + ( int(op == SETUP_WITH) ,)
elif op == POP_BLOCK:
# Just pop the block
yield pos+1, curstack[:-1]
elif op == END_FINALLY :
# Since stack recording of SETUP_FINALLY targets is of 3 pushed
# objects (as when an exception is raised), we pop 3 objects.
yield pos+1, newstack(-3)
elif op == _WITH_CLEANUP_OPCODE:
# Since WITH_CLEANUP[_START] is always found after SETUP_FINALLY
# targets, and the stack recording is that of a raised
# exception, we can simply pop 1 object and let END_FINALLY
# pop the remaining 3.
yield pos+1, newstack(-1)
else:
# nothing special, use the CPython value
yield pos+1, newstack( stack_effect( op, arg ) )
# Now comes the calculation: open_positions holds positions which are
# yet to be explored. In each step we take one open position, and
# explore it by appending the positions to which it can go, to
# open_positions. On the way, we update maxsize.
#
# open_positions is a list of tuples: (pos, stack state)
#
# Sneaky Python coding trick here. get_next_stacks() is a generator,
# it contains yield statements. So when we call get_next_stacks()
# what is returned is an iterator. However, the yield statements in
# get_next_stacks() are not in a loop as usual; rather it is
# straight-line code that will execute 0, 1 or 2 yields depending on
# the Opcode at pos.
#
# the list.extend() method takes an iterator and exhausts it, adding
# all yielded values to the list. Hence the statement
#
# open_positions.extend(get_next_stacks(pos,curstack))
#
# appends 0, 1 or 2 tuples (pos, stack_state) to open_positions.
maxsize = 0
open_positions = [(0, (0,))]
while open_positions:
pos, curstack = open_positions.pop()
maxsize = max(maxsize, sum(curstack))
open_positions.extend(get_next_stacks(pos, curstack))
return maxsize
|
Given this object's code list, compute its maximal stack usage.
This is done by scanning the code, and computing for each opcode
the stack state at the opcode.
|
def delete_script(delete=None): # noqa: E501
"""Delete a script
Delete a script # noqa: E501
:param delete: The data needed to delete this script
:type delete: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
delete = Delete.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
Delete a script
Delete a script # noqa: E501
:param delete: The data needed to delete this script
:type delete: dict | bytes
:rtype: Response
|
def transform_velocity_array(array, pos_array, vel, euler, rotation_vel=(0,0,0)):
"""
Transform any Nx3 velocity vector array by adding the center-of-mass 'vel',
accounting for solid-body rotation, and applying an euler transformation.
:parameter array array: numpy array of Nx3 velocity vectors in the original
(star) coordinate frame
:parameter array pos_array: positions of the elements with respect to the
original (star) coordinate frame. Must be the same shape as 'array'.
:parameter array vel: numpy array with length 3 giving cartesian velocity
offsets in the new (system) coordinate frame
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter array rotation_vel: vector of the rotation velocity of the star
in the original (star) coordinate frame
:return: new velocity array with same shape as 'array'
"""
trans_matrix = euler_trans_matrix(*euler)
# v_{rot,i} = omega x r_i with omega = rotation_vel
rotation_component = np.cross(rotation_vel, pos_array, axisb=1)
orbital_component = np.asarray(vel)
if isinstance(array, ComputedColumn):
array = array.for_computations
new_vel = np.dot(np.asarray(array)+rotation_component, trans_matrix.T) + orbital_component
return new_vel
|
Transform any Nx3 velocity vector array by adding the center-of-mass 'vel',
accounting for solid-body rotation, and applying an euler transformation.
:parameter array array: numpy array of Nx3 velocity vectors in the original
(star) coordinate frame
:parameter array pos_array: positions of the elements with respect to the
original (star) coordinate frame. Must be the same shape as 'array'.
:parameter array vel: numpy array with length 3 giving cartesian velocity
offsets in the new (system) coordinate frame
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter array rotation_vel: vector of the rotation velocity of the star
in the original (star) coordinate frame
:return: new velocity array with same shape as 'array'
|
def get_market_last(symbols=None, **kwargs):
"""
MOVED to iexfinance.iexdata.get_last
"""
import warnings
warnings.warn(WNG_MSG % ("get_market_last", "iexdata.get_last"))
return Last(symbols, **kwargs).fetch()
|
MOVED to iexfinance.iexdata.get_last
|
def index_transcriptome(gtf_file, ref_file, data):
"""
use a GTF file and a reference FASTA file to index the transcriptome
"""
gtf_fasta = gtf.gtf_to_fasta(gtf_file, ref_file)
bowtie2_index = os.path.splitext(gtf_fasta)[0]
bowtie2_build = config_utils.get_program("bowtie2", data["config"]) + "-build"
cmd = "{bowtie2_build} --offrate 1 {gtf_fasta} {bowtie2_index}".format(**locals())
message = "Creating transcriptome index of %s with bowtie2." % (gtf_fasta)
do.run(cmd, message)
return bowtie2_index
|
use a GTF file and a reference FASTA file to index the transcriptome
|
def do_read(self, args):
"""Receive from the resource in use."""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
print(self.current.read())
except Exception as e:
print(e)
|
Receive from the resource in use.
|
def add_lambda_integration(self):
"""Attach lambda found to API."""
lambda_uri = self.generate_uris()['lambda_uri']
self.client.put_integration(
restApiId=self.api_id,
resourceId=self.resource_id,
httpMethod=self.trigger_settings['method'],
integrationHttpMethod='POST',
uri=lambda_uri,
type='AWS')
self.add_integration_response()
        self.log.info("Successfully added Lambda integration to API")
|
Attach lambda found to API.
|
def read_value(self, dtype='uint64', count=1, advance=True):
"""
Read one or more scalars of the indicated dtype. Count specifies the number of
scalars to be read in.
"""
data = np.frombuffer(self._blob, dtype=dtype, count=count, offset=self.pos)
if advance:
            # equivalent to data.nbytes (itemsize is in bytes, not bits)
self._pos += data.dtype.itemsize * data.size
if count == 1:
data = data[0]
return data
|
Read one or more scalars of the indicated dtype. Count specifies the number of
scalars to be read in.
|
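A small sketch of the buffer mechanics behind read_value, using numpy directly; the two-value blob below is illustrative:

import numpy as np

# Sixteen bytes backing two uint64 values.
blob = np.arange(2, dtype='uint64').tobytes()
first = np.frombuffer(blob, dtype='uint64', count=1, offset=0)[0]
second = np.frombuffer(blob, dtype='uint64', count=1, offset=8)[0]
print(first, second)  # 0 1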
def from_body(self, param_name, schema):
"""
A decorator that converts the request body into a function parameter based on the specified schema.
:param param_name: The parameter which receives the argument.
        :param schema: The schema class or instance used to deserialize the request body to a Python object.
:return: A function
"""
schema = schema() if isclass(schema) else schema
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs[param_name] = self.__parse_body(schema)
return func(*args, **kwargs)
return wrapper
return decorator
|
A decorator that converts the request body into a function parameter based on the specified schema.
:param param_name: The parameter which receives the argument.
:param schema: The schema class or instance used to deserialize the request body to a Python object.
:return: A function
|
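A hedged usage sketch, assuming a marshmallow-style schema; `api` stands in for whatever object exposes from_body, and save() is a placeholder:

from marshmallow import Schema, fields

class UserSchema(Schema):
    # Field names here are illustrative.
    name = fields.Str(required=True)

@api.from_body('user', UserSchema)
def create_user(user):
    # `user` is the request body already deserialized via UserSchema.
    return save(user)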
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
tgt_filter=None, reply_mac=None):
"""
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
# Get and compare the MAC address
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must be the unspecified address
if req[IPv6].src != "::":
return 0
# Check destination is the link-local solicited-node multicast
# address associated with target address in received NS
tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
received_snma = inet_pton(socket.AF_INET6, req[IPv6].dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
return 0
return 1
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, iface),
iface=iface)
|
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
|
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
|
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
:param filter_type: filter type under which the filter is registered
|