| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
|---|---|
def _bnot8(ins):
""" Negates (BITWISE NOT) top of the stack (8 bits in AF)
"""
output = _8bit_oper(ins.quad[2])
output.append('cpl') # Gives carry only if A = 0
output.append('push af')
return output
|
Negates (BITWISE NOT) top of the stack (8 bits in AF)
|
def emit_accepted(self):
"""
Sends signal that the file dialog was closed properly.
Sends:
filename
"""
if self.result():
filename = self.selectedFiles()[0]
if os.path.isdir(os.path.dirname(filename)):
self.dlg_accepted.emit(filename)
|
Sends signal that the file dialog was closed properly.
Sends:
filename
|
def merge_cycles(self):
"""Work on this graph and remove cycles, with nodes containing concatonated lists of payloads"""
while True:
### remove any self edges
own_edges = self.get_self_edges()
if len(own_edges) > 0:
for e in own_edges: self.remove_edge(e)
c = self.find_cycle()
if not c: return
keep = c[0]
remove_list = c[1:]
for n in remove_list: self.move_edges(n,keep)
for n in remove_list: keep.payload_list += n.payload_list
for n in remove_list: self.remove_node(n)
|
Work on this graph and remove cycles, with nodes containing concatenated lists of payloads
|
def process_gatt_service(services, event):
"""Process a BGAPI event containing a GATT service description and add it to a dictionary
Args:
services (dict): A dictionary of discovered services that is updated with this event
event (BGAPIPacket): An event containing a GATT service
"""
length = len(event.payload) - 5
handle, start, end, uuid = unpack('<BHH%ds' % length, event.payload)
uuid = process_uuid(uuid)
services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}
|
Process a BGAPI event containing a GATT service description and add it to a dictionary
Args:
services (dict): A dictionary of discovered services that is updated with this event
event (BGAPIPacket): An event containing a GATT service
|
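A minimal sketch of constructing such an event payload for testing, assuming `process_gatt_service` and `process_uuid` are importable from the surrounding module; the packet object and the 16-bit Battery Service UUID bytes are illustrative assumptions:
from struct import pack
from types import SimpleNamespace

services = {}
# Fabricated payload: handle=0, start_handle=0x0010, end_handle=0x001A, 16-bit UUID 0x180F (little-endian)
event = SimpleNamespace(payload=pack('<BHH2s', 0, 0x0010, 0x001A, b'\x0f\x18'))
process_gatt_service(services, event)   # services now maps the processed UUID to its handle range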
def _get(self, field):
"""
Return the value for the queried field.
Get the value of a given field. The list of all queryable fields is
documented in the beginning of the model class.
>>> out = m._get('graph')
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out : value
The current value of the requested field.
"""
if field in self._list_fields():
return self.__proxy__.get(field)
else:
raise KeyError('Key \"%s\" not in model. Available fields are %s.' % (field, ', '.join(self._list_fields())))
|
Return the value for the queried field.
Get the value of a given field. The list of all queryable fields is
documented in the beginning of the model class.
>>> out = m._get('graph')
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out : value
The current value of the requested field.
|
def prox_xline(x, step):
"""Projection onto line in x"""
if not np.isscalar(x):
x = x[0]
if x > 0.5:
return np.array([0.5])
else:
return np.array([x])
|
Projection onto line in x
|
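A quick usage sketch of the projection above; note that the `step` argument is accepted but unused by this implementation:
import numpy as np

print(prox_xline(np.array([0.7]), step=0.1))  # -> [0.5], values above 0.5 are clipped
print(prox_xline(0.2, step=0.1))              # -> [0.2], values at or below 0.5 pass through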
def read_retry(sensor, pin, retries=15, delay_seconds=2, platform=None):
"""Read DHT sensor of specified sensor type (DHT11, DHT22, or AM2302) on
specified pin and return a tuple of humidity (as a floating point value
in percent) and temperature (as a floating point value in Celsius).
Unlike the read function, this read_retry function will attempt to read
multiple times (up to the specified max retries) until a good reading can be
found. If a good reading cannot be found after the amount of retries, a tuple
of (None, None) is returned. The delay between retries is by default 2
seconds, but can be overridden.
"""
for i in range(retries):
humidity, temperature = read(sensor, pin, platform)
if humidity is not None and temperature is not None:
return (humidity, temperature)
time.sleep(delay_seconds)
return (None, None)
|
Read DHT sensor of specified sensor type (DHT11, DHT22, or AM2302) on
specified pin and return a tuple of humidity (as a floating point value
in percent) and temperature (as a floating point value in Celsius).
Unlike the read function, this read_retry function will attempt to read
multiple times (up to the specified max retries) until a good reading can be
found. If a good reading cannot be found after the amount of retries, a tuple
of (None, None) is returned. The delay between retries is by default 2
seconds, but can be overridden.
|
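A hedged usage sketch in the style of the Adafruit_DHT package this function appears to come from (the sensor constant and GPIO pin number are illustrative):
import Adafruit_DHT

# DHT22 sensor on GPIO pin 4; read_retry returns (None, None) if all retries fail
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)
if humidity is not None and temperature is not None:
    print('Temp={0:0.1f}C  Humidity={1:0.1f}%'.format(temperature, humidity))
else:
    print('Failed to get a reading')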
def clone(self):
"""
Create a complete copy of self.
:returns: A MaterialPackage that is identical to self.
"""
result = copy.copy(self)
result.size_class_masses = copy.deepcopy(self.size_class_masses)
return result
|
Create a complete copy of self.
:returns: A MaterialPackage that is identical to self.
|
def generate_csv(path, out):
"""\
Walks through the `path` and generates the CSV file `out`
"""
def is_berlin_cable(filename):
return 'BERLIN' in filename
writer = UnicodeWriter(open(out, 'wb'), delimiter=';')
writer.writerow(('Reference ID', 'Created', 'Origin', 'Subject'))
for cable in cables_from_source(path, predicate=is_berlin_cable):
writer.writerow((cable.reference_id, cable.created, cable.origin, titlefy(cable.subject)))
|
\
Walks through the `path` and generates the CSV file `out`
|
def get(self, request, format=None):
""" get HTTP method """
action = request.query_params.get('action', 'unread')
# action can be only "unread" (default), "count" and "all"
action = action if action == 'count' or action == 'all' else 'unread'
# mark as read parameter, defaults to true
mark_as_read = request.query_params.get('read', 'true') == 'true'
# queryset
notifications = self.get_queryset().filter(to_user=request.user)
# pass to specific action
return getattr(self, 'get_%s' % action)(request, notifications, mark_as_read)
|
get HTTP method
|
def binaryEntropyVectorized(x):
"""
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
"""
entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
entropy[x*(1 - x) == 0] = 0
return entropy
|
Calculate entropy for a list of binary random variables
:param x: (numpy array) the probability of the variable to be 1.
:return: entropy: (numpy array) entropy
|
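For reference, the function evaluates the binary entropy H(p) = -p*log2(p) - (1-p)*log2(1-p) elementwise, forcing the result to 0 at p = 0 and p = 1. A small usage sketch:
import numpy as np

p = np.array([0.1, 0.5, 0.9])
print(binaryEntropyVectorized(p))  # -> approximately [0.469, 1.0, 0.469] bits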
def choice(*es):
"""
Create a PEG function to match an ordered choice.
"""
msg = 'Expected one of: {}'.format(', '.join(map(repr, es)))
def match_choice(s, grm=None, pos=0):
errs = []
for e in es:
try:
return e(s, grm, pos)
except PegreError as ex:
errs.append((ex.message, ex.position))
if errs:
raise PegreChoiceError(errs, pos)
return match_choice
|
Create a PEG function to match an ordered choice.
|
def _request_api(self, **kwargs):
"""Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
"""
_url = kwargs.get('url')
_method = kwargs.get('method', 'GET')
_status = kwargs.get('status', 200)
counter = 0
if _method not in ['GET', 'POST']:
raise ValueError('Method is not GET or POST')
while True:
try:
res = REQ[_method](_url, cookies=self._cookie)
if res.status_code == _status:
break
else:
raise BadStatusException(res.content)
except requests.exceptions.BaseHTTPError:
if counter < self._retries:
counter += 1
continue
raise MaxRetryError
self._last_result = res
return res
|
Wrap calls to the url with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
|
def get_description(self):
"""
Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description
"""
paths = ['bpmn:collaboration/bpmn:participant/bpmn:documentation',
'bpmn:collaboration/bpmn:documentation',
'bpmn:process/bpmn:documentation']
for path in paths:
elm = self.root.find(path, NS)
if elm is not None and elm.text:
return elm.text
|
Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description
|
def _get_descending_key(gettime=time.time):
"""Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
Returns:
A string with a time descending key.
"""
now_descending = int((_FUTURE_TIME - gettime()) * 100)
request_id_hash = os.environ.get("REQUEST_ID_HASH")
if not request_id_hash:
request_id_hash = str(random.getrandbits(32))
return "%d%s" % (now_descending, request_id_hash)
|
Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
Returns:
A string with a time descending key.
|
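An illustrative check of the ordering property; this relies on the module-level `_FUTURE_TIME` constant being far enough in the future that both time-derived prefixes have the same number of digits (an assumption for the sketch):
older = _get_descending_key(gettime=lambda: 1_600_000_000)   # earlier timestamp
newer = _get_descending_key(gettime=lambda: 1_600_000_100)   # 100 seconds later
assert newer < older    # newer rows sort first in a lexically ascending scan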
def chat_unfurl(
self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> SlackResponse:
"""Provide custom unfurl behavior for user-posted URLs.
Args:
channel (str): The Channel ID of the message. e.g. 'C1234567890'
ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
e.g. {"https://example.com/": {"text": "Every day is the test."}}
"""
self._validate_xoxp_token()
kwargs.update({"channel": channel, "ts": ts, "unfurls": unfurls})
return self.api_call("chat.unfurl", json=kwargs)
|
Provide custom unfurl behavior for user-posted URLs.
Args:
channel (str): The Channel ID of the message. e.g. 'C1234567890'
ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
e.g. {"https://example.com/": {"text": "Every day is the test."}}
|
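A usage sketch mirroring the argument examples in the docstring; `client` stands for an already-authenticated Slack WebClient instance:
response = client.chat_unfurl(
    channel="C1234567890",
    ts="1234567890.123456",
    unfurls={"https://example.com/": {"text": "Every day is the test."}},
)
assert response["ok"]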
def add_to_linestring(position_data, kml_linestring):
'''add a point to the kml file'''
global kml
# add altitude offset
position_data[2] += float(args.aoff)
kml_linestring.coords.addcoordinates([position_data])
|
add a point to the kml file
|
def fit_params_to_1d_data(logX):
"""
Fit skewed normal distributions to 1-D capacity data,
and return the distribution parameters.
Args
----
logX:
Logarithm of one-dimensional capacity data,
indexed by module and phase resolution index
"""
m_max = logX.shape[0]
p_max = logX.shape[1]
params = np.zeros((m_max, p_max, 3))
for m_ in range(m_max):
for p_ in range(p_max):
params[m_,p_] = skewnorm.fit(logX[m_,p_])
return params
|
Fit skewed normal distributions to 1-D capacity data,
and return the distribution parameters.
Args
----
logX:
Logarithm of one-dimensional capacity data,
indexed by module and phase resolution index
|
def avail(search=None, verbose=False):
'''
Return a list of available images
search : string
search keyword
verbose : boolean (False)
toggle verbose output
CLI Example:
.. code-block:: bash
salt '*' imgadm.avail [percona]
salt '*' imgadm.avail verbose=True
'''
ret = {}
cmd = 'imgadm avail -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
return ret
for image in salt.utils.json.loads(res['stdout']):
if image['manifest']['disabled'] or not image['manifest']['public']:
continue
if search and search not in image['manifest']['name']:
# we skip if we are searching but don't have a match
continue
uuid = image['manifest']['uuid']
data = _parse_image_meta(image, verbose)
if data:
ret[uuid] = data
return ret
|
Return a list of available images
search : string
search keyword
verbose : boolean (False)
toggle verbose output
CLI Example:
.. code-block:: bash
salt '*' imgadm.avail [percona]
salt '*' imgadm.avail verbose=True
|
def pack(self):
'''pack a FD FDM buffer from current values'''
for i in range(len(self.values)):
if math.isnan(self.values[i]):
self.values[i] = 0
return struct.pack(self.pack_string, *self.values)
|
pack a FD FDM buffer from current values
|
def getCompleteFile(self, basepath):
"""Get filename indicating all comics are downloaded."""
dirname = getDirname(self.getName())
return os.path.join(basepath, dirname, "complete.txt")
|
Get filename indicating all comics are downloaded.
|
def execute_javascript(self, *args, **kwargs):
'''
Execute a javascript string in the context of the browser tab.
'''
ret = self.__exec_js(*args, **kwargs)
return ret
|
Execute a javascript string in the context of the browser tab.
|
def word_to_id(self, word):
"""Returns the integer word id of a word string."""
if word in self.vocab:
return self.vocab[word]
else:
return self.unk_id
|
Returns the integer word id of a word string.
|
def node_link_graph(data: Mapping[str, Any]) -> BELGraph:
"""Return graph from node-link data format.
Adapted from :func:`networkx.readwrite.json_graph.node_link_graph`
"""
graph = BELGraph()
graph.graph = data.get('graph', {})
graph.graph[GRAPH_ANNOTATION_LIST] = {
keyword: set(values)
for keyword, values in graph.graph.get(GRAPH_ANNOTATION_LIST, {}).items()
}
mapping = []
for node_data in data['nodes']:
_dsl = parse_result_to_dsl(node_data)
node = graph.add_node_from_data(_dsl)
mapping.append(node)
for data in data['links']:
u = mapping[data['source']]
v = mapping[data['target']]
edge_data = {
k: v
for k, v in data.items()
if k not in {'source', 'target', 'key'}
}
graph.add_edge(u, v, key=data['key'], **edge_data)
return graph
|
Return graph from node-link data format.
Adapted from :func:`networkx.readwrite.json_graph.node_link_graph`
|
def cmd_ublox(self, args):
'''control behaviour of the module'''
if len(args) == 0:
print(self.usage())
elif args[0] == "status":
print(self.cmd_status())
elif args[0] == "set":
self.ublox_settings.command(args[1:])
elif args[0] == "reset":
self.cmd_ublox_reset(args[1:])
elif args[0] == "mga":
self.cmd_ublox_mga(args[1:])
else:
print(self.usage())
|
control behaviour of the module
|
def get_user_best(self, username, *, mode=OsuMode.osu, limit=50):
"""Get a user's best scores.
Parameters
----------
username : str or int
A `str` representing the user's username, or an `int` representing the user's id.
mode : :class:`osuapi.enums.OsuMode`
The osu! game mode for which to look up. Defaults to osu!standard.
limit
The maximum number of results to return. Defaults to 50, maximum 100.
"""
return self._make_req(endpoints.USER_BEST, dict(
k=self.key,
u=username,
type=_username_type(username),
m=mode.value,
limit=limit
), JsonList(SoloScore))
|
Get a user's best scores.
Parameters
----------
username : str or int
A `str` representing the user's username, or an `int` representing the user's id.
mode : :class:`osuapi.enums.OsuMode`
The osu! game mode for which to look up. Defaults to osu!standard.
limit
The maximum number of results to return. Defaults to 50, maximum 100.
|
def solution(self, e, v, extra_constraints=(), exact=None):
"""
Return True if `v` is a solution of `expr` with the extra constraints, False otherwise.
:param e: An expression (an AST) to evaluate
:param v: The proposed solution (an AST)
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param exact: If False, return approximate solutions.
:return: True if `v` is a solution of `expr`, False otherwise
"""
if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
ar = self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
er = self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints))
if er is True:
assert ar is True
return ar
return self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
|
Return True if `v` is a solution of `expr` with the extra constraints, False otherwise.
:param e: An expression (an AST) to evaluate
:param v: The proposed solution (an AST)
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param exact: If False, return approximate solutions.
:return: True if `v` is a solution of `expr`, False otherwise
|
def add_phenotype(self, ind_obj, phenotype_id):
"""Add a phenotype term to the case."""
if phenotype_id.startswith('HP:') or len(phenotype_id) == 7:
logger.debug('querying on HPO term')
hpo_results = phizz.query_hpo([phenotype_id])
else:
logger.debug('querying on OMIM term')
hpo_results = phizz.query_disease([phenotype_id])
added_terms = [] if hpo_results else None
existing_ids = set(term.phenotype_id for term in ind_obj.phenotypes)
for result in hpo_results:
if result['hpo_term'] not in existing_ids:
term = PhenotypeTerm(phenotype_id=result['hpo_term'],
description=result['description'])
logger.info('adding new HPO term: %s', term.phenotype_id)
ind_obj.phenotypes.append(term)
added_terms.append(term)
logger.debug('storing new HPO terms')
self.save()
if added_terms is not None and len(added_terms) > 0:
for case_obj in ind_obj.cases:
self.update_hpolist(case_obj)
return added_terms
|
Add a phenotype term to the case.
|
def format_camel_case(text):
"""
Example::
ThisIsVeryGood
Format the text so that each word's first letter is capitalized and the words are concatenated into one long variable name.
"""
text = text.strip()
if len(text) == 0: # empty strings are not allowed
raise ValueError("can not be empty string!")
else:
text = text.lower() # lower all char
# delete redundant empty space
words = list()
word = list()
for char in text:
if char in ALPHA_DIGITS:
word.append(char)
else:
if len(word):
words.append("".join(word))
word = list()
if len(word):
words.append("".join(word))
words = [word[0].upper() + word[1:] for word in words]
return "".join(words)
|
Example::
ThisIsVeryGood
Format the text so that each word's first letter is capitalized and the words are concatenated into one long variable name.
|
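A usage sketch of the expected behavior (non-alphanumeric characters act as word separators per the ALPHA_DIGITS check above):
print(format_camel_case("this is very good"))   # -> "ThisIsVeryGood"
print(format_camel_case("hello_world 123"))     # -> "HelloWorld123"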
def _control_vm(self, command, expected=None):
"""
Executes a command with QEMU monitor when this VM is running.
:param command: QEMU monitor command (e.g. info status, stop etc.)
:param expected: An array of expected strings
:returns: result of the command (matched object or None)
"""
result = None
if self.is_running() and self._monitor:
log.debug("Execute QEMU monitor command: {}".format(command))
try:
log.info("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor))
reader, writer = yield from asyncio.open_connection(self._monitor_host, self._monitor)
except OSError as e:
log.warn("Could not connect to QEMU monitor: {}".format(e))
return result
try:
writer.write(command.encode('ascii') + b"\n")
except OSError as e:
log.warn("Could not write to QEMU monitor: {}".format(e))
writer.close()
return result
if expected:
try:
while result is None:
line = yield from reader.readline()
if not line:
break
for expect in expected:
if expect in line:
result = line.decode("utf-8").strip()
break
except EOFError as e:
log.warn("Could not read from QEMU monitor: {}".format(e))
writer.close()
return result
|
Executes a command with QEMU monitor when this VM is running.
:param command: QEMU monitor command (e.g. info status, stop etc.)
:param expected: An array of expected strings
:returns: result of the command (matched object or None)
|
def clustered_vert(script, cell_size=1.0, strategy='AVERAGE', selected=False):
""" "Create a new layer populated with a subsampling of the vertexes of the
current mesh
The subsampling is driven by a simple one-per-gridded cell strategy.
Args:
script: the FilterScript object or script filename to write
the filter to.
cell_size (float): The size of the cell of the clustering grid. The smaller the cell, the finer the resulting mesh. For a very coarse mesh use larger values.
strategy (enum 'AVERAGE' or 'CENTER'): <b>Average</b>: for each cell we take the average of the samples falling into it. The resulting point is a new point.<br><b>Closest to center</b>: for each cell we take the sample that is closest to the center of the cell. Chosen vertices are a subset of the original ones.
selected (bool): If true, the filter is applied only to the selected subset of the mesh.
Layer stack:
Creates new layer 'Cluster Samples'. Current layer is changed to the new
layer.
MeshLab versions:
2016.12
1.3.4BETA
"""
if strategy.lower() == 'average':
strategy_num = 0
elif strategy.lower() == 'center':
strategy_num = 1
else:
raise ValueError('Unknown strategy: {}'.format(strategy))
filter_xml = ''.join([
' <filter name="Clustered Vertex Subsampling">\n',
' <Param name="Threshold" ',
'value="{}" '.format(cell_size),
'description="Cell Size" ',
'min="0" ',
'max="1000" ',
'type="RichAbsPerc" ',
'/>\n',
' <Param name="Sampling" ',
'value="{:d}" '.format(strategy_num),
'description="Representative Strategy:" ',
'enum_val0="Average" ',
'enum_val1="Closest to center" ',
'enum_cardinality="2" ',
'type="RichEnum" ',
'/>\n',
' <Param name="Selected" ',
'value="{}" '.format(str(selected).lower()),
'description="Selected" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Cluster Samples')
return None
|
"Create a new layer populated with a subsampling of the vertexes of the
current mesh
The subsampling is driven by a simple one-per-gridded cell strategy.
Args:
script: the FilterScript object or script filename to write
the filter to.
cell_size (float): The size of the cell of the clustering grid. The smaller the cell, the finer the resulting mesh. For a very coarse mesh use larger values.
strategy (enum 'AVERAGE' or 'CENTER'): <b>Average</b>: for each cell we take the average of the samples falling into it. The resulting point is a new point.<br><b>Closest to center</b>: for each cell we take the sample that is closest to the center of the cell. Chosen vertices are a subset of the original ones.
selected (bool): If true, the filter is applied only to the selected subset of the mesh.
Layer stack:
Creates new layer 'Cluster Samples'. Current layer is changed to the new
layer.
MeshLab versions:
2016.12
1.3.4BETA
|
def execute_nb(fname, metadata=None, save=True, show_doc_only=False):
"Execute notebook `fname` with `metadata` for preprocessing."
# Any module used in the notebook that isn't inside must be in the same directory as this script
with open(fname) as f: nb = nbformat.read(f, as_version=4)
ep_class = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor
ep = ep_class(timeout=600, kernel_name='python3')
metadata = metadata or {}
ep.preprocess(nb, metadata)
if save:
with open(fname, 'wt') as f: nbformat.write(nb, f)
NotebookNotary().sign(nb)
|
Execute notebook `fname` with `metadata` for preprocessing.
|
def _pull(keys):
"""helper method for implementing `client.pull` via `client.apply`"""
user_ns = globals()
if isinstance(keys, (list,tuple, set)):
for key in keys:
if not user_ns.has_key(key):
raise NameError("name '%s' is not defined"%key)
return map(user_ns.get, keys)
else:
if not user_ns.has_key(keys):
raise NameError("name '%s' is not defined"%keys)
return user_ns.get(keys)
|
helper method for implementing `client.pull` via `client.apply`
|
def disassociate_failure_node(self, parent, child):
"""Remove a failure node link.
The two resulting nodes will both become root nodes.
=====API DOCS=====
Remove a failure node link.
:param parent: Primary key of parent node to disassociate failure node from.
:type parent: int
:param child: Primary key of child node to be disassociated.
:type child: int
:returns: Dictionary of only one key "changed", which indicates whether the disassociation succeeded.
:rtype: dict
=====API DOCS=====
"""
return self._disassoc(
self._forward_rel_name('failure'), parent, child)
|
Remove a failure node link.
The two resulting nodes will both become root nodes.
=====API DOCS=====
Remove a failure node link.
:param parent: Primary key of parent node to disassociate failure node from.
:type parent: int
:param child: Primary key of child node to be disassociated.
:type child: int
:returns: Dictionary of only one key "changed", which indicates whether the disassociation succeeded.
:rtype: dict
=====API DOCS=====
|
def parse_list_line_windows(self, b):
"""
Parsing Microsoft Windows `dir` output
:param b: response line
:type b: :py:class:`bytes` or :py:class:`str`
:return: (path, info)
:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)
"""
line = b.decode(encoding=self.encoding).rstrip("\r\n")
date_time_end = line.index("M")
date_time_str = line[:date_time_end + 1].strip().split(" ")
date_time_str = " ".join([x for x in date_time_str if len(x) > 0])
line = line[date_time_end + 1:].lstrip()
with setlocale("C"):
strptime = datetime.datetime.strptime
date_time = strptime(date_time_str, "%m/%d/%Y %I:%M %p")
info = {}
info["modify"] = self.format_date_time(date_time)
next_space = line.index(" ")
if line.startswith("<DIR>"):
info["type"] = "dir"
else:
info["type"] = "file"
info["size"] = line[:next_space].replace(",", "")
if not info["size"].isdigit():
raise ValueError
# This here could cause a problem if a filename started with
# whitespace, but if we were to try to detect such a condition
# we would have to make strong assumptions about the input format
filename = line[next_space:].lstrip()
if filename == "." or filename == "..":
raise ValueError
return pathlib.PurePosixPath(filename), info
|
Parsing Microsoft Windows `dir` output
:param b: response line
:type b: :py:class:`bytes` or :py:class:`str`
:return: (path, info)
:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)
|
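An illustrative line in the Microsoft Windows `dir` listing format this parser expects, assuming `client` is a configured instance of the surrounding class (e.g. an aioftp client with its `encoding` set):
line = b"11/16/2019  03:21 PM              1,234 report.txt"
path, info = client.parse_list_line_windows(line)
# path -> PurePosixPath('report.txt')
# info -> {'modify': ..., 'type': 'file', 'size': '1234'}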
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Ensure a load balancer exists.
:param name:
Name of the load balancer.
:param resource_group:
The resource group assigned to the load balancer.
:param sku:
The load balancer SKU, which can be 'Basic' or 'Standard'.
:param tags:
A dictionary of strings can be passed as tag metadata to the load balancer object.
:param frontend_ip_configurations:
An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
configuration can be either private (using private IP address and subnet parameters) or public (using a
reference to a public IP address object). Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``private_ip_address``: The private IP address of the IP configuration. Required if
'private_ip_allocation_method' is 'Static'.
- ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
'Dynamic'.
- ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
- ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.
:param backend_address_pools:
An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.
:param probes:
An optional list of dictionaries representing valid Probe objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
specified URI is required for the probe to be successful.
- ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
- ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
- ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic from
being delivered to the endpoint. This value allows endpoints to be taken out of rotation faster or slower
than the typical times used in Azure.
- ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
set to 'Http'. Otherwise, it is not allowed. There is no default value.
:param load_balancing_rules:
An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
and 'SourceIPProtocol'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
- ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
65535. Note that value 0 enables 'Any Port'.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
- ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
specified in the frontend of the load balancing rule.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
Inbound traffic is randomly load balanced across IPs in the backend IPs.
- ``probe``: Name of the probe object used by the load balancing rule object.
:param inbound_nat_rules:
An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values range from 1 to 65534.
- ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
:param inbound_nat_pools:
An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
- ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
- ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
65535.
:param outbound_nat_rules:
An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
Outbound traffic is randomly load balanced across IPs in the backend IPs.
- ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure load balancer exists:
azurearm_network.load_balancer_present:
- name: lb1
- resource_group: group1
- location: eastus
- frontend_ip_configurations:
- name: lb1_feip1
public_ip_address: pub_ip1
- backend_address_pools:
- name: lb1_bepool1
- probes:
- name: lb1_webprobe1
protocol: tcp
port: 80
interval_in_seconds: 5
number_of_probes: 2
- load_balancing_rules:
- name: lb1_webprobe1
protocol: tcp
frontend_port: 80
backend_port: 80
idle_timeout_in_minutes: 4
frontend_ip_configuration: lb1_feip1
backend_address_pool: lb1_bepool1
probe: lb1_webprobe1
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
- azurearm_network: Ensure public IP exists
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
if sku:
sku = {'name': sku.capitalize()}
load_bal = __salt__['azurearm_network.load_balancer_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' not in load_bal:
# tag changes
tag_changes = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
if tag_changes:
ret['changes']['tags'] = tag_changes
# sku changes
if sku:
sku_changes = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
if sku_changes:
ret['changes']['sku'] = sku_changes
# frontend_ip_configurations changes
if frontend_ip_configurations:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('frontend_ip_configurations', []),
frontend_ip_configurations,
['public_ip_address', 'subnet']
)
if comp_ret.get('comment'):
ret['comment'] = '"frontend_ip_configurations" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['frontend_ip_configurations'] = comp_ret['changes']
# backend_address_pools changes
if backend_address_pools:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('backend_address_pools', []),
backend_address_pools
)
if comp_ret.get('comment'):
ret['comment'] = '"backend_address_pools" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['backend_address_pools'] = comp_ret['changes']
# probes changes
if probes:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get('probes', []), probes)
if comp_ret.get('comment'):
ret['comment'] = '"probes" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['probes'] = comp_ret['changes']
# load_balancing_rules changes
if load_balancing_rules:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('load_balancing_rules', []),
load_balancing_rules,
['frontend_ip_configuration', 'backend_address_pool', 'probe']
)
if comp_ret.get('comment'):
ret['comment'] = '"load_balancing_rules" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['load_balancing_rules'] = comp_ret['changes']
# inbound_nat_rules changes
if inbound_nat_rules:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('inbound_nat_rules', []),
inbound_nat_rules,
['frontend_ip_configuration']
)
if comp_ret.get('comment'):
ret['comment'] = '"inbound_nat_rules" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['inbound_nat_rules'] = comp_ret['changes']
# inbound_nat_pools changes
if inbound_nat_pools:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('inbound_nat_pools', []),
inbound_nat_pools,
['frontend_ip_configuration']
)
if comp_ret.get('comment'):
ret['comment'] = '"inbound_nat_pools" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['inbound_nat_pools'] = comp_ret['changes']
# outbound_nat_rules changes
if outbound_nat_rules:
comp_ret = __utils__['azurearm.compare_list_of_dicts'](
load_bal.get('outbound_nat_rules', []),
outbound_nat_rules,
['frontend_ip_configuration']
)
if comp_ret.get('comment'):
ret['comment'] = '"outbound_nat_rules" {0}'.format(comp_ret['comment'])
return ret
if comp_ret.get('changes'):
ret['changes']['outbound_nat_rules'] = comp_ret['changes']
if not ret['changes']:
ret['result'] = True
ret['comment'] = 'Load balancer {0} is already present.'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
return ret
else:
ret['changes'] = {
'old': {},
'new': {
'name': name,
'sku': sku,
'tags': tags,
'frontend_ip_configurations': frontend_ip_configurations,
'backend_address_pools': backend_address_pools,
'load_balancing_rules': load_balancing_rules,
'probes': probes,
'inbound_nat_rules': inbound_nat_rules,
'inbound_nat_pools': inbound_nat_pools,
'outbound_nat_rules': outbound_nat_rules,
}
}
if __opts__['test']:
ret['comment'] = 'Load balancer {0} would be created.'.format(name)
ret['result'] = None
return ret
lb_kwargs = kwargs.copy()
lb_kwargs.update(connection_auth)
load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
name=name,
resource_group=resource_group,
sku=sku,
tags=tags,
frontend_ip_configurations=frontend_ip_configurations,
backend_address_pools=backend_address_pools,
load_balancing_rules=load_balancing_rules,
probes=probes,
inbound_nat_rules=inbound_nat_rules,
inbound_nat_pools=inbound_nat_pools,
outbound_nat_rules=outbound_nat_rules,
**lb_kwargs
)
if 'error' not in load_bal:
ret['result'] = True
ret['comment'] = 'Load balancer {0} has been created.'.format(name)
return ret
ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
return ret
|
.. versionadded:: 2019.2.0
Ensure a load balancer exists.
:param name:
Name of the load balancer.
:param resource_group:
The resource group assigned to the load balancer.
:param sku:
The load balancer SKU, which can be 'Basic' or 'Standard'.
:param tags:
A dictionary of strings can be passed as tag metadata to the load balancer object.
:param frontend_ip_configurations:
An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
configuration can be either private (using private IP address and subnet parameters) or public (using a
reference to a public IP address object). Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``private_ip_address``: The private IP address of the IP configuration. Required if
'private_ip_allocation_method' is 'Static'.
- ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
'Dynamic'.
- ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
- ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.
:param backend_address_pools:
An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.
:param probes:
An optional list of dictionaries representing valid Probe objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
specified URI is required for the probe to be successful.
- ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
- ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
- ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic from
being delivered to the endpoint. This value allows endpoints to be taken out of rotation faster or slower
than the typical times used in Azure.
- ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
set to 'Http'. Otherwise, it is not allowed. There is no default value.
:param load_balancing_rules:
An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
and 'SourceIPProtocol'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
- ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
65535. Note that value 0 enables 'Any Port'.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
- ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
specified in the frontend of the load balancing rule.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
Inbound traffic is randomly load balanced across IPs in the backend IPs.
- ``probe``: Name of the probe object used by the load balancing rule object.
:param inbound_nat_rules:
An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values range from 1 to 65534.
- ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
:param inbound_nat_pools:
An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
- ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
- ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
65535.
:param outbound_nat_rules:
An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
Outbound traffic is randomly load balanced across IPs in the backend IPs.
- ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure load balancer exists:
azurearm_network.load_balancer_present:
- name: lb1
- resource_group: group1
- location: eastus
- frontend_ip_configurations:
- name: lb1_feip1
public_ip_address: pub_ip1
- backend_address_pools:
- name: lb1_bepool1
- probes:
- name: lb1_webprobe1
protocol: tcp
port: 80
interval_in_seconds: 5
number_of_probes: 2
- load_balancing_rules:
- name: lb1_webprobe1
protocol: tcp
frontend_port: 80
backend_port: 80
idle_timeout_in_minutes: 4
frontend_ip_configuration: lb1_feip1
backend_address_pool: lb1_bepool1
probe: lb1_webprobe1
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
- azurearm_network: Ensure public IP exists
|
def runfile(filename, args=None, wdir=None, namespace=None):
"""
Run filename
args: command line arguments (string)
wdir: working directory
"""
try:
if hasattr(filename, 'decode'):
filename = filename.decode('utf-8')
except (UnicodeError, TypeError):
pass
global __umd__
if os.environ.get("PYDEV_UMD_ENABLED", "").lower() == "true":
if __umd__ is None:
namelist = os.environ.get("PYDEV_UMD_NAMELIST", None)
if namelist is not None:
namelist = namelist.split(',')
__umd__ = UserModuleDeleter(namelist=namelist)
else:
verbose = os.environ.get("PYDEV_UMD_VERBOSE", "").lower() == "true"
__umd__.run(verbose=verbose)
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
namespace = _get_globals()
if '__file__' in namespace:
old_file = namespace['__file__']
else:
old_file = None
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in args.split():
sys.argv.append(arg)
if wdir is not None:
try:
if hasattr(wdir, 'decode'):
wdir = wdir.decode('utf-8')
except (UnicodeError, TypeError):
pass
os.chdir(wdir)
execfile(filename, namespace)
sys.argv = ['']
if old_file is None:
del namespace['__file__']
else:
namespace['__file__'] = old_file
|
Run filename
args: command line arguments (string)
wdir: working directory
|
def stop(self, timeout=None):
"""
Send the GET request required to stop the scan
If timeout is not specified we just send the request and return. When
it is the method will wait for (at most) :timeout: seconds until the
scan changes its status/stops. If the timeout is reached then an
exception is raised.
:param timeout: The timeout in seconds
:return: None, an exception is raised if the timeout is exceeded
"""
assert self.scan_id is not None, 'No scan_id has been set'
#
# Simple stop
#
if timeout is None:
url = '/scans/%s/stop' % self.scan_id
self.conn.send_request(url, method='GET')
return
#
# Stop with timeout
#
self.stop()
for _ in xrange(timeout):
time.sleep(1)
is_running = self.get_status()['is_running']
if not is_running:
return
msg = 'Failed to stop the scan in %s seconds'
raise ScanStopTimeoutException(msg % timeout)
|
Send the GET request required to stop the scan
If timeout is not specified we just send the request and return. When
it is the method will wait for (at most) :timeout: seconds until the
scan changes its status/stops. If the timeout is reached then an
exception is raised.
:param timeout: The timeout in seconds
:return: None, an exception is raised if the timeout is exceeded
|
def _axis_in_detector(geometry):
"""A vector in the detector plane that points along the rotation axis."""
du, dv = geometry.det_axes_init
axis = geometry.axis
c = np.array([np.vdot(axis, du), np.vdot(axis, dv)])
cnorm = np.linalg.norm(c)
# Check for numerical errors
assert cnorm != 0
return c / cnorm
|
A vector in the detector plane that points along the rotation axis.
|
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.extensions['invenio-github'] = self
@app.before_first_request
def connect_signals():
"""Connect OAuthClient signals."""
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.signals import account_setup_committed
from .api import GitHubAPI
from .handlers import account_post_init
account_setup_committed.connect(
account_post_init,
sender=GitHubAPI.remote._get_current_object()
)
@event.listens_for(RemoteAccount, 'before_delete')
def receive_before_delete(mapper, connection, target):
"""Listen for the 'before_delete' event."""
|
Flask application initialization.
|
def pip_install_requirements(requirements, constraints=None, **options):
"""Install a requirements file.
:param constraints: Path to pip constraints file.
http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
"""
command = ["install"]
available_options = ('proxy', 'src', 'log', )
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
if constraints:
command.append("-c {0}".format(constraints))
log("Installing from file: {} with constraints {} "
"and options: {}".format(requirements, constraints, command))
else:
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
|
Install a requirements file.
:param constraints: Path to pip constraints file.
http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
|
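A hedged usage sketch (the option keys shown map to pip flags via `parse_options` in the same module; file names and proxy URL are illustrative):
# Install from requirements.txt, pinned by constraints.txt, through a proxy
pip_install_requirements('requirements.txt',
                         constraints='constraints.txt',
                         proxy='http://proxy.example.com:3128')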
def prettify(root, encoding='utf-8'):
"""
Return a pretty-printed XML string for the Element.
@see: http://www.doughellmann.com/PyMOTW/xml/etree/ElementTree/create.html
"""
if isinstance(root, ElementTree.Element):
node = ElementTree.tostring(root, encoding)
else:
node = root
# Hacky solution as it seems PyXML doesn't exist anymore...
return etree.tostring(etree.fromstring(node),
pretty_print=True,
xml_declaration=True,
encoding=encoding)
|
Return a pretty-printed XML string for the Element.
@see: http://www.doughellmann.com/PyMOTW/xml/etree/ElementTree/create.html
|
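A small usage sketch; the function accepts either an ElementTree Element or an already-serialized XML string/bytes:
from xml.etree import ElementTree

root = ElementTree.Element('root')
ElementTree.SubElement(root, 'child', name='a')
print(prettify(root))   # pretty-printed XML with an XML declaration, via lxml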
def download_user_playlists_by_search(self, user_name):
"""Download user's playlists by his/her name.
:params user_name: user name.
"""
try:
user = self.crawler.search_user(user_name, self.quiet)
except RequestException as exception:
click.echo(exception)
else:
self.download_user_playlists_by_id(user.user_id)
|
Download user's playlists by his/her name.
:params user_name: user name.
|
def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=None):
"""Function to return an abbreviated representation of the interface name.
:param interface: The interface you are attempting to abbreviate.
:param addl_name_map (optional): A dict containing key/value pairs that updates
the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs
{"Po": "Port-Channel"}
:param addl_reverse_map (optional): A dict containing key/value pairs that updates
the reverse mapping. Used if an OS has specific differences. e.g. {"PortChannel": "Po"} vs
{"PortChannel": "po"}
"""
name_map = {}
name_map.update(base_interfaces)
interface_type, interface_number = split_interface(interface)
if isinstance(addl_name_map, dict):
name_map.update(addl_name_map)
rev_name_map = {}
rev_name_map.update(reverse_mapping)
if isinstance(addl_reverse_map, dict):
rev_name_map.update(addl_reverse_map)
# Try to ensure canonical type.
if name_map.get(interface_type):
canonical_type = name_map.get(interface_type)
else:
canonical_type = interface_type
try:
abbreviated_name = rev_name_map[canonical_type] + py23_compat.text_type(
interface_number
)
return abbreviated_name
except KeyError:
pass
# If abbreviated name lookup fails, return original name
return interface
|
Function to return an abbreviated representation of the interface name.
:param interface: The interface you are attempting to abbreviate.
:param addl_name_map (optional): A dict containing key/value pairs that updates
the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs
{"Po": "Port-Channel"}
:param addl_reverse_map (optional): A dict containing key/value pairs that updates
the reverse mapping. Used if an OS has specific differences. e.g. {"PortChannel": "Po"} vs
{"PortChannel": "po"}
|
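Usage sketch; the exact results depend on the module's `base_interfaces` and `reverse_mapping` tables, but a typical mapping would look like:
print(abbreviated_interface_name("GigabitEthernet1/0/1"))   # -> "Gi1/0/1" with the usual mapping
# Interface types missing from the maps fall through the KeyError handler and are returned unchanged.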
def get_raw_path(self, include_self=False):
"""Retrieves the base mount path of the volume. Typically equals to :func:`Disk.get_fs_path` but may also be the
path to a logical volume. This is used to determine the source path for a mount call.
The value returned is normally based on the parent's paths, e.g. if this volume is mounted to a more specific
path, only its children return the more specific path, this volume itself will keep returning the same path.
This makes for consistent use of the offset attribute. If you do not need this behaviour, you can override this
with the include_self argument.
This behavior, however, is not retained for paths that directly affect the volume itself, not the child volumes.
This includes VSS stores and LV volumes.
"""
v = self
if not include_self:
# lv / vss_store are exceptions, as it covers the volume itself, not the child volume
if v._paths.get('lv'):
return v._paths['lv']
elif v._paths.get('vss_store'):
return v._paths['vss_store']
elif v.parent and v.parent != self.disk:
v = v.parent
else:
return self.disk.get_fs_path()
while True:
if v._paths.get('lv'):
return v._paths['lv']
elif v._paths.get('bde'):
return v._paths['bde'] + '/bde1'
elif v._paths.get('luks'):
return '/dev/mapper/' + v._paths['luks']
elif v._paths.get('md'):
return v._paths['md']
elif v._paths.get('vss_store'):
return v._paths['vss_store']
# Only if the volume has a parent that is not a disk, we try to check the parent for a location.
if v.parent and v.parent != self.disk:
v = v.parent
else:
break
return self.disk.get_fs_path()
|
Retrieves the base mount path of the volume. Typically equals :func:`Disk.get_fs_path`, but may also be the
path to a logical volume. This is used to determine the source path for a mount call.
The value returned is normally based on the parent's paths, e.g. if this volume is mounted to a more specific
path, only its children return the more specific path, this volume itself will keep returning the same path.
This makes for consistent use of the offset attribute. If you do not need this behaviour, you can override this
with the include_self argument.
This behavior, however, is not retained for paths that directly affect the volume itself, not the child volumes.
This includes VSS stores and LV volumes.
|
def create_role(name):
"""
Create a new role.
"""
role = role_manager.create(name=name)
if click.confirm(f'Are you sure you want to create {role!r}?'):
role_manager.save(role, commit=True)
click.echo(f'Successfully created {role!r}')
else:
click.echo('Cancelled.')
|
Create a new role.
|
def handle(self, state, message=False):
"""
Handle a state update.
:param state: the new chat state
:type state: :class:`~aioxmpp.chatstates.ChatState`
:param message: pass true to indicate that we handle the
:data:`ACTIVE` state that is implied by
sending a content message.
:type message: :class:`bool`
:returns: whether a standalone notification must be sent for
                  this state update, or respectively whether a chat state
notification must be included with the message.
:raises ValueError: if `message` is true and a state other
than :data:`ACTIVE` is passed.
"""
if message:
if state != chatstates_xso.ChatState.ACTIVE:
raise ValueError(
"Only the state ACTIVE can be sent with messages."
)
elif self._state == state:
return False
self._state = state
return self._strategy.sending
|
Handle a state update.
:param state: the new chat state
:type state: :class:`~aioxmpp.chatstates.ChatState`
:param message: pass true to indicate that we handle the
:data:`ACTIVE` state that is implied by
sending a content message.
:type message: :class:`bool`
:returns: whether a standalone notification must be sent for
          this state update, or respectively whether a chat state
notification must be included with the message.
:raises ValueError: if `message` is true and a state other
than :data:`ACTIVE` is passed.
|
def getmany(self, *keys):
"""
Return a list of values corresponding to the keys in the iterable of
*keys*.
If a key is not present in the collection, its corresponding value will
be :obj:`None`.
.. note::
This method is not implemented by standard Python dictionary
classes.
"""
pickled_keys = (self._pickle_key(k) for k in keys)
pickled_values = self.redis.hmget(self.key, *pickled_keys)
ret = []
for k, v in zip(keys, pickled_values):
value = self.cache.get(k, self._unpickle(v))
ret.append(value)
return ret
|
Return a list of values corresponding to the keys in the iterable of
*keys*.
If a key is not present in the collection, its corresponding value will
be :obj:`None`.
.. note::
This method is not implemented by standard Python dictionary
classes.
|
def _get_listlike_indexer(self, key, axis, raise_missing=False):
"""
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Target labels
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique)
values : array-like
An indexer for the return object; -1 denotes keys not found
"""
o = self.obj
ax = o._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key,
kind=self.name)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis,
raise_missing=raise_missing)
return ax[indexer], indexer
if ax.is_unique:
# If we are trying to get actual keys from empty Series, we
# patiently wait for a KeyError later on - otherwise, convert
if len(ax) or not len(key):
key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
self._validate_read_indexer(keyarr, indexer,
o._get_axis_number(axis),
raise_missing=raise_missing)
return keyarr, indexer
|
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Target labels
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique)
values : array-like
An indexer for the return object; -1 denotes keys not found
|
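A small, hedged illustration of the indexer contract described above, using a plain pandas Index directly (outside the Pipeline machinery): -1 marks labels that were not found.

import pandas as pd

ax = pd.Index(["a", "b", "c"])
indexer = ax.get_indexer_for(["a", "x", "c"])
print(indexer)                 # [ 0 -1  2]
print((indexer != -1).all())   # False -> at least one requested label is missing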
def iter_sys(self):
"""
Iterate over sys_name, overall_sys, histo_sys.
overall_sys or histo_sys may be None for any given sys_name.
"""
names = self.sys_names()
for name in names:
osys = self.GetOverallSys(name)
hsys = self.GetHistoSys(name)
yield name, osys, hsys
|
Iterate over sys_name, overall_sys, histo_sys.
overall_sys or histo_sys may be None for any given sys_name.
|
def descend(self, remote, force=False):
""" Descend, possibly creating directories as needed """
remote_dirs = remote.split('/')
for directory in remote_dirs:
try:
self.conn.cwd(directory)
except Exception:
if force:
self.conn.mkd(directory)
self.conn.cwd(directory)
return self.conn.pwd()
|
Descend, possibly creating directories as needed
|
def striter(self):
"""Iterate over each (optionally padded) string element in RangeSet."""
pad = self.padding or 0
for i in self._sorted():
yield "%0*d" % (pad, i)
|
Iterate over each (optionally padded) string element in RangeSet.
|
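The zero padding comes from the "%0*d" width argument; a standalone sketch with a plain list standing in for the RangeSet internals.

pad = 3
for i in [1, 7, 42]:
    print("%0*d" % (pad, i))   # 001, 007, 042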
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
|
Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
|
def download_image(self, image_type, image):
"""
Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream
"""
url = self._getUrl("/{}/images/{}".format(image_type, image))
response = yield from self._session().request("GET", url, auth=self._auth)
if response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
return response
|
Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream
|
def rewrite_kwargs(conn_type, kwargs, module_name=None):
"""
Manipulate connection keywords.
    Modifies keywords based on the connection type.
There is an assumption here that the client has
already been created and that these keywords are being
passed into methods for interacting with various services.
Current modifications:
    - if conn_type is not cloud and module is not 'compute',
then rewrite project as name.
- if conn_type is cloud and module is 'storage',
then remove 'project' from dict.
:param conn_type: E.g. 'cloud' or 'general'
:type conn_type: ``str``
:param kwargs: Dictionary of keywords sent in by user.
:type kwargs: ``dict``
:param module_name: Name of specific module that will be loaded.
Default is None.
    :type module_name: ``str`` or None
    :returns: kwargs with client and module specific changes
:rtype: ``dict``
"""
if conn_type != 'cloud' and module_name != 'compute':
if 'project' in kwargs:
kwargs['name'] = 'projects/%s' % kwargs.pop('project')
if conn_type == 'cloud' and module_name == 'storage':
if 'project' in kwargs:
del kwargs['project']
return kwargs
|
Manipulate connection keywords.
Modifies keywords based on the connection type.
There is an assumption here that the client has
already been created and that these keywords are being
passed into methods for interacting with various services.
Current modifications:
- if conn_type is not cloud and module is not 'compute',
then rewrite project as name.
- if conn_type is cloud and module is 'storage',
then remove 'project' from dict.
:param conn_type: E.g. 'cloud' or 'general'
:type conn_type: ``str``
:param kwargs: Dictionary of keywords sent in by user.
:type kwargs: ``dict``
:param module_name: Name of specific module that will be loaded.
Default is None.
:type module_name: ``str`` or None
:returns: kwargs with client and module specific changes
:rtype: ``dict``
|
def get_next_job_by_port(plugin_name, port, verify_job=True, conn=None):
"""
Deprecated - Use get_next_job
"""
return get_next_job(plugin_name, None, port, verify_job, conn)
|
Deprecated - Use get_next_job
|
def contains(ell, p, shell_only=False):
"""
Check to see whether point is inside
conic.
    :param shell_only: If True, only solutions exactly on the conic
        are considered (default: False).
"""
v = augment(p)
_ = ell.solve(v)
return N.allclose(_,0) if shell_only else _ <= 0
|
Check to see whether point is inside
conic.
:param shell_only: If True, only solutions exactly on the conic
    are considered (default: False).
|
def _update_element(name, element_type, data, server=None):
'''
    Update an element, including its properties
'''
# Urlencode the name (names may have slashes)
name = quote(name, safe='')
# Update properties first
if 'properties' in data:
properties = []
for key, value in data['properties'].items():
properties.append({'name': key, 'value': value})
_api_post('{0}/{1}/property'.format(element_type, name), properties, server)
del data['properties']
# If the element only contained properties
if not data:
return unquote(name)
# Get the current data then merge updated data into it
update_data = _get_element(name, element_type, server, with_properties=False)
if update_data:
update_data.update(data)
else:
__context__['retcode'] = salt.defaults.exitcodes.SALT_BUILD_FAIL
raise CommandExecutionError('Cannot update {0}'.format(name))
# Finally, update the element
_api_post('{0}/{1}'.format(element_type, name), _clean_data(update_data), server)
return unquote(name)
|
Update an element, including its properties
|
def load(self, context):
"""Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded.
"""
if not (context.flags.debugger_data_server_grpc_port > 0 or
context.flags.debugger_port > 0):
return None
flags = context.flags
try:
# pylint: disable=g-import-not-at-top,unused-import
import tensorflow
except ImportError:
raise ImportError(
'To use the debugger plugin, you need to have TensorFlow installed:\n'
' pip install tensorflow')
try:
# pylint: disable=line-too-long,g-import-not-at-top
from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib
from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib
# pylint: enable=line-too-long,g-import-not-at-top
except ImportError as e:
e_type, e_value, e_traceback = sys.exc_info()
message = e.msg if hasattr(e, 'msg') else e.message # Handle py2 vs py3
if 'grpc' in message:
e_value = ImportError(
message +
'\n\nTo use the debugger plugin, you need to have '
'gRPC installed:\n pip install grpcio')
six.reraise(e_type, e_value, e_traceback)
if flags.debugger_port > 0:
interactive_plugin = (
interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))
logger.info('Starting Interactive Debugger Plugin at gRPC port %d',
flags.debugger_data_server_grpc_port)
interactive_plugin.listen(flags.debugger_port)
return interactive_plugin
elif flags.debugger_data_server_grpc_port > 0:
noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)
logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d',
flags.debugger_data_server_grpc_port)
noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)
return noninteractive_plugin
raise AssertionError()
|
Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded.
|
def to_array(self, channels=2):
"""Return the array of multipliers for the dynamic"""
if channels == 1:
return self.volume_frames.reshape(-1, 1)
if channels == 2:
return np.tile(self.volume_frames, (2, 1)).T
raise Exception(
"RawVolume doesn't know what to do with %s channels" % channels)
|
Return the array of multipliers for the dynamic
|
async def extend(self, additional_time):
"""
Adds more time to an already acquired lock.
``additional_time`` can be specified as an integer or a float, both
representing the number of seconds to add.
"""
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return await self.do_extend(additional_time)
|
Adds more time to an already acquired lock.
``additional_time`` can be specified as an integer or a float, both
representing the number of seconds to add.
|
def readlist(self, fmt, **kwargs):
"""Interpret next bits according to format string(s) and return list.
fmt -- A single string or list of strings with comma separated tokens
describing how to interpret the next bits in the bitstring. Items
can also be integers, for reading new bitstring of the given length.
kwargs -- A dictionary or keyword-value pairs - the keywords used in the
format string will be replaced with their given value.
The position in the bitstring is advanced to after the read items.
        Raises ReadError if not enough bits are available.
Raises ValueError if the format is not understood.
See the docstring for 'read' for token examples. 'pad' tokens are skipped
and not added to the returned list.
>>> h, b1, b2 = s.readlist('hex:20, bin:5, bin:3')
>>> i, bs1, bs2 = s.readlist(['uint:12', 10, 10])
"""
value, self._pos = self._readlist(fmt, self._pos, **kwargs)
return value
|
Interpret next bits according to format string(s) and return list.
fmt -- A single string or list of strings with comma separated tokens
describing how to interpret the next bits in the bitstring. Items
can also be integers, for reading new bitstring of the given length.
kwargs -- A dictionary or keyword-value pairs - the keywords used in the
format string will be replaced with their given value.
The position in the bitstring is advanced to after the read items.
Raises ReadError if not enough bits are available.
Raises ValueError if the format is not understood.
See the docstring for 'read' for token examples. 'pad' tokens are skipped
and not added to the returned list.
>>> h, b1, b2 = s.readlist('hex:20, bin:5, bin:3')
>>> i, bs1, bs2 = s.readlist(['uint:12', 10, 10])
|
def insert_into_table(table, data):
"""
SQL query for inserting data into table
:return: None
"""
fields = data['fields']
fields['date'] = datetime.datetime.now().date()
query = '('
for key in fields.keys():
query += key + ','
query = query[:-1:] + ")"
client.execute(f"INSERT INTO {table} {query} VALUES", [tuple(fields.values())])
|
SQL query for inserting data into table
:return: None
|
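To make the column-list construction in the previous entry concrete, a sketch that only builds the query text and parameter tuple; the ClickHouse-style client and the target table name are assumptions, so the execute call is left out.

import datetime

data = {"fields": {"name": "example", "count": 3}}
fields = dict(data["fields"])
fields["date"] = datetime.datetime.now().date()

# Column order follows dict insertion order (Python 3.7+).
query = "(" + ",".join(fields.keys()) + ")"
print(f"INSERT INTO my_table {query} VALUES")   # INSERT INTO my_table (name,count,date) VALUES
print([tuple(fields.values())])                 # the parameter row passed alongside the query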
def ended(self):
"""We call this method when the function is finished."""
self._end_time = time.time()
if setting(key='memory_profile', expected_type=bool):
self._end_memory = get_free_memory()
|
We call this method when the function is finished.
|
def register_palette(self):
"""Converts pygmets style to urwid palatte"""
default = 'default'
palette = list(self.palette)
mapping = CONFIG['rgb_to_short']
for tok in self.style.styles.keys():
for t in tok.split()[::-1]:
st = self.style.styles[t]
if '#' in st:
break
if '#' not in st:
st = ''
st = st.split()
st.sort() # '#' comes before '[A-Za-z0-9]'
if len(st) == 0:
c = default
elif st[0].startswith('bg:'):
c = default
elif len(st[0]) == 7:
c = 'h' + rgb_to_short(st[0][1:], mapping)[0]
elif len(st[0]) == 4:
c = 'h' + rgb_to_short(st[0][1]*2 + st[0][2]*2 + st[0][3]*2, mapping)[0]
else:
c = default
a = urwid.AttrSpec(c, default, colors=256)
row = (tok, default, default, default, a.foreground, default)
palette.append(row)
self.loop.screen.register_palette(palette)
|
Converts pygments style to urwid palette
|
def compute_eigenvalues(in_prefix, out_prefix):
"""Computes the Eigenvalues using smartpca from Eigensoft.
:param in_prefix: the prefix of the input files.
:param out_prefix: the prefix of the output files.
:type in_prefix: str
:type out_prefix: str
Creates a "parameter file" used by smartpca and runs it.
"""
# First, we create the parameter file
with open(out_prefix + ".parameters", "w") as o_file:
print >>o_file, "genotypename: " + in_prefix + ".bed"
print >>o_file, "snpname: " + in_prefix + ".bim"
print >>o_file, "indivname: " + in_prefix + ".fam"
print >>o_file, "evecoutname: " + out_prefix + ".evec.txt"
print >>o_file, "evaloutname: " + out_prefix + ".eval.txt"
print >>o_file, "numoutlieriter: 0"
print >>o_file, "altnormstyle: NO"
# Executing smartpca
command = ["smartpca", "-p", out_prefix + ".parameters"]
runCommand(command)
|
Computes the Eigenvalues using smartpca from Eigensoft.
:param in_prefix: the prefix of the input files.
:param out_prefix: the prefix of the output files.
:type in_prefix: str
:type out_prefix: str
Creates a "parameter file" used by smartpca and runs it.
|
def lookup(self, key):
"""
Generate hash code for a key from the Minimal Perfect Hash (MPH)
Parameters
----------
Key : object
The item to generate a key for, this works best for keys that
are strings, or can be transformed fairly directly into bytes
Returns : int
The code for the given item
"""
assert self._mph
key = convert_to_bytes(key)
box = ffi.new('char[]', key)
try:
result = _cmph.cmph_search(self._mph, box, len(key))
return result
finally:
del box
|
Generate hash code for a key from the Minimal Perfect Hash (MPH)
Parameters
----------
Key : object
The item to generate a key for, this works best for keys that
are strings, or can be transformed fairly directly into bytes
Returns : int
The code for the given item
|
def is_installed(config):
"""Check for pindel installation on machine.
:param config: (dict) information from yaml(items[0]['config'])
:returns: (boolean) if pindel is installed
"""
try:
config_utils.get_program("pindel2vcf", config)
config_utils.get_program("pindel", config)
return True
except config_utils.CmdNotFound:
return False
|
Check for pindel installation on machine.
:param config: (dict) information from yaml(items[0]['config'])
:returns: (boolean) if pindel is installed
|
def _get_max_subplot_ids(fig):
"""
Given an input figure, return a dict containing the max subplot number
for each subplot type in the figure
Parameters
----------
fig: dict
A plotly figure dict
Returns
-------
dict
A dict from subplot type strings to integers indicating the largest
subplot number in the figure of that subplot type
"""
max_subplot_ids = {subplot_type: 0
for subplot_type in _subplot_types}
max_subplot_ids['xaxis'] = 0
max_subplot_ids['yaxis'] = 0
for trace in fig.get('data', []):
trace_type = trace.get('type', 'scatter')
subplot_types = _trace_to_subplot.get(trace_type, [])
for subplot_type in subplot_types:
subplot_prop_name = _get_subplot_prop_name(subplot_type)
subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
# extract trailing number (if any)
subplot_number = _get_subplot_number(subplot_val)
max_subplot_ids[subplot_type] = max(
max_subplot_ids[subplot_type], subplot_number)
return max_subplot_ids
|
Given an input figure, return a dict containing the max subplot number
for each subplot type in the figure
Parameters
----------
fig: dict
A plotly figure dict
Returns
-------
dict
A dict from subplot type strings to integers indicating the largest
subplot number in the figure of that subplot type
|
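A simplified, self-contained sketch of the same counting idea, restricted to xaxis/yaxis and using an assumed trailing-number helper in place of the module's _get_subplot_number.

import re

def subplot_number(val):
    # 'x' -> 1, 'x3' -> 3 (trailing digits, defaulting to 1)
    match = re.search(r"(\d+)$", val)
    return int(match.group(1)) if match else 1

fig = {"data": [
    {"type": "scatter", "xaxis": "x", "yaxis": "y"},
    {"type": "scatter", "xaxis": "x2", "yaxis": "y3"},
]}

max_ids = {"xaxis": 0, "yaxis": 0}
for trace in fig.get("data", []):
    max_ids["xaxis"] = max(max_ids["xaxis"], subplot_number(trace.get("xaxis", "x")))
    max_ids["yaxis"] = max(max_ids["yaxis"], subplot_number(trace.get("yaxis", "y")))

print(max_ids)   # {'xaxis': 2, 'yaxis': 3}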
def get_credentials(self, *args, **kwargs):
"""
Retrieves the users from elastic.
"""
arguments, _ = self.argparser.parse_known_args()
if self.is_pipe and self.use_pipe:
return self.get_pipe(self.object_type)
elif arguments.tags or arguments.type or arguments.search or arguments.password or arguments.cracked or arguments.range or arguments.domain:
return self.argument_search()
else:
return self.search(*args, **kwargs)
|
Retrieves the users from elastic.
|
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
|
Hour, 12-hour format without leading zeros; i.e. '1' to '12'
|
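The same 24-hour to 12-hour mapping exercised standalone (self.data is assumed to be a datetime-like object in the original).

def hour12(hour):
    if hour == 0:
        return 12
    if hour > 12:
        return hour - 12
    return hour

print([hour12(h) for h in (0, 1, 12, 13, 23)])   # [12, 1, 12, 1, 11]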
def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx
|
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
    index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
|
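A hedged, minimal illustration of the searchsorted step above: the index of the next knowledge date is found on the calendar, and the adjustment window ends the day before it.

import pandas as pd

dates = pd.date_range("2015-01-05", periods=5)      # the Pipeline calendar dates
newest_kd = pd.Timestamp("2015-01-07")               # fresh information arrives here
upper_bound = 4

newest_kd_idx = dates.searchsorted(newest_kd)        # -> 2
end_idx = newest_kd_idx - 1 if newest_kd_idx <= upper_bound else upper_bound
print(newest_kd_idx, end_idx)                        # 2 1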
def _get_compressed_vlan_list(self, pvlan_ids):
"""Generate a compressed vlan list ready for XML using a vlan set.
Sample Use Case:
Input vlan set:
--------------
1 - s = set([11, 50, 25, 30, 15, 16, 3, 8, 2, 1])
2 - s = set([87, 11, 50, 25, 30, 15, 16, 3, 8, 2, 1, 88])
Returned compressed XML list:
----------------------------
1 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30', '50']
2 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30',
'50', '87-88']
"""
if not pvlan_ids:
return []
pvlan_list = list(pvlan_ids)
pvlan_list.sort()
compressed_list = []
begin = -1
prev_vlan = -1
for port_vlan in pvlan_list:
if prev_vlan == -1:
prev_vlan = port_vlan
else:
if (port_vlan - prev_vlan) == 1:
if begin == -1:
begin = prev_vlan
prev_vlan = port_vlan
else:
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%d-%d" % (begin, prev_vlan))
begin = -1
prev_vlan = port_vlan
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%s-%s" % (begin, prev_vlan))
return compressed_list
|
Generate a compressed vlan list ready for XML using a vlan set.
Sample Use Case:
Input vlan set:
--------------
1 - s = set([11, 50, 25, 30, 15, 16, 3, 8, 2, 1])
2 - s = set([87, 11, 50, 25, 30, 15, 16, 3, 8, 2, 1, 88])
Returned compressed XML list:
----------------------------
1 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30', '50']
2 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30',
'50', '87-88']
|
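The same run-length compression rewritten as a free function so it can be run directly; for the first sample set in the docstring it reproduces the expected output.

def compress_vlans(vlan_ids):
    if not vlan_ids:
        return []
    vlans = sorted(vlan_ids)
    out, begin, prev = [], None, None
    for v in vlans:
        if prev is not None and v - prev == 1:
            # continue (or start) a consecutive run
            begin = prev if begin is None else begin
        elif prev is not None:
            # close the previous run or single value
            out.append(str(prev) if begin is None else "%d-%d" % (begin, prev))
            begin = None
        prev = v
    out.append(str(prev) if begin is None else "%d-%d" % (begin, prev))
    return out

print(compress_vlans({11, 50, 25, 30, 15, 16, 3, 8, 2, 1}))
# ['1-3', '8', '11', '15-16', '25', '30', '50']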
def validate_alias(self, name, cmd):
"""Validate an alias and return the its number of arguments."""
if name in self.no_alias:
raise InvalidAliasError("The name %s can't be aliased "
"because it is a keyword or builtin." % name)
if not (isinstance(cmd, basestring)):
raise InvalidAliasError("An alias command must be a string, "
"got: %r" % cmd)
nargs = cmd.count('%s')
if nargs>0 and cmd.find('%l')>=0:
raise InvalidAliasError('The %s and %l specifiers are mutually '
'exclusive in alias definitions.')
return nargs
|
Validate an alias and return its number of arguments.
|
def get_public_events(self):
"""
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events/public",
None
)
|
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
|
def validate_email_with_regex(email_address):
"""
    Note that this will only filter out syntax mistakes in email addresses.
    If a human would think it is probably a valid email, it will most likely pass.
    However, it could still very well be that the actual email address has simply
    not been claimed by anyone (so then this function fails to invalidate it).
"""
if not re.match(VALID_ADDRESS_REGEXP, email_address):
emsg = 'Emailaddress "{}" is not valid according to RFC 2822 standards'.format(
email_address)
raise YagInvalidEmailAddress(emsg)
# apart from the standard, I personally do not trust email addresses without dot.
if "." not in email_address and "localhost" not in email_address.lower():
raise YagInvalidEmailAddress("Missing dot in emailaddress")
|
Note that this will only filter out syntax mistakes in email addresses.
If a human would think it is probably a valid email, it will most likely pass.
However, it could still very well be that the actual email address has simply
not been claimed by anyone (so then this function fails to invalidate it).
|
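A self-contained sketch of the same two checks; the module's VALID_ADDRESS_REGEXP and YagInvalidEmailAddress are defined elsewhere, so a deliberately simplified pattern and a plain ValueError stand in for them here.

import re

# Deliberately simplified stand-in for the module's VALID_ADDRESS_REGEXP.
SIMPLE_ADDRESS_REGEXP = r"^[^@\s]+@[^@\s]+$"

def check(email_address):
    if not re.match(SIMPLE_ADDRESS_REGEXP, email_address):
        raise ValueError('Email address "{}" is not valid'.format(email_address))
    if "." not in email_address and "localhost" not in email_address.lower():
        raise ValueError("Missing dot in email address")

check("user@example.com")        # passes silently
check("user@localhost")          # passes: localhost exemption
# check("no-at-sign") would raise ValueError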
def by_version(cls, session, package_name, version):
"""
Get release for a given version.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param package_name: package name
:type package_name: unicode
:param version: version
:type version: unicode
:return: release instance
:rtype: :class:`pyshop.models.Release`
"""
return cls.first(session,
join=(Package,),
where=((Package.name == package_name),
(cls.version == version)))
|
Get release for a given version.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param package_name: package name
:type package_name: unicode
:param version: version
:type version: unicode
:return: release instance
:rtype: :class:`pyshop.models.Release`
|
def GetHashCode(self):
"""uint32 identifier"""
slice_length = 4 if len(self.Data) >= 4 else len(self.Data)
return int.from_bytes(self.Data[:slice_length], 'little')
|
uint32 identifier
|
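int.from_bytes does the heavy lifting in the entry above: the first four little-endian bytes become the identifier, and shorter payloads still work because the slice simply yields fewer bytes.

data = bytes([0x01, 0x02, 0x03, 0x04, 0xFF])
slice_length = 4 if len(data) >= 4 else len(data)
print(int.from_bytes(data[:slice_length], 'little'))   # 0x04030201 == 67305985

short = bytes([0x01, 0x02])
print(int.from_bytes(short[:4], 'little'))              # 513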
def prepend_urls(self):
""" Add the following array of urls to the Tileset base urls """
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/generate%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('generate'), name="api_tileset_generate"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('download'), name="api_tileset_download"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/status%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('status'), name="api_tileset_status"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/stop%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('stop'), name="api_tileset_stop"),
]
|
Add the following array of urls to the Tileset base urls
|
def append_data(file_strings, file_fmt, tag):
""" Load the SuperMAG files
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
file_fmt : str
String denoting file type (ascii or csv)
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
# Determine the right appending routine for the file type
if file_fmt.lower() == "csv":
return append_csv_data(file_strings)
else:
return append_ascii_data(file_strings, tag)
|
Load the SuperMAG files
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
file_fmt : str
String denoting file type (ascii or csv)
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
|
def Tt(CASRN, AvailableMethods=False, Method=None):
r'''This function handles the retrieval of a chemical's triple temperature.
Lookup is based on CASRNs. Will automatically select a data source to use
if no Method is provided; returns None if the data is not available.
Returns data from [1]_, or a chemical's melting point if available.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
Tt : float
Triple point temperature, [K]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain Tt with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Tt_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
the Tt for the desired chemical, and will return methods
instead of the Tt
Notes
-----
Median difference between melting points and triple points is 0.02 K.
Accordingly, this should be more than good enough for engineering
applications.
Temperatures are on the ITS-68 scale.
Examples
--------
Ammonia
>>> Tt('7664-41-7')
195.47999999999999
References
----------
.. [1] Staveley, L. A. K., L. Q. Lobo, and J. C. G. Calado. "Triple-Points
of Low Melting Substances and Their Use in Cryogenic Work." Cryogenics
21, no. 3 (March 1981): 131-144. doi:10.1016/0011-2275(81)90264-2.
'''
def list_methods():
methods = []
if CASRN in Staveley_data.index:
methods.append(STAVELEY)
if Tm(CASRN):
methods.append(MELTING)
methods.append(NONE)
return methods
if AvailableMethods:
return list_methods()
if not Method:
Method = list_methods()[0]
if Method == STAVELEY:
Tt = Staveley_data.at[CASRN, "Tt68"]
elif Method == MELTING:
Tt = Tm(CASRN)
elif Method == NONE:
Tt = None
else:
        raise Exception('Failure in function')
return Tt
|
r'''This function handles the retrieval of a chemical's triple temperature.
Lookup is based on CASRNs. Will automatically select a data source to use
if no Method is provided; returns None if the data is not available.
Returns data from [1]_, or a chemical's melting point if available.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
Tt : float
Triple point temperature, [K]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain Tt with the
given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
Tt_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
the Tt for the desired chemical, and will return methods
instead of the Tt
Notes
-----
Median difference between melting points and triple points is 0.02 K.
Accordingly, this should be more than good enough for engineering
applications.
Temperatures are on the ITS-68 scale.
Examples
--------
Ammonia
>>> Tt('7664-41-7')
195.47999999999999
References
----------
.. [1] Staveley, L. A. K., L. Q. Lobo, and J. C. G. Calado. "Triple-Points
of Low Melting Substances and Their Use in Cryogenic Work." Cryogenics
21, no. 3 (March 1981): 131-144. doi:10.1016/0011-2275(81)90264-2.
|
def type_name(value):
"""Returns pseudo-YAML type name of given value."""
return type(value).__name__ if isinstance(value, EncapsulatedNode) else \
"struct" if isinstance(value, dict) else \
"sequence" if isinstance(value, (tuple, list)) else \
type(value).__name__
|
Returns pseudo-YAML type name of given value.
|
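A reduced standalone version covering the dict/sequence/fallback branches; the EncapsulatedNode special case is defined elsewhere in the module and is omitted here.

def simple_type_name(value):
    # EncapsulatedNode handling omitted; it lives elsewhere in the module.
    if isinstance(value, dict):
        return "struct"
    if isinstance(value, (tuple, list)):
        return "sequence"
    return type(value).__name__

print(simple_type_name({"a": 1}))   # struct
print(simple_type_name([1, 2]))     # sequence
print(simple_type_name(3.5))        # float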
def dot(self, other):
"""Calculates the dot product of this vector and another vector."""
dot_product = 0
a = self.elements
b = other.elements
a_len = len(a)
b_len = len(b)
i = j = 0
while i < a_len and j < b_len:
a_val = a[i]
b_val = b[j]
if a_val < b_val:
i += 2
elif a_val > b_val:
j += 2
else:
dot_product += a[i + 1] * b[j + 1]
i += 2
j += 2
return dot_product
|
Calculates the dot product of this vector and another vector.
|
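The elements list interleaves sorted indices and values ([index, value, index, value, ...]), which is why both pointers advance by two; a standalone check of the idea:

def sparse_dot(a, b):
    dot_product, i, j = 0, 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 2
        elif a[i] > b[j]:
            j += 2
        else:
            dot_product += a[i + 1] * b[j + 1]
            i += 2
            j += 2
    return dot_product

# index/value pairs: {0: 2, 3: 4} . {3: 5, 7: 1} -> 4 * 5 = 20
print(sparse_dot([0, 2, 3, 4], [3, 5, 7, 1]))   # 20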
def sample_following_dist(handle_iter, n, totalf):
"""Samples n passwords following the distribution from the handle
@handle_iter is an iterator that gives (pw,f) @n is the total
number of samle asked for @totalf is the total number of users,
which is euqal to sum(f for pw,f in handle_iter)
As, handle_iterator is an iterator and can only traverse once.
@totalf needs to be supplied to the funciton.
@handle_iter must be sorted in decreasing order
Returns, an array of @n tuples (id, pw) sampled from @handle_iter.
"""
multiplier = 1.0
if totalf == 1.0:
multiplier = 1e8
# print "WARNING!! I don't except probabilities"
totalf = totalf * multiplier
# print("# Population Size", totalf)
A = gen_n_random_num(n, totalf, unique=False)
A.sort(reverse=True)
# Uniqueness check, non necessarily required, but not very
# computationally intensive
assert len(A) == n, "Not enough randomnumbers generated" \
"Requried {}, generated only {}".format(n, len(A))
# if not all(A[i] != A[i-1] for i in range(1,n,1)):
# for i in range(1,n,1):
# if A[i] == A[i-1]:
# print i, A[i], A[i-1]
j, sampled = 0, 0
val = A.pop()
# print(handle_iter)
for w, f in handle_iter:
j += f * multiplier
while val < j:
sampled += 1
if sampled % 5000 == 0:
print("Sampled:", sampled)
yield (val, w)
if A:
val = A.pop()
else:
val = -1
break
if not A and val == -1:
break
# print("# Stopped at:", w, f, j, '\n', file=sys.stderr)
while A and val < j:
yield (val, w)
if A:
            val = A.pop()
else:
break
|
Samples n passwords following the distribution from the handle.
@handle_iter is an iterator that gives (pw, f), @n is the total
number of samples asked for, and @totalf is the total number of users,
which is equal to sum(f for pw, f in handle_iter).
As handle_iter is an iterator, it can only be traversed once, so
@totalf needs to be supplied to the function.
@handle_iter must be sorted in decreasing order.
Returns an array of @n tuples (id, pw) sampled from @handle_iter.
|
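The core idea above is drawing uniform numbers against a running cumulative frequency; a compact standalone sketch using bisect on a precomputed CDF (the original streams over the iterator instead of materialising it, and gen_n_random_num is an external helper).

import bisect
import random

passwords = [("123456", 50), ("password", 30), ("qwerty", 20)]   # sorted by decreasing frequency
cum, total = [], 0
for _, f in passwords:
    total += f
    cum.append(total)

def sample(n):
    for _ in range(n):
        r = random.uniform(0, total)
        idx = bisect.bisect_left(cum, r)          # first bucket whose cumulative count reaches r
        yield passwords[idx][0]

print(list(sample(5)))   # e.g. ['123456', 'password', '123456', 'qwerty', '123456']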
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True,
gzip_encoded=False, **kwds):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size,
close_stream=False, auto_transfer=auto_transfer,
gzip_encoded=gzip_encoded, **kwds)
|
Create a new Upload object from a stream.
|
def stream_buckets(self, bucket_type=None, timeout=None):
"""
Stream list of buckets through an iterator
"""
if not self.bucket_stream():
raise NotImplementedError('Streaming list-buckets is not '
"supported on %s" %
self.server_version.vstring)
bucket_type = self._get_bucket_type(bucket_type)
url = self.bucket_list_path(bucket_type=bucket_type,
buckets="stream", timeout=timeout)
status, headers, response = self._request('GET', url, stream=True)
if status == 200:
return HttpBucketStream(response)
else:
raise RiakError('Error listing buckets.')
|
Stream list of buckets through an iterator
|
def rebin(self, factor):
"""
I robustly rebin your image by a given factor.
You simply specify a factor, and I will eventually take care of a crop to bring
        the image to integer-multiple-of-your-factor dimensions.
Note that if you crop your image before, you must directly crop to compatible dimensions !
We update the binfactor, this allows you to draw on the image later, still using the
        original pixel coordinates.
Here we work on the numpy array.
"""
if self.pilimage != None:
raise RuntimeError, "Cannot rebin anymore, PIL image already exists !"
if type(factor) != type(0):
raise RuntimeError, "Rebin factor must be an integer !"
if factor < 1:
return
origshape = np.asarray(self.numpyarray.shape)
neededshape = origshape - (origshape % factor)
if not (origshape == neededshape).all():
if self.verbose :
print "Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape)
self.crop(0, neededshape[0], 0, neededshape[1])
else:
if self.verbose :
print "Rebinning %ix%i : I do not need to crop" % (factor, factor)
self.numpyarray = rebin(self.numpyarray, neededshape/factor) # we call the rebin function defined below
# The integer division neededshape/factor is ok, we checked for this above.
self.binfactor = int(self.binfactor * factor)
|
I robustly rebin your image by a given factor.
You simply specify a factor, and I will eventually take care of a crop to bring
the image to integer-multiple-of-your-factor dimensions.
Note that if you crop your image before, you must directly crop to compatible dimensions !
We update the binfactor, this allows you to draw on the image later, still using the
original pixel coordinates.
Here we work on the numpy array.
|
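The crop-to-multiple step in isolation, together with a simple block-mean rebin; this is a sketch of the idea only, since the module's own rebin helper may differ in detail.

import numpy as np

def block_rebin(a, factor):
    # Crop so both dimensions are integer multiples of factor, then average blocks.
    shape = np.asarray(a.shape)
    needed = shape - (shape % factor)
    a = a[:needed[0], :needed[1]]
    return a.reshape(needed[0] // factor, factor,
                     needed[1] // factor, factor).mean(axis=(1, 3))

img = np.arange(25, dtype=float).reshape(5, 5)
print(block_rebin(img, 2).shape)   # (2, 2) -- the 5x5 input was cropped to 4x4 first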
def list_udas(self, database=None, like=None):
"""
Lists all UDAFs associated with a given database
Parameters
----------
database : string
like : string for searching (optional)
"""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=True)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDA)
return result
|
Lists all UDAFs associated with a given database
Parameters
----------
database : string
like : string for searching (optional)
|
def get_keys(self, alias_name, key_format):
"""
Retrieves the contents of PKCS12 file in the format specified.
This PKCS12 formatted file contains both the certificate as well as the key file data.
Valid key formats are Base64 and PKCS12.
Args:
alias_name: Key pair associated with the RabbitMQ
key_format: Valid key formats are Base64 and PKCS12.
Returns:
dict: RabbitMQ certificate
"""
uri = self.URI + "/keys/" + alias_name + "?format=" + key_format
return self._client.get(uri)
|
Retrieves the contents of PKCS12 file in the format specified.
This PKCS12 formatted file contains both the certificate as well as the key file data.
Valid key formats are Base64 and PKCS12.
Args:
alias_name: Key pair associated with the RabbitMQ
key_format: Valid key formats are Base64 and PKCS12.
Returns:
dict: RabbitMQ certificate
|
def set_password(self, password = None):
"""This method is used to set the password.
password must be a string.
"""
if password is None or type(password) is not str:
raise KPError("Need a new image number")
else:
self.password = password
self.last_mod = datetime.now().replace(microsecond=0)
return True
|
This method is used to set the password.
password must be a string.
|
def parse_selfsm(self, f):
""" Go through selfSM file and create a dictionary with the sample name as a key, """
#create a dictionary to populate from this sample's file
parsed_data = dict()
# set a empty variable which denotes if the headers have been read
headers = None
# for each line in the file
for l in f['f'].splitlines():
# split the line on tab
s = l.split("\t")
# if we haven't already read the header line
if headers is None:
# assign this list to headers variable
headers = s
# for all rows after the first
else:
# clean the sample name (first column) and assign to s_name
s_name = self.clean_s_name(s[0], f['root'])
# create a dictionary entry with the first column as a key (sample name) and empty dictionary as a value
parsed_data[s_name] = {}
# for each item in list of items in the row
for i, v in enumerate(s):
# if it's not the first element (if it's not the name)
if i != 0:
# see if CHIP is in the column header and the value is not NA
if "CHIP" in [headers[i]] and v != "NA":
# set hide_chip_columns = False so they are not hidden
self.hide_chip_columns=False
# try and convert the value into a float
try:
                        # and add to the dictionary the key as the corresponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = float(v)
#if can't convert to float...
except ValueError:
                        # add to the dictionary the key as the corresponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = v
# else return the dictionary
return parsed_data
|
Go through selfSM file and create a dictionary with the sample name as a key.
|
def _wrapper(func):
"""
Wraps a generated function so that it catches all Type- and ValueErrors
and raises IntoDPValueErrors.
:param func: the transforming function
"""
@functools.wraps(func)
def the_func(expr):
"""
The actual function.
:param object expr: the expression to be xformed to dbus-python types
"""
try:
return func(expr)
except (TypeError, ValueError) as err:
raise IntoDPValueError(expr, "expr", "could not be transformed") \
from err
return the_func
|
Wraps a generated function so that it catches all Type- and ValueErrors
and raises IntoDPValueErrors.
:param func: the transforming function
|
def _pollCallStatus(self, expectedState, callId=None, timeout=None):
""" Poll the status of outgoing calls.
This is used for modems that do not have a known set of call status update notifications.
        :param expectedState: The internal state we are waiting for. 0 == initiated, 1 == answered, 2 == hangup
:type expectedState: int
:raise TimeoutException: If a timeout was specified, and has occurred
"""
callDone = False
timeLeft = timeout or 999999
while self.alive and not callDone and timeLeft > 0:
time.sleep(0.5)
if expectedState == 0: # Only call initializing can timeout
timeLeft -= 0.5
try:
clcc = self._pollCallStatusRegex.match(self.write('AT+CLCC')[0])
except TimeoutException as timeout:
                # Can happen if the call was ended during our time.sleep() call
clcc = None
if clcc:
direction = int(clcc.group(2))
if direction == 0: # Outgoing call
# Determine call state
stat = int(clcc.group(3))
if expectedState == 0: # waiting for call initiated
if stat == 2 or stat == 3: # Dialing or ringing ("alerting")
callId = int(clcc.group(1))
callType = int(clcc.group(4))
self._handleCallInitiated(None, callId, callType) # if self_dialEvent is None, this does nothing
expectedState = 1 # Now wait for call answer
elif expectedState == 1: # waiting for call to be answered
if stat == 0: # Call active
callId = int(clcc.group(1))
self._handleCallAnswered(None, callId)
expectedState = 2 # Now wait for call hangup
elif expectedState == 2 : # waiting for remote hangup
# Since there was no +CLCC response, the call is no longer active
callDone = True
self._handleCallEnded(None, callId=callId)
elif expectedState == 1: # waiting for call to be answered
# Call was rejected
callDone = True
self._handleCallRejected(None, callId=callId)
if timeLeft <= 0:
raise TimeoutException()
|
Poll the status of outgoing calls.
This is used for modems that do not have a known set of call status update notifications.
:param expectedState: The internal state we are waiting for. 0 == initiated, 1 == answered, 2 == hangup
:type expectedState: int
:raise TimeoutException: If a timeout was specified, and has occurred
|
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_info_by_tail_number('VT-ANL')
            f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
"""
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_data(url)
|
Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_info_by_tail_number('VT-ANL')
f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
|
def _pwm_to_str(self, precision=4):
"""Return string representation of pwm.
Parameters
----------
precision : int, optional, default 4
Floating-point precision.
Returns
-------
pwm_string : str
"""
if not self.pwm:
return ""
fmt = "{{:.{:d}f}}".format(precision)
return "\n".join(
["\t".join([fmt.format(p) for p in row])
for row in self.pwm]
)
|
Return string representation of pwm.
Parameters
----------
precision : int, optional, default 4
Floating-point precision.
Returns
-------
pwm_string : str
|
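The nested format string in the entry above is the only subtle part: the outer braces are escaped so that only the precision is substituted first. A tiny standalone check:

pwm = [[0.25, 0.75], [0.5, 0.5]]
precision = 4
fmt = "{{:.{:d}f}}".format(precision)   # -> "{:.4f}"
print("\n".join("\t".join(fmt.format(p) for p in row) for row in pwm))
# 0.2500    0.7500
# 0.5000    0.5000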
def _add_domains_xml(self, document):
"""
Generates the XML elements for allowed domains.
"""
for domain, attrs in self.domains.items():
domain_element = document.createElement('allow-access-from')
domain_element.setAttribute('domain', domain)
if attrs['to_ports'] is not None:
domain_element.setAttribute(
'to-ports',
','.join(attrs['to_ports'])
)
if not attrs['secure']:
domain_element.setAttribute('secure', 'false')
document.documentElement.appendChild(domain_element)
|
Generates the XML elements for allowed domains.
|
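A self-contained sketch of the same element construction using xml.dom.minidom; the shape of the domains dict ('to_ports', 'secure') mirrors what the method expects, and the policy root element is an assumption for demonstration.

from xml.dom.minidom import Document

domains = {"example.com": {"to_ports": ["80", "443"], "secure": True},
           "*.cdn.example.com": {"to_ports": None, "secure": False}}

document = Document()
document.appendChild(document.createElement("cross-domain-policy"))
for domain, attrs in domains.items():
    el = document.createElement("allow-access-from")
    el.setAttribute("domain", domain)
    if attrs["to_ports"] is not None:
        el.setAttribute("to-ports", ",".join(attrs["to_ports"]))
    if not attrs["secure"]:
        el.setAttribute("secure", "false")
    document.documentElement.appendChild(el)

print(document.toprettyxml(indent="  "))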
def write_composer_operation_log(filename):
"""
Writes the composed operation log from featuremonkey's Composer to a json file.
:param filename:
:return:
"""
from featuremonkey.tracing import serializer
from featuremonkey.tracing.logger import OPERATION_LOG
ol = copy.deepcopy(OPERATION_LOG)
ol = serializer.serialize_operation_log(ol)
with open(filename, 'w+') as operation_log_file:
operation_log_file.write(json.dumps(ol, indent=4, encoding="utf8"))
|
Writes the composed operation log from featuremonkey's Composer to a json file.
:param filename:
:return:
|
def output_tap(self):
"""Output analysis results in TAP format."""
tracker = Tracker(streaming=True, stream=sys.stdout)
for group in self.config.analysis_groups:
n_providers = len(group.providers)
n_checkers = len(group.checkers)
if not group.providers and group.checkers:
test_suite = group.name
description_lambda = lambda r: r.checker.name
elif not group.checkers:
logger.warning(
'Invalid analysis group (no checkers), skipping')
continue
elif n_providers > n_checkers:
test_suite = group.checkers[0].name
description_lambda = lambda r: r.provider.name
else:
test_suite = group.providers[0].name
description_lambda = lambda r: r.checker.name
for result in group.results:
description = description_lambda(result)
if result.code == ResultCode.PASSED:
tracker.add_ok(test_suite, description)
elif result.code == ResultCode.IGNORED:
tracker.add_ok(
test_suite, description + ' (ALLOWED FAILURE)')
elif result.code == ResultCode.NOT_IMPLEMENTED:
tracker.add_not_ok(
test_suite, description, 'TODO implement the test')
elif result.code == ResultCode.FAILED:
tracker.add_not_ok(
test_suite, description,
diagnostics=' ---\n message: %s\n hint: %s\n ...' % (
'\n message: '.join(result.messages.split('\n')),
result.checker.hint))
|
Output analysis results in TAP format.
|
def target_socket(self, config):
""" This method overrides :meth:`.WNetworkNativeTransport.target_socket` method. Do the same thing as
basic method do, but also checks that the result address is IPv4 multicast address.
:param config: beacon configuration
:return: WIPV4SocketInfo
"""
target = WUDPNetworkNativeTransport.target_socket(self, config)
if WNetworkIPV4.is_multicast(target.address()) is False:
raise ValueError('IP multicast address not RFC compliant')
return target
|
This method overrides the :meth:`.WNetworkNativeTransport.target_socket` method. It does the same thing as
the basic method does, but also checks that the resulting address is an IPv4 multicast address.
:param config: beacon configuration
:return: WIPV4SocketInfo
|
def tree():
"""Example showing tree progress view"""
#############
# Test data #
#############
# For this example, we're obviously going to be feeding fictitious data
# to ProgressTree, so here it is
leaf_values = [Value(0) for i in range(6)]
bd_defaults = dict(type=Bar, kwargs=dict(max_value=10))
test_d = {
"Warp Jump": {
"1) Prepare fuel": {
"Load Tanks": {
"Tank 1": BarDescriptor(value=leaf_values[0], **bd_defaults),
"Tank 2": BarDescriptor(value=leaf_values[1], **bd_defaults),
},
"Refine tylium ore": BarDescriptor(
value=leaf_values[2], **bd_defaults
),
},
"2) Calculate jump co-ordinates": {
"Resolve common name to co-ordinates": {
"Querying resolution from baseship": BarDescriptor(
value=leaf_values[3], **bd_defaults
),
},
},
"3) Perform jump": {
"Check FTL drive readiness": BarDescriptor(
value=leaf_values[4], **bd_defaults
),
"Juuuuuump!": BarDescriptor(value=leaf_values[5],
**bd_defaults)
}
}
}
# We'll use this function to bump up the leaf values
def incr_value(obj):
for val in leaf_values:
if val.value < 10:
val.value += 1
break
# And this to check if we're to stop drawing
def are_we_done(obj):
return all(val.value == 10 for val in leaf_values)
###################
# The actual code #
###################
# Create blessings.Terminal instance
t = Terminal()
# Initialize a ProgressTree instance
n = ProgressTree(term=t)
# We'll use the make_room method to make sure the terminal
# is filled out with all the room we need
n.make_room(test_d)
while not are_we_done(test_d):
sleep(0.2 * random.random())
# After the cursor position is first saved (in the first draw call)
# this will restore the cursor back to the top so we can draw again
n.cursor.restore()
# We use our incr_value method to bump the fake numbers
incr_value(test_d)
# Actually draw out the bars
n.draw(test_d, BarDescriptor(bd_defaults))
|
Example showing tree progress view
|