code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def chunks_str(string, n, separator="\n", fill_blanks_last=True):
    """Return *string* split into pieces of at most *n* characters,
    joined with *separator*.

    :param string: text to split (renamed from ``str`` to stop shadowing
        the builtin; positional callers are unaffected)
    :param n: maximum number of characters per piece
    :param separator: joiner placed between pieces (default: newline)
    :param fill_blanks_last: accepted but unused here -- presumably
        consumed by ``chunks`` in some versions; TODO confirm
    :Example:
    >>> print (chunks_str('123456X', 3))
    123
    456
    X
    """
    # The actual splitting is delegated to the sibling ``chunks`` helper.
    return separator.join(chunks(string, n))
|
returns lines with max n characters
:Example:
>>> print (chunks_str('123456X', 3))
123
456
X
|
def ifilter(self, recursive=True, matches=None, flags=FLAGS,
            forcetype=None):
    """Iterate over nodes in our list matching certain conditions.
    If *forcetype* is given, only nodes that are instances of this type (or
    tuple of types) are yielded. Setting *recursive* to ``True`` will
    iterate over all children and their descendants; ``False`` restricts
    iteration to immediate children. ``RECURSE_OTHERS`` only descends into
    children that are not themselves instances of *forcetype*, which makes
    it possible to iterate over all un-nested templates, even if they are
    inside of HTML tags, like so:
    >>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
    >>> code.filter_templates(code.RECURSE_OTHERS)
    ["{{foo}}", "{{foo|{{bar}}}}"]
    *matches* can be used to further restrict the nodes, either as a
    function (taking a single :class:`.Node` and returning a boolean) or a
    regular expression (matched against the node's string representation
    with :func:`re.search`). If *matches* is a regex, the flags passed to
    :func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
    :const:`re.UNICODE`, but custom flags can be specified by passing
    *flags*.
    """
    # The indexed variant yields (index, node) pairs; only the nodes are
    # exposed to callers of this method.
    indexed_nodes = self._indexed_ifilter(recursive, matches, flags, forcetype)
    return (node for _index, node in indexed_nodes)
|
Iterate over nodes in our list matching certain conditions.
If *forcetype* is given, only nodes that are instances of this type (or
tuple of types) are yielded. Setting *recursive* to ``True`` will
iterate over all children and their descendants. ``RECURSE_OTHERS``
will only iterate over children that are not the instances of
*forcetype*. ``False`` will only iterate over immediate children.
``RECURSE_OTHERS`` can be used to iterate over all un-nested templates,
even if they are inside of HTML tags, like so:
>>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
>>> code.filter_templates(code.RECURSE_OTHERS)
["{{foo}}", "{{foo|{{bar}}}}"]
*matches* can be used to further restrict the nodes, either as a
function (taking a single :class:`.Node` and returning a boolean) or a
regular expression (matched against the node's string representation
with :func:`re.search`). If *matches* is a regex, the flags passed to
:func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
:const:`re.UNICODE`, but custom flags can be specified by passing
*flags*.
|
def _sampleLocationOnDisc(self, top=None):
"""
Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs.
"""
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
sampledRadius = self.radius * sqrt(random.random())
x, y = sampledRadius * cos(sampledAngle), sampledRadius * sin(sampledAngle)
return [x, y, z]
|
Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs.
|
def save(self, overwrite=True):
    """
    Saves PopulationSet and TransitSignal.
    Shouldn't need to use this if you're using
    :func:`FPPCalculation.from_ini`.
    Saves :class:`PopulationSet` to ``[folder]/popset.h5``
    and :class:`TransitSignal` to ``[folder]/trsig.pkl``.
    :param overwrite: (optional)
        Whether to overwrite existing files.  Only the population-set
        save receives this flag; the signal is saved unconditionally.
    """
    self.save_popset(overwrite=overwrite)
    # NOTE(review): ``overwrite`` is not forwarded here -- confirm the
    # transit signal pickle is always safe to rewrite.
    self.save_signal()
|
Saves PopulationSet and TransitSignal.
Shouldn't need to use this if you're using
:func:`FPPCalculation.from_ini`.
Saves :class:`PopulationSet` to ``[folder]/popset.h5``
and :class:`TransitSignal` to ``[folder]/trsig.pkl``.
:param overwrite: (optional)
Whether to overwrite existing files.
|
def getWhatIf(number):
    """Returns a :class:`WhatIf` object corresponding to the What If article of
    index passed to the function. If the index is less than zero or
    greater than the maximum number of articles published thus far,
    None is returned instead.
    Like all the routines for handling What If articles, :func:`getWhatIfArchive`
    is called first in order to establish a list of all previously published
    What Ifs.
    Arguments:
        number: an integer or string that represents a number, this is the index of article to retrieve.
    Returns the resulting :class:`WhatIf` object, or None for an
    out-of-range index."""
    archive = getWhatIfArchive()
    latest = getLatestWhatIfNum(archive)
    if type(number) is str and number.isdigit():
        number = int(number)
    # Bug fix: the docstring promises None for negative indices, but the
    # original only rejected indices above the latest article (or a bad
    # archive); add the missing lower-bound check.
    if number < 0 or number > latest or latest <= 0:
        return None
    return archive[number]
|
Returns a :class:`WhatIf` object corresponding to the What If article of
index passed to the function. If the index is less than zero or
greater than the maximum number of articles published thus far,
None is returned instead.
Like all the routines for handling What If articles, :func:`getWhatIfArchive`
is called first in order to establish a list of all previously published
What Ifs.
Arguments:
number: an integer or string that represents a number, this is the index of article to retrieve.
Returns the resulting :class:`WhatIf` object.
|
def input(self, *args, **kwargs):
    """
    Adapt the input and check for errors.
    Returns a tuple of adapted (args, kwargs) or raises
    AnticipateErrors
    """
    errors = []
    if args and self.arg_names:
        adapted = list(args)
        # Positional args are matched to their declared names by position
        # and adapted in place; failures are collected, not raised yet.
        for idx, (name, value) in enumerate(izip(self.arg_names, adapted)):
            try:
                adapted[idx] = self._adapt_param(name, value)
            except AnticipateParamError as err:
                errors.append(err)
        args = tuple(adapted)
    if kwargs and self.params:
        # Keyword args are adapted key by key (values only are replaced,
        # so mutating while iterating is safe).
        for name, value in kwargs.items():
            try:
                kwargs[name] = self._adapt_param(name, value)
            except AnticipateParamError as err:
                errors.append(err)
    if errors:
        # Report every adaptation failure at once.
        raise AnticipateErrors(
            message='Invalid input for %s' % self.func,
            errors=errors)
    return args, kwargs
|
Adapt the input and check for errors.
Returns a tuple of adapted (args, kwargs) or raises
AnticipateErrors
|
def set_attribute(self, obj, attr, value):
    """Set value of attribute in given object instance.

    Mutable mappings are handled specially: for them ``attr`` is used as
    a key rather than an attribute name.

    Args:
        obj (object): object instance to modify
        attr (str): attribute (or key) to change
        value: value to set
    """
    if isinstance(obj, MutableMapping):
        # Mappings store the value under a key instead of an attribute.
        obj[attr] = value
        return
    setattr(obj, attr, value)
|
Set value of attribute in given object instance.
Reason for existence of this method is the fact that 'attribute' can
be also a object's key if it is a dict or any other kind of mapping.
Args:
obj (object): object instance to modify
attr (str): attribute (or key) to change
value: value to set
|
def add(self, relation):
    """Add the relation inner to the cache, under the schema schema and
    identifier identifier
    :param BaseRelation relation: The underlying relation.
    """
    # Wrap the relation so the cache can track it (and its dependents).
    cached = _CachedRelation(relation)
    logger.debug('Adding relation: {!s}'.format(cached))
    # NOTE(review): dump_graph()/pformat run eagerly even when debug
    # logging is disabled -- potentially expensive on large caches.
    logger.debug('before adding: {}'.format(
        pprint.pformat(self.dump_graph()))
    )
    # Only the insertion itself is guarded by the lock; the debug dumps
    # above/below run outside it.
    with self.lock:
        self._setdefault(cached)
    logger.debug('after adding: {}'.format(
        pprint.pformat(self.dump_graph()))
    )
|
Add the relation inner to the cache, under the schema schema and
identifier identifier
:param BaseRelation relation: The underlying relation.
|
def get_parsed_data(fn, *args, **kwargs):
    """Run the full detect/read/parse pipeline as a single call.
    :param str fn: file name
    :return list parsed_data: structured metadata
    """
    # Detect the format first, then pull the raw header, then parse it.
    detected_format = detect_format(fn, *args, **kwargs)
    raw_header = get_header(fn, detected_format, *args, **kwargs)
    return parse_header(raw_header, *args, **kwargs)
|
All above functions as a single function
:param str fn: file name
:return list parsed_data: structured metadata
|
def listBlockChildren(self, block_name=""):
    """
    list children of a block
    :param str block_name: fully qualified block name (required; the
        wildcard characters '%' and '*' are rejected)
    """
    # Bug fix: the original pattern "['%','*']" was a malformed character
    # class that also matched quotes and commas. The intent is to reject
    # the wildcard characters '%' and '*'.
    if (not block_name) or re.search(r"[%*]", block_name):
        dbsExceptionHandler("dbsException-invalid-input", "DBSBlock/listBlockChildren. Block_name must be provided." )
    conn = self.dbi.connection()
    try:
        results = self.blockchildlist.execute(conn, block_name)
        return results
    finally:
        # Always return the connection to the pool, even on error.
        if conn:
            conn.close()
|
list children of a block
|
def _get_job_status(line):
    """magic used as an endpoint for client to get job status.
    %_get_job_status <name>
    Returns:
        A JSON object of the job status.
    """
    try:
        # The magic line carries the job name as its first whitespace-
        # separated token.
        args = line.strip().split()
        job_name = args[0]
        job = None
        if job_name in _local_jobs:
            job = _local_jobs[job_name]
        else:
            raise Exception('invalid job %s' % job_name)
        # ``job`` is always non-None here if no exception was raised; the
        # guard (and its else branch) is defensive only.
        if job is not None:
            error = '' if job.fatal_error is None else str(job.fatal_error)
            data = {'exists': True, 'done': job.is_complete, 'error': error}
        else:
            data = {'exists': False}
    except Exception as e:
        # Any failure (including an unknown job name) is reported to the
        # client as a finished job carrying the error message.
        google.datalab.utils.print_exception_with_last_stack(e)
        data = {'done': True, 'error': str(e)}
    return IPython.core.display.JSON(data)
|
magic used as an endpoint for client to get job status.
%_get_job_status <name>
Returns:
A JSON object of the job status.
|
def _get_github(self):
    """Creates an instance of github.Github to interact with the repos via the
    API interface in pygithub.

    Side effects: sets ``self._user``; when an organization is configured,
    also sets ``self._org``; sets ``self._repo`` only when a repository
    whose full name matches ``self.name`` (case-insensitively) is found.
    """
    from github import Github
    vms("Querying github with user '{}'.".format(self.username))
    g = Github(self.username, self.apikey)
    self._user = g.get_user()
    if self._user is None:
        raise ValueError("Can't authenticate to github with '{}'.".format(self.username))
    #The github user authenticating always has to be specified; however the user
    #may not be able to see the repo, even if it has access to it. We may need
    #to check the organization repos.
    if self.organization is not None:
        self._org = g.get_organization(self.organization)
        vms("Found github organization '{}'.".format(self._org.name), 2)
        #Next we need to find this repository in the lists available to both
        #the user *and* the organization. If they specified an organization, then we
        #should check that first/exclusively.
        # Case-insensitive match on the fully-qualified repo name.
        for repo in self._org.get_repos():
            if repo.full_name.lower() == self.name.lower():
                self._repo = repo
                vms("Found organization repository '{}'.".format(self._repo.full_name), 2)
                break
    else:
        # No organization configured: search only the user's own repos.
        for repo in self._user.get_repos():
            if repo.full_name.lower() == self.name.lower():
                self._repo = repo
                vms("Found user repository '{}'.".format(self._repo.full_name), 2)
                break
|
Creates an instance of github.Github to interact with the repos via the
API interface in pygithub.
|
def update(self, _values=None, **values):
    """
    Update a record in the database
    :param values: The values of the update
    :type values: dict
    :return: The number of records affected
    :rtype: int
    """
    if _values is not None:
        values.update(_values)
    # Sort columns by name so the generated SQL (and binding order) is
    # deterministic regardless of keyword-argument order.
    ordered = OrderedDict(sorted(values.items()))
    bindings = list(ordered.values()) + self.get_bindings()
    sql = self._grammar.compile_update(self, ordered)
    return self._connection.update(sql, self._clean_bindings(bindings))
|
Update a record in the database
:param values: The values of the update
:type values: dict
:return: The number of records affected
:rtype: int
|
def create(self, data, **kwargs):
    """Create a new object.
    Args:
        data (dict): parameters to send to the server to create the
            resource
        **kwargs: Extra options to send to the server (e.g. sudo)
    Returns:
        RESTObject, RESTObject: The source and target issues
    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server cannot perform the request
    """
    # Fail fast locally before hitting the API if required attrs are missing.
    self._check_missing_create_attrs(data)
    server_data = self.gitlab.http_post(self.path, post_data=data,
                                        **kwargs)
    # The server returns both ends of the newly created link; wrap each
    # side in a ProjectIssue bound to the parent's manager.
    source_issue = ProjectIssue(self._parent.manager,
                                server_data['source_issue'])
    target_issue = ProjectIssue(self._parent.manager,
                                server_data['target_issue'])
    return source_issue, target_issue
|
Create a new object.
Args:
data (dict): parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
RESTObject, RESTObject: The source and target issues
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
|
def list_functions(region=None, key=None, keyid=None, profile=None):
    '''
    List all Lambda functions visible in the current scope.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_lambda.list_functions
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    functions = []
    # paged_call transparently follows the API pagination markers, so we
    # just accumulate every page's 'Functions' list.
    for page in __utils__['boto3.paged_call'](conn.list_functions):
        functions.extend(page['Functions'])
    return functions
|
List all Lambda functions visible in the current scope.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.list_functions
|
def smooth_angle_channels(self, channels):
    """Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions.

    Operates in place on ``channels`` (frames x channels array): whenever a
    rotation channel jumps by roughly a full turn between consecutive
    frames, the rest of that channel is shifted by +/-360 degrees.
    """
    for vertex in self.vertices:
        for col in vertex.meta['rot_ind']:
            # NOTE(review): ``if col`` also skips a literal column index
            # of 0, not just sentinel values -- confirm column 0 can never
            # hold a rotation channel.
            if col:
                for k in range(1, channels.shape[0]):
                    diff=channels[k, col]-channels[k-1, col]
                    # Shift the tail whenever adding/removing 360 brings
                    # consecutive frames closer together.
                    if abs(diff+360.)<abs(diff):
                        channels[k:, col]=channels[k:, col]+360.
                    elif abs(diff-360.)<abs(diff):
                        channels[k:, col]=channels[k:, col]-360.
|
Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions.
|
def rates(ctx, opts):
    """Check current API rate limits.

    Prints one colour-coded table row per rate-limited resource, followed
    by a summary line with the number of resources listed.
    """
    click.echo("Retrieving rate limits ... ", nl=False)
    context_msg = "Failed to retrieve status!"
    # API errors are rendered by the shared handler instead of crashing.
    with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
        with maybe_spinner(opts):
            resources_limits = get_rate_limits()
    click.secho("OK", fg="green")
    headers = ["Resource", "Throttled", "Remaining", "Interval (Seconds)", "Reset"]
    rows = []
    # Build one styled row per resource.
    for resource, limits in six.iteritems(resources_limits):
        rows.append(
            [
                click.style(resource, fg="cyan"),
                click.style(
                    "Yes" if limits.throttled else "No",
                    fg="red" if limits.throttled else "green",
                ),
                "%(remaining)s/%(limit)s"
                % {
                    "remaining": click.style(
                        six.text_type(limits.remaining), fg="yellow"
                    ),
                    "limit": click.style(six.text_type(limits.limit), fg="yellow"),
                },
                click.style(six.text_type(limits.interval), fg="blue"),
                click.style(six.text_type(limits.reset), fg="magenta"),
            ]
        )
    if resources_limits:
        click.echo()
        utils.pretty_print_table(headers, rows)
    click.echo()
    num_results = len(resources_limits)
    # Pluralise the summary suffix: "1 resource" vs "n resources".
    list_suffix = "resource%s" % ("s" if num_results != 1 else "")
    utils.pretty_print_list_info(num_results=num_results, suffix=list_suffix)
|
Check current API rate limits.
|
def _prompt(pre_prompt, items, post_prompt, default, indexed, stream):
    '''
    Prompt once and return the user's raw response.

    If you want the default displayed, put a format {} into the
    pre_prompt or post_prompt string (like 'select one [{}]: ') --
    both strings are substituted, not just post_prompt.

    :param pre_prompt: text printed above the item list
    :param items: iterable of items to display
    :param post_prompt: text used as the actual input prompt
    :param default: value substituted into '{}' placeholders, or None
    :param indexed: when True, each item is prefixed with its index
    :param stream: writable stream the menu is printed to
    '''
    # try to sub in the default if provided
    if default is not None:
        if '{}' in pre_prompt:
            pre_prompt = pre_prompt.format(default)
        if '{}' in post_prompt:
            post_prompt = post_prompt.format(default)
    # build the item strings
    item_format = "{indent}{item}"
    if indexed:
        item_format = "{indent}[{index}] {item}"
    item_text_list = []
    indent = '  '
    for index, item in enumerate(items):
        components = {
            'indent': indent,
            'item': item
        }
        if indexed:
            components['index'] = index
        # (removed a dead ``item_text = ''`` initializer that was
        # immediately overwritten)
        item_text_list.append(item_format.format(**components))
    # build full menu
    menu_parts = [pre_prompt] + item_text_list
    full_menu = '\n'.join(menu_parts) + '\n'
    stream.write(full_menu)
    stream.flush()
    # Get user response
    # - py 2/3 compatibility: prefer raw_input when it exists (Python 2)
    get_input = input
    try:
        get_input = raw_input
    except NameError:
        pass
    # - actually get input
    response = get_input(post_prompt)
    return response
|
Prompt once.
If you want the default displayed, put a format {} into the
post_prompt string (like 'select one [{}]: ')
|
def snake_to_camel(value):
    """
    Converts a snake_case_string to a camelCaseString.
    >>> snake_to_camel("foo_bar_baz")
    'fooBarBaz'
    """
    titled_words = [word.title() for word in value.split("_")]
    joined = "".join(titled_words)
    # Re-apply lowercase to the very first character so the result is
    # camelCase rather than PascalCase.
    return value[:1].lower() + joined[1:]
|
Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz'
|
def appendComponent(self, baseGlyph=None, offset=None, scale=None, component=None):
    """
    Append a component to this glyph.
    >>> component = glyph.appendComponent("A")
    This will return a :class:`BaseComponent` object representing
    the new component in the glyph. ``offset`` indicates the x and
    y shift values that should be applied to the appended component.
    It must be a :ref:`type-coordinate` value or ``None``. If
    ``None`` is given, the offset will be ``(0, 0)``.
    >>> component = glyph.appendComponent("A", offset=(10, 20))
    ``scale`` indicates the x and y scale values that should be
    applied to the appended component. It must be a
    :ref:`type-scale` value or ``None``. If ``None`` is given,
    the scale will be ``(1.0, 1.0)``.
    >>> component = glyph.appendComponent("A", scale=(1.0, 2.0))
    ``component`` may be a :class:`BaseComponent` object from which
    attribute values will be copied. If ``baseGlyph``, ``offset``
    or ``scale`` are specified as arguments, those values will be used
    instead of the values in the given component object.
    """
    identifier = None
    # Shear terms default to zero unless copied from ``component``.
    sxy = 0
    syx = 0
    if component is not None:
        component = normalizers.normalizeComponent(component)
        if baseGlyph is None:
            baseGlyph = component.baseGlyph
        sx, sxy, syx, sy, ox, oy = component.transformation
        if offset is None:
            offset = (ox, oy)
        if scale is None:
            scale = (sx, sy)
        # (A duplicated ``if baseGlyph is None`` assignment was removed
        # here; the check above already handled it.)
        if component.identifier is not None:
            # Only reuse the source identifier if it will not collide
            # with an identifier already present in this glyph.
            existing = set([c.identifier for c in self.components if c.identifier is not None])
            if component.identifier not in existing:
                identifier = component.identifier
    baseGlyph = normalizers.normalizeGlyphName(baseGlyph)
    if self.name == baseGlyph:
        raise FontPartsError(("A glyph cannot contain a component referencing itself."))
    if offset is None:
        offset = (0, 0)
    if scale is None:
        scale = (1, 1)
    offset = normalizers.normalizeTransformationOffset(offset)
    scale = normalizers.normalizeTransformationScale(scale)
    ox, oy = offset
    sx, sy = scale
    transformation = (sx, sxy, syx, sy, ox, oy)
    identifier = normalizers.normalizeIdentifier(identifier)
    return self._appendComponent(baseGlyph, transformation=transformation, identifier=identifier)
|
Append a component to this glyph.
>>> component = glyph.appendComponent("A")
This will return a :class:`BaseComponent` object representing
the new component in the glyph. ``offset`` indicates the x and
y shift values that should be applied to the appended component.
It must be a :ref:`type-coordinate` value or ``None``. If
``None`` is given, the offset will be ``(0, 0)``.
>>> component = glyph.appendComponent("A", offset=(10, 20))
``scale`` indicates the x and y scale values that should be
applied to the appended component. It must be a
:ref:`type-scale` value or ``None``. If ``None`` is given,
the scale will be ``(1.0, 1.0)``.
>>> component = glyph.appendComponent("A", scale=(1.0, 2.0))
``component`` may be a :class:`BaseComponent` object from which
attribute values will be copied. If ``baseGlyph``, ``offset``
or ``scale`` are specified as arguments, those values will be used
instead of the values in the given component object.
|
def get_hosting_device_plugging_driver(self, context, id):
    """Returns plugging driver for hosting device template with <id>.

    Returns None when *id* is None or when the driver cannot be loaded.
    Successfully imported drivers are cached per template id.
    """
    if id is None:
        return
    # Fast path: a driver was already instantiated for this template.
    try:
        return self._plugging_drivers[id]
    except KeyError:
        try:
            template = self._get_hosting_device_template(context, id)
            self._plugging_drivers[id] = importutils.import_object(
                template['plugging_driver'])
        except (ImportError, TypeError, n_exc.NeutronException):
            # Import failed: log and fall through; the .get() below then
            # returns None (nothing was cached).
            LOG.exception("Error loading plugging driver for hosting "
                          "device template %s", id)
        return self._plugging_drivers.get(id)
|
Returns plugging driver for hosting device template with <id>.
|
def group(requestContext, *seriesLists):
    """
    Takes an arbitrary number of seriesLists and adds them to a single
    seriesList. This is used to pass multiple seriesLists to a function which
    only takes one.
    """
    # Flatten every incoming list into one combined seriesList.
    return [series
            for series_list in seriesLists
            for series in series_list]
|
Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one.
|
def get_all_rules(cls):
    """Load all available Adblock rules.

    Fetches the raw rule strings from both configured list URLs (easylist
    and the extra list) and compiles them into one AdblockRules matcher.
    """
    from adblockparser import AdblockRules
    raw_rules = []
    for url in [
            config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
        raw_rules.extend(cls.load_raw_rules(url))
    rules = AdblockRules(raw_rules)
    return rules
|
Load all available Adblock rules.
|
def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):
"""Wrapper for getting details about networks.
:param string kind: network kind. Typically 'public' or 'private'
:param boolean router: flag to include router information
:param boolean vlans: flag to include vlan information
:param boolean vlan_ids: flag to include vlan_ids
"""
network = {}
macs = self.get('%s_mac' % kind)
network['mac_addresses'] = macs
if len(macs) == 0:
return network
if router:
network['router'] = self.get('router', macs[0])
if vlans:
network['vlans'] = self.get('vlans', macs[0])
if vlan_ids:
network['vlan_ids'] = self.get('vlan_ids', macs[0])
return network
|
Wrapper for getting details about networks.
:param string kind: network kind. Typically 'public' or 'private'
:param boolean router: flag to include router information
:param boolean vlans: flag to include vlan information
:param boolean vlan_ids: flag to include vlan_ids
|
def process(self, request):
    """
    process determines if this item should visible, if its selected, etc...

    Side effects: may rewrite ``self.title`` and ``self.slug``, replaces
    ``self.children`` with the visible children sorted by weight, may set
    ``self.visible = False``, and marks at most one child as selected.
    """
    # if we're not visible we return since we don't need to do anymore processing
    self.check(request)
    if not self.visible:
        return
    # evaluate our title
    if callable(self.title):
        self.title = self.title(request)
    # if no title is set turn it into a slug
    if self.slug is None:
        # in python3 we don't need to convert to unicode, in python2 slugify
        # requires a unicode string
        if sys.version_info > (3, 0):
            self.slug = slugify(self.title)
        else:
            self.slug = slugify(unicode(self.title))
    # evaluate children
    if callable(self.children):
        children = list(self.children(request))
    else:
        children = list(self.children)
    # Process each child recursively before filtering on visibility.
    for child in children:
        child.parent = self
        child.process(request)
    self.children = [
        child
        for child in children
        if child.visible
    ]
    self.children.sort(key=lambda child: child.weight)
    # if we have no children and MENU_HIDE_EMPTY then we are not visible and should return
    hide_empty = getattr(settings, 'MENU_HIDE_EMPTY', False)
    if hide_empty and len(self.children) == 0:
        self.visible = False
        return
    # find out if one of our children is selected, and mark it as such
    # (ties are broken in favor of the longest matching URL, i.e. the
    # deepest path)
    curitem = None
    for item in self.children:
        item.selected = False
        if item.match_url(request):
            if curitem is None or len(curitem.url) < len(item.url):
                curitem = item
    if curitem is not None:
        curitem.selected = True
|
process determines if this item should visible, if its selected, etc...
|
def check_connection (self):
    """
    Connect to NNTP server and try to request the URL article
    resource (if specified).

    Emits a warning when no server is configured or no newsgroup is
    given, and info messages when the article or group is found.
    """
    # Fall back to the aggregate-wide configured server when the URL
    # itself does not name a host.
    nntpserver = self.host or self.aggregate.config["nntpserver"]
    if not nntpserver:
        self.add_warning(
            _("No NNTP server was specified, skipping this URL."),
            tag=WARN_NNTP_NO_SERVER)
        return
    nntp = self._connect_nntp(nntpserver)
    group = self.urlparts[2]
    # Strip leading slashes from the group/article portion of the URL.
    while group[:1] == '/':
        group = group[1:]
    if '@' in group:
        # A message id (contains '@'):
        # request article info (resp, number mid)
        number = nntp.stat("<"+group+">")[1]
        self.add_info(_('Article number %(num)s found.') % {"num": number})
    else:
        # split off trailing article span
        group = group.split('/', 1)[0]
        if group:
            # request group info (resp, count, first, last, name)
            name = nntp.group(group)[4]
            self.add_info(_("News group %(name)s found.") % {"name": name})
        else:
            # group name is the empty string
            self.add_warning(_("No newsgroup specified in NNTP URL."),
                tag=WARN_NNTP_NO_NEWSGROUP)
|
Connect to NNTP server and try to request the URL article
resource (if specified).
|
def get_projections_on_elements_and_orbitals(self, el_orb_spec):
    """
    Method returning a dictionary of projections on elements and specific
    orbitals
    Args:
        el_orb_spec: A dictionary of Elements and Orbitals for which we want
            to have projections on. It is given as: {Element:[orbitals]},
            e.g., {'Si':['3s','3p']} or {'Si':['3s','3p_x', '3p_y', '3p_z']} depending on input files
    Returns:
        A dictionary of projections on elements in the
        {Spin.up:[][{Element:{orb:values}}],
        Spin.down:[][{Element:{orb:values}}]} format
        if there is no projections in the band structure returns an empty
        dict.
    """
    result = {}
    # NOTE(review): ``structure`` is assigned but unused below -- possibly
    # kept for parity with sibling methods; confirm before removing.
    structure = self.structure
    # Normalise user-supplied keys to Element/Specie objects.
    el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
    for spin, v in self.projections.items():
        # Pre-build the [band][kpoint]{element: {orbital: 0.0}} skeleton.
        result[spin] = [[{str(e): collections.defaultdict(float)
                          for e in el_orb_spec}
                         for i in range(len(self.kpoints))]
                        for j in range(self.nb_bands)]
        for i, j in itertools.product(range(self.nb_bands),
                                      range(len(self.kpoints))):
            for key, item in v[i][j].items():
                for key2, item2 in item.items():
                    # Strip trailing digits from the site label to recover
                    # the bare species symbol.
                    specie = str(Specie(re.split(r"[0-9]+", key)[0]))
                    if get_el_sp(str(specie)) in el_orb_spec:
                        if key2 in el_orb_spec[get_el_sp(str(specie))]:
                            result[spin][i][j][specie][key2] += item2
    return result
|
Method returning a dictionary of projections on elements and specific
orbitals
Args:
el_orb_spec: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Si':['3s','3p']} or {'Si':['3s','3p_x', '3p_y', '3p_z']} depending on input files
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
if there is no projections in the band structure returns an empty
dict.
|
def build_iiif_file_storage_path(url_path, ik_image, iiif_storage):
    """
    Return the file storage path for a given IIIF Image API URL path.
    NOTE: The returned file storage path includes the given ``Image``
    instance's ID to ensure the path is unique and identifiable, and its
    modified timestamp to act as a primitive cache-busting mechanism for
    when the image is changed but there are pre-existing image conversions.
    TODO: Ideally we should use a hash or timestamp for Image's actual
    image data changes, not the whole instance which could change but
    have same image.
    """
    # Drop the leading slash, then any redundant 'iiif/' prefix (it is
    # re-added below when appropriate).
    path = url_path[1:]
    if path.startswith('iiif/'):
        path = path[5:]
    # The image's modified timestamp acts as a primitive cache-buster.
    timestamp = str(calendar.timegm(ik_image.date_modified.timetuple()))
    # Insert the timestamp directly after the first path segment (the
    # image ID), keeping the rest of the path intact.
    image_id, sep, remainder = path.partition('/')
    path = image_id + '/' + timestamp + sep + remainder
    # Replace '/' & ',' with '-' to keep separators of some kind in the
    # storage file name; purging them outright would produce ambiguous,
    # clashing names e.g. /3/100,100,200,200/... => iiif3100100200200
    sanitized = path.replace('/', '-').replace(',', '-')
    # Let the storage engine enforce its own file-name rules.
    storage_name = iiif_storage.get_valid_name(sanitized)
    # Prefix with 'iiif/' to avoid dumping image files straight into a
    # storage location that might be shared with other purposes.
    if iiif_storage.location != 'iiif':
        storage_name = 'iiif/' + storage_name
    return storage_name
|
Return the file storage path for a given IIIF Image API URL path.
NOTE: The returned file storage path includes the given ``Image``
instance's ID to ensure the path is unique and identifiable, and its
modified timestamp to act as a primitive cache-busting mechanism for
when the image is changed but there are pre-existing image conversions.
TODO: Ideally we should use a hash or timestamp for Image's actual
image data changes, not the whole instance which could change but
have same image.
|
def _contains_yieldpoint(children):
"""Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
"""
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False
|
Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
|
def financials(self, security):
    """
    get financials:
    Google Finance provides annual and quarterly financials; if annual is
    true, we will use annual data. Up to four of the latest year/quarter
    data points are provided by Google.
    Refer to page as an example: http://www.google.com/finance?q=TSE:CVG&fstype=ii
    """
    try:
        url = 'http://www.google.com/finance?q=%s&fstype=ii' % security
        try:
            page = self._request(url).read()
        except UfException as ufExcep:
            # if symol is not right, will get 400
            # NOTE(review): ``ufExcep.getCode`` is compared without being
            # called; if getCode is a method (not a property) this branch
            # can never match -- confirm and add parentheses if needed.
            if Errors.NETWORK_400_ERROR == ufExcep.getCode:
                raise UfException(Errors.STOCK_SYMBOL_ERROR, "Can find data for stock %s, security error?" % security)
            raise ufExcep
        bPage = BeautifulSoup(page)
        target = bPage.find(id='incinterimdiv')
        keyTimeValue = {}
        # ugly do...while: parses up to six sibling tables starting at
        # the interim-income div.
        i = 0
        while True:
            self._parseTarget(target, keyTimeValue)
            if i < 5:
                i += 1
                target = target.nextSibling
                # ugly beautiful soap...
                if '\n' == target:
                    target = target.nextSibling
            else:
                break
        return keyTimeValue
    except BaseException:
        # NOTE(review): the message mentions getHistoricalPrices but this
        # is financials() -- likely a copy/paste leftover.
        raise UfException(Errors.UNKNOWN_ERROR, "Unknown Error in GoogleFinance.getHistoricalPrices %s" % traceback.format_exc())
|
get financials:
Google Finance provides annual and quarterly financials; if annual is true, we will use annual data.
Up to four of the latest year/quarter data points will be provided by Google.
Refer to this page as an example: http://www.google.com/finance?q=TSE:CVG&fstype=ii
|
def get_default_download_dir(self, *subdirs):
    """ Get the download path for a file. If not defined, return default
    from config.
    Parameters
    ==========
    subdirs: a single (or list of) subfolders under the basepath
    """
    # Look up value for key "path" in the config
    path = self.get_config_value(self.CONFIG_NAME_PATH)
    # If not set in config, default to the present working directory.
    # Bug fix: the fallback previously returned os.getcwd() alone,
    # silently dropping the requested subdirs; join them here too.
    if path is None:
        return os.path.join(os.getcwd(), *subdirs)
    return os.path.join(path, *subdirs)
|
Get the download path for a file. If not defined, return default
from config.
Parameters
==========
subdirs: a single (or list of) subfolders under the basepath
|
def on_enter(self, command):
    """Execute the entered *command*, optionally timing it.

    When ``self.profile`` is set, the command is run 10 times and the
    mean wall-clock duration per run is printed, after which a fresh
    prompt must be emitted manually.
    """
    if self.profile:
        # Simple profiling test
        t0 = time()
        for _ in range(10):
            self.execute_command(command)
        # 1e2 = 1000 ms / 10 runs: prints the mean per-run time in ms.
        self.insert_text(u"\n<Δt>=%dms\n" % (1e2*(time()-t0)))
        self.new_prompt(self.interpreter.p1)
    else:
        self.execute_command(command)
    self.__flush_eventqueue()
|
on_enter
|
def ignore(self, *ignore_lst: str):
    """
    Register token names that should be skipped, interning each name
    first and keying the entry by the interned string's identity.
    """
    interned = (ConstStrPool.cast_to_const(name) for name in ignore_lst)
    self.ignore_lst.update((id(name), name) for name in interned)
|
ignore a set of tokens with specific names
|
def insertPrimaryDataset(self):
    """
    API to insert A primary dataset in DBS
    :param primaryDSObj: primary dataset object (read as JSON from the
        HTTP request body)
    :type primaryDSObj: dict
    :key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
    :key primary_ds_name: Name of the primary dataset (Required)
    """
    try :
        # The payload arrives as JSON in the HTTP request body.
        body = request.body.read()
        indata = cjson.decode(body)
        indata = validateJSONInputNoCopy("primds", indata)
        # Stamp server-side audit fields before insertion.
        indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
        self.dbsPrimaryDataset.insertPrimaryDataset(indata)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input", self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        # Propagate HTTP errors unchanged so the framework renders them.
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
                % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
|
def multi_buffering(layer, radii, callback=None):
    """Buffer a vector layer using many buffers (for volcanoes or rivers).
    This processing algorithm will keep the original attribute table and
    will add a new one for the hazard class name according to
    safe.definitions.fields.hazard_value_field.
    radii = OrderedDict()
    radii[500] = 'high'
    radii[1000] = 'medium'
    radii[2000] = 'low'
    Issue https://github.com/inasafe/inasafe/issues/3185
    :param layer: The layer to buffer.
    :type layer: QgsVectorLayer
    :param radii: A dictionary of radius to hazard class name.
        Assumed to be ordered by ascending radius so each buffer can ring
        the previous one — TODO confirm with callers.
    :type radii: OrderedDict
    :param callback: A function to call to indicate progress. The function
        should accept params 'current' (int), 'maximum' (int) and 'step' (str).
        Defaults to None.
    :type callback: function
    :return: The buffered vector layer.
    :rtype: QgsVectorLayer
    """
    # Layer output
    output_layer_name = buffer_steps['output_layer_name']
    processing_step = buffer_steps['step_name']
    input_crs = layer.crs()
    feature_count = layer.featureCount()
    fields = layer.fields()
    # Set the new hazard class field.
    new_field = create_field_from_definition(hazard_class_field)
    fields.append(new_field)
    # Set the new buffer distances field.
    new_field = create_field_from_definition(buffer_distance_field)
    fields.append(new_field)
    buffered = create_memory_layer(
        output_layer_name, QgsWkbTypes.PolygonGeometry, input_crs, fields)
    buffered.startEditing()
    # Reproject features if needed into UTM if the layer is in 4326.
    # Buffering in degrees would be meaningless, so distances are applied
    # in a metric UTM zone picked from the layer's centre.
    if layer.crs().authid() == 'EPSG:4326':
        center = layer.extent().center()
        utm = QgsCoordinateReferenceSystem(
            get_utm_epsg(center.x(), center.y(), input_crs))
        transform = QgsCoordinateTransform(
            layer.crs(), utm, QgsProject.instance())
        reverse_transform = QgsCoordinateTransform(
            utm, layer.crs(), QgsProject.instance())
    else:
        transform = None
        reverse_transform = None
    for i, feature in enumerate(layer.getFeatures()):
        geom = QgsGeometry(feature.geometry())
        if transform:
            geom.transform(transform)
        inner_ring = None
        for radius in radii:
            attributes = feature.attributes()
            # We add the hazard value name to the attribute table.
            attributes.append(radii[radius])
            # We add the value of buffer distance to the attribute table.
            attributes.append(radius)
            # 30 segments per quarter circle for the buffer approximation.
            circle = geom.buffer(radius, 30)
            if inner_ring:
                # Punch out the previous (smaller) buffer so the hazard
                # classes form concentric, non-overlapping rings.
                circle.addRing(inner_ring)
            inner_ring = circle.asPolygon()[0]
            new_feature = QgsFeature()
            if reverse_transform:
                circle.transform(reverse_transform)
            new_feature.setGeometry(circle)
            new_feature.setAttributes(attributes)
            buffered.addFeature(new_feature)
        if callback:
            callback(current=i, maximum=feature_count, step=processing_step)
    buffered.commitChanges()
    # We transfer keywords to the output.
    buffered.keywords = layer.keywords
    buffered.keywords['layer_geometry'] = 'polygon'
    buffered.keywords['layer_purpose'] = layer_purpose_hazard['key']
    buffered.keywords['inasafe_fields'][hazard_class_field['key']] = (
        hazard_class_field['field_name'])
    check_layer(buffered)
    return buffered
|
Buffer a vector layer using many buffers (for volcanoes or rivers).
This processing algorithm will keep the original attribute table and
will add a new one for the hazard class name according to
safe.definitions.fields.hazard_value_field.
radii = OrderedDict()
radii[500] = 'high'
radii[1000] = 'medium'
radii[2000] = 'low'
Issue https://github.com/inasafe/inasafe/issues/3185
:param layer: The layer to polygonize.
:type layer: QgsVectorLayer
:param radii: A dictionary of radius.
:type radii: OrderedDict
:param callback: A function to call to indicate progress. The function
should accept params 'current' (int), 'maximum' (int) and 'step' (str).
Defaults to None.
:type callback: function
:return: The buffered vector layer.
:rtype: QgsVectorLayer
|
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
|
helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
|
def can(self, *args, **kwargs):
    '''Overwrite this method to implement custom contextual permissions'''
    # A Permission object delegates to its own can() check.
    if isinstance(self.require, auth.Permission):
        return self.require.can()
    # A callable requirement is evaluated on demand.
    if callable(self.require):
        return self.require()
    # A plain boolean is taken at face value.
    if isinstance(self.require, bool):
        return self.require
    # No recognised requirement configured: allow by default.
    return True
|
Overwrite this method to implement custom contextual permissions
|
def integrity_negotiated(self):
    """
    After :meth:`step` has been called, this property will be set to
    True if integrity protection (signing) has been negotiated in this context, False
    otherwise. If this property is True, you can use :meth:`get_mic` to sign messages with a
    message integrity code (MIC), which the peer application can verify.

    :rtype: bool
    """
    # bool() ensures we return an actual True/False as documented, rather
    # than the raw integer produced by the flag masking.  Signing is
    # usable once negotiated AND either the context is fully established
    # or the implementation reports PROT_READY early.
    return bool(
        (self.flags & C.GSS_C_INTEG_FLAG)
        and (self.established or (self.flags & C.GSS_C_PROT_READY_FLAG))
    )
|
After :meth:`step` has been called, this property will be set to
True if integrity protection (signing) has been negotiated in this context, False
otherwise. If this property is True, you can use :meth:`get_mic` to sign messages with a
message integrity code (MIC), which the peer application can verify.
|
def iter_fit_shifts(xy,uv,nclip=3,sigma=3.0):
    """Fit shifts between the 'xy' and 'uv' positions, sigma-clipping
    outliers for 'nclip' iterations (treated as 0 when None).

    The coordinates that survive clipping are recorded on the returned
    fit under 'img_coords' and 'ref_coords'.
    """
    fit = fit_shifts(xy, uv)
    rounds = 0 if nclip is None else nclip
    for _ in range(rounds):
        # Radial distance of every point from the current fit.
        resids = compute_resids(xy, uv, fit)
        distances = np.sqrt(np.power(resids[:, 0], 2) + np.power(resids[:, 1], 2))
        # Keep only points within sigma standard deviations, then refit.
        keep = distances < sigma * distances.std()
        xy = xy[keep]
        uv = uv[keep]
        fit = fit_shifts(xy, uv)
    fit['img_coords'] = xy
    fit['ref_coords'] = uv
    return fit
|
Perform an iterative-fit with 'nclip' iterations
|
def _checkServer(self, address, port):
"""
*Check that the TCP Port we've decided to use for tunnelling is available*
"""
# CREATE A TCP SOCKET
import socket
s = socket.socket()
try:
s.connect((address, port))
return True
except socket.error, e:
self.log.warning(
"""Connection to `%(address)s` on port `%(port)s` failed - try again: %(e)s""" % locals())
return False
return None
|
*Check that the TCP Port we've decided to use for tunnelling is available*
|
def update_hit_tally(self):
    ''' Tally hits '''
    if self.quiet:
        return
    # Record the count against whichever environment is active.
    count = self.amt_services_wrapper.tally_hits()
    if self.sandbox:
        self.sandbox_hits = count
    else:
        self.live_hits = count
|
Tally hits
|
def superclass(self, klass):
    """True if the Class is a superclass of the given one."""
    # EnvSuperclassP returns a C truth value; normalise it to a bool.
    result = lib.EnvSuperclassP(self._env, self._cls, klass._cls)
    return bool(result)
|
True if the Class is a superclass of the given one.
|
def scalar_projection(v1, v2):
    '''compute the scalar projection of v1 upon v2
    Args:
        v1, v2: iterable
            indices 0, 1, 2 corresponding to cartesian coordinates
    Returns:
        float: the scalar projection of v1 onto v2, i.e. the signed
        length of the component of v1 along the direction of v2
    '''
    # dot(v1, v2) / |v2|  — the Returns section previously claimed a
    # 3-vector (copy-paste from a vector_projection helper); the result
    # is a scalar.
    return np.dot(v1, v2) / np.linalg.norm(v2)
|
compute the scalar projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
the scalar projection (signed length) of v1 onto v2, as a float
|
def explain(self, sql=None, sql_args=None):
    """
    Runs EXPLAIN on this query
    :type sql: str or None
    :param sql: The sql to run EXPLAIN on. If None is specified, the query will
        use ``self.get_sql()``
    :type sql_args: dict or None
    :param sql_args: A dictionary of the arguments to be escaped in the query. If None and
        sql is None, the query will use ``self.get_args()``
    :rtype: list of str
    :return: list of each line of output from the EXPLAIN statement
    """
    cursor = self.get_cursor()
    # Fall back to this query's own SQL (and its args) when none given;
    # explicit SQL with no args gets an empty argument mapping.
    if sql is None:
        sql = self.get_sql()
        sql_args = self.get_args()
    elif sql_args is None:
        sql_args = {}
    cursor.execute('EXPLAIN {0}'.format(sql), sql_args)
    return self._fetch_all_as_dict(cursor)
|
Runs EXPLAIN on this query
:type sql: str or None
:param sql: The sql to run EXPLAIN on. If None is specified, the query will
use ``self.get_sql()``
:type sql_args: dict or None
:param sql_args: A dictionary of the arguments to be escaped in the query. If None and
sql is None, the query will use ``self.get_args()``
:rtype: list of str
:return: list of each line of output from the EXPLAIN statement
|
def get_function_args(func, no_self=False, no_varargs=False):
    """
    Return a tuple with the argument names of a function, in the order of
    the function signature.

    Var-positional arguments are prefixed with ``*`` and var-keyword
    arguments with ``**``, e.g. ``('self', 'value', '**kwargs')``.

    :param func: Function to inspect
    :type func: function object
    :param no_self: If True, omit a leading *self* argument from the
        result
    :type no_self: boolean
    :param no_varargs: If True, omit variable-argument entries
        (``*args`` / ``**kwargs``) from the result
    :type no_varargs: boolean
    :rtype: tuple
    """
    # Prefix each name according to its parameter kind.
    prefix_by_kind = {
        Parameter.VAR_POSITIONAL: "*",
        Parameter.VAR_KEYWORD: "**",
    }
    names = [
        prefix_by_kind.get(param.kind, "") + name
        for name, param in signature(func).parameters.items()
    ]
    # Optionally drop a leading 'self'.
    if no_self and names and names[0] == "self":
        names = names[1:]
    # Optionally drop *args / **kwargs entries (both start with '*').
    if no_varargs:
        names = [name for name in names if not name.startswith("*")]
    return tuple(names)
|
Return tuple of the function argument names in the order of the function signature.
:param func: Function
:type func: function object
:param no_self: Flag that indicates whether the function argument *self*,
if present, is included in the output (False) or not (True)
:type no_self: boolean
:param no_varargs: Flag that indicates whether keyword arguments are
included in the output (True) or not (False)
:type no_varargs: boolean
:rtype: tuple
For example:
>>> import pexdoc.pinspect
>>> class MyClass(object):
... def __init__(self, value, **kwargs):
... pass
...
>>> pexdoc.pinspect.get_function_args(MyClass.__init__)
('self', 'value', '**kwargs')
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_self=True
... )
('value', '**kwargs')
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_self=True, no_varargs=True
... )
('value',)
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_varargs=True
... )
('self', 'value')
|
def run_section(self, name, input_func=_stdin_):
    """Run every question in the named survey section in order."""
    print('\nStuff %s by the license:\n' % name)
    for question in self.survey[name]:
        self.run_question(question, input_func)
|
Run the given section.
|
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):
    """
    Upload file hdfsName from hdfs to s3

    :param job: Toil job handle, forwarded to call_conductor.
    :param master_ip: IP of the Spark master node the transfer runs on.
    :param inputs: Run options namespace; only ``inputs.memory`` is used here.
    :param hdfs_name: Path of the source file in HDFS.
    :param upload_name: Destination name for the upload.
    :param spark_on_toil: Forwarded to truncate_file/remove_file
        (presumably whether the Spark cluster is Toil-managed — TODO confirm).
    """
    if mock_mode():
        # In mock mode the file is truncated first — presumably to keep
        # test fixtures small; confirm before relying on full contents.
        truncate_file(master_ip, hdfs_name, spark_on_toil)
    log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
    call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
    # Clean up the HDFS copy once the upload has completed.
    remove_file(master_ip, hdfs_name, spark_on_toil)
|
Upload file hdfsName from hdfs to s3
|
def _check_tunnel(self, _srv):
    """ Check if tunnel is already established

    Probes the server's local endpoint with a short-lived socket and
    records the result in ``self.tunnel_is_up``.
    """
    if self.skip_tunnel_checkup:
        self.tunnel_is_up[_srv.local_address] = True
        return
    self.logger.info('Checking tunnel to: {0}'.format(_srv.remote_address))
    if isinstance(_srv.local_address, string_types):  # UNIX stream
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(TUNNEL_TIMEOUT)
    try:
        # Windows raises WinError 10049 if trying to connect to 0.0.0.0
        connect_to = ('127.0.0.1', _srv.local_port) \
            if _srv.local_host == '0.0.0.0' else _srv.local_address
        s.connect(connect_to)
        up = _srv.tunnel_ok.get(timeout=TUNNEL_TIMEOUT * 1.1)
        self.tunnel_is_up[_srv.local_address] = up
        # BUGFIX: this branch previously logged "DOWN" unconditionally,
        # even when the queue reported a healthy tunnel.
        self.logger.debug(
            'Tunnel to {0} is {1}'.format(_srv.remote_address,
                                          'UP' if up else 'DOWN')
        )
    except socket.error:
        self.logger.debug(
            'Tunnel to {0} is DOWN'.format(_srv.remote_address)
        )
        self.tunnel_is_up[_srv.local_address] = False
    except queue.Empty:
        # No verdict from the server thread in time: the TCP connect
        # succeeded, so assume the tunnel is usable.
        self.logger.debug(
            'Tunnel to {0} is UP'.format(_srv.remote_address)
        )
        self.tunnel_is_up[_srv.local_address] = True
    finally:
        s.close()
|
Check if tunnel is already established
|
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction.

    Stacks the six single-event datasets into one design matrix whose
    second column is an integer event index (see 'output_info' in the
    returned dict).
    """
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    cats = {}
    for i, dataset in enumerate([olympic_100m_men,
                              olympic_100m_women,
                              olympic_200m_men,
                              olympic_200m_women,
                              olympic_400m_men,
                              olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        # Append this event's rows, tagging each with the event index i.
        X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
        Y = np.vstack((Y, time))
        cats[dataset.__name__] = i
    # NOTE(review): these three assignments mutate only the *last*
    # event's dict, while the value returned below is a fresh dict built
    # from X/Y directly — they look vestigial; confirm before removing.
    data['X'] = X
    data['Y'] = Y
    data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
    return data_details_return({
        'X': X,
        'Y': Y,
        'covariates' : [decimalyear('year', '%Y'), discrete(cats, 'event')],
        'response' : ['time'],
        'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
          0:'100m Men',
          1:'100m Women',
          2:'200m Men',
          3:'200m Women',
          4:'400m Men',
          5:'400m Women'}
        }, data_set)
|
All olympics sprint winning times for multiple output prediction.
|
def visitAdditionOrSubtractionExpression(self, ctx):
    """
    expression: expression (PLUS | MINUS) expression

    Tries decimal arithmetic first; if either operand can't be converted
    to a decimal, falls back to date/datetime arithmetic where the right
    operand is treated as a time of day (added as a duration) or as a
    whole number of days.
    """
    # PLUS token present -> addition, otherwise subtraction.
    is_add = ctx.PLUS() is not None
    arg1 = self.visit(ctx.expression(0))
    arg2 = self.visit(ctx.expression(1))
    # first try as decimals
    try:
        _arg1 = conversions.to_decimal(arg1, self._eval_context)
        _arg2 = conversions.to_decimal(arg2, self._eval_context)
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError:
        # Not numeric: fall through to date arithmetic below.
        pass
    # then as date + something
    try:
        _arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
        if isinstance(arg2, datetime.time):
            # upgrade our date to datetime
            _arg1 = conversions.to_datetime(_arg1, self._eval_context)
            # convert time value to a duration
            _arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
        else:
            # Anything else is coerced to an integer number of days.
            _arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError as ex:
        # Neither interpretation worked; chain the underlying error.
        raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex)
|
expression: expression (PLUS | MINUS) expression
|
def callback(self, event):
    """
    Function that gets called on each event from pyinotify.

    Reacts only to IN_CLOSE_WRITE events: ldapdomaindump JSON output is
    imported once both the groups and users dumps exist, computer dumps
    are imported directly, and secretsdump ``*_samhashes.sam`` files
    trigger a hash import and remove the host from the target list.
    """
    # IN_CLOSE_WRITE -> 0x00000008
    if event.mask == 0x00000008:
        if event.name.endswith('.json'):
            print_success("Ldapdomaindump file found")
            if event.name in ['domain_groups.json', 'domain_users.json']:
                if event.name == 'domain_groups.json':
                    self.domain_groups_file = event.pathname
                if event.name == 'domain_users.json':
                    self.domain_users_file = event.pathname
                # Import only once both dump files have been observed.
                if self.domain_groups_file and self.domain_users_file:
                    print_success("Importing users")
                    subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file])
            elif event.name == 'domain_computers.json':
                print_success("Importing computers")
                subprocess.Popen(['jk-import-domaindump', event.pathname])
            # Ldap has been dumped, so remove the ldap targets.
            self.ldap_strings = []
            self.write_targets()
        if event.name.endswith('_samhashes.sam'):
            # Files are named <host>_samhashes.sam; recover the host part.
            host = event.name.replace('_samhashes.sam', '')
            # TODO import file.
            print_success("Secretsdump file, host ip: {}".format(host))
            subprocess.Popen(['jk-import-secretsdump', event.pathname])
            # Remove this system from this ip list.
            # NOTE(review): raises ValueError if `host` is not in self.ips —
            # confirm the dump filename always matches a tracked entry.
            self.ips.remove(host)
            self.write_targets()
|
Function that gets called on each event from pyinotify.
|
def _get_lts_from_user(self, user):
    """ Get layertemplates owned by a user from the database.

    :param user: login name of the owner (matched against ``User.login``)
    :return: list of ``LayerTemplate`` rows, joined through ``User``
    """
    req = meta.Session.query(LayerTemplate).select_from(join(LayerTemplate, User))
    return req.filter(User.login==user).all()
|
Get layertemplates owned by a user from the database.
|
def libvlc_video_get_track_description(p_mi):
    '''Get the description of available video tracks.
    @param p_mi: media player.
    @return: list with description of available video tracks, or NULL on error.
    '''
    # Look up the cached ctypes wrapper, creating and registering it on
    # first use (generated-binding pattern used throughout this module).
    f = _Cfunctions.get('libvlc_video_get_track_description', None) or \
        _Cfunction('libvlc_video_get_track_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
|
Get the description of available video tracks.
@param p_mi: media player.
@return: list with description of available video tracks, or NULL on error.
|
def _ReadDataTypeDefinitionWithMembers(
    self, definitions_registry, definition_values,
    data_type_definition_class, definition_name, supports_conditions=False):
  """Reads a data type definition with members.
  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type definitions
        registry.
    definition_values (dict[str, object]): definition values.
    data_type_definition_class (str): data type definition class.
    definition_name (str): name of the definition.
    supports_conditions (Optional[bool]): True if conditions are supported
        by the data type definition.
  Returns:
    DataTypeDefinition: data type definition.
  Raises:
    DefinitionReaderError: if the definitions values are missing or if
        the format is incorrect.
  """
  members = definition_values.get('members', None)
  if not members:
    error_message = 'missing members'
    raise errors.DefinitionReaderError(definition_name, error_message)
  supported_definition_values = (
      self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE_WITH_MEMBERS)
  definition_object = self._ReadDataTypeDefinition(
      definitions_registry, definition_values, data_type_definition_class,
      definition_name, supported_definition_values)
  attributes = definition_values.get('attributes', None)
  if attributes:
    # Reject any attribute we do not explicitly support.
    unsupported_attributes = set(attributes.keys()).difference(
        self._SUPPORTED_ATTRIBUTES_STORAGE_DATA_TYPE)
    if unsupported_attributes:
      error_message = 'unsupported attributes: {0:s}'.format(
          ', '.join(unsupported_attributes))
      raise errors.DefinitionReaderError(definition_name, error_message)
    byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
    if byte_order not in definitions.BYTE_ORDERS:
      error_message = 'unsupported byte-order attribute: {0!s}'.format(
          byte_order)
      raise errors.DefinitionReaderError(definition_name, error_message)
    definition_object.byte_order = byte_order
  for member in members:
    section = member.get('section', None)
    if section:
      # A "section" entry groups subsequent members rather than defining
      # a data member itself.
      member_section_definition = data_types.MemberSectionDefinition(section)
      definition_object.AddSectionDefinition(member_section_definition)
    else:
      member_data_type_definition = self._ReadMemberDataTypeDefinitionMember(
          definitions_registry, member, definition_object.name,
          supports_conditions=supports_conditions)
      definition_object.AddMemberDefinition(member_data_type_definition)
  return definition_object
|
Reads a data type definition with members.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
|
def get_source(fileobj):
    """Translate fileobj into file contents.

    fileobj is either a string or a dict. If it's a string, that's the
    file contents. If it's a dict, then the filename key contains
    the name of the file whose contents we are to use.

    If the dict contains a true value for the key delete_after_use,
    the file should be deleted once read.
    """
    if not isinstance(fileobj, dict):
        return fileobj
    try:
        with io.open(fileobj["filename"], encoding="utf-8",
                     errors="ignore") as f:
            return f.read()
    finally:
        if fileobj.get('delete_after_use'):
            try:
                os.remove(fileobj["filename"])
            except OSError:  # pragma: no cover
                # Best-effort cleanup only: the file may already be gone
                # or be undeletable.  (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                pass
|
Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a string, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read.
|
def team_matches(self, team, event=None, year=None, simple=False, keys=False):
    """
    Get list of matches team has participated in.
    :param team: Team to get matches of.
    :param year: Year to get matches from.
    :param event: Event to get matches from.
    :param simple: Get only vital data.
    :param keys: Only get match keys rather than their full data.
    :return: List of string keys or Match objects.
    """
    team_str = self.team_key(team)
    suffix = '/simple' if simple else ''
    if event:
        if keys:
            return self._get('team/%s/event/%s/matches/keys' % (team_str, event))
        return [Match(raw) for raw in self._get('team/%s/event/%s/matches%s' % (team_str, event, suffix))]
    elif year:
        if keys:
            return self._get('team/%s/matches/%s/keys' % (team_str, year))
        return [Match(raw) for raw in self._get('team/%s/matches/%s%s' % (team_str, year, suffix))]
|
Get list of matches team has participated in.
:param team: Team to get matches of.
:param year: Year to get matches from.
:param event: Event to get matches from.
:param simple: Get only vital data.
:param keys: Only get match keys rather than their full data.
:return: List of string keys or Match objects.
|
def add_console_message(self, message_type, message):
    """add messages in the console_messages list

    The message is split on newlines, blank/whitespace-only lines are
    dropped, and each remaining line is stored as a
    (message_type, line) tuple.
    """
    meaningful = (part for part in message.split("\n") if part.strip())
    self.console_messages.extend((message_type, part) for part in meaningful)
|
add messages in the console_messages list
|
def append(self, element):
    '''
    Append a PileupElement to this Pileup. If an identical PileupElement is
    already part of this Pileup, do nothing.
    '''
    # Loci must agree; an element from another locus is a caller error.
    expected = self.locus
    assert element.locus == expected, (
        "Element locus (%s) != Pileup locus (%s)"
        % (element.locus, expected))
    # self.elements is a dict used as an ordered set: setdefault keeps
    # the first insertion and is a no-op for duplicates.
    self.elements.setdefault(element, None)
|
Append a PileupElement to this Pileup. If an identical PileupElement is
already part of this Pileup, do nothing.
|
def get_command_from_module(
        command_module,
        remote_connection: environ.RemoteConnection
):
    """
    Returns the execution command to use for the specified module, which may
    be different depending upon remote connection

    :param command_module: module providing execute / execute_remote
    :param remote_connection: the current remote connection state
    :return: the callable to execute for this module
    """
    # Prefer the remote variant only when a remote connection is active
    # and the module actually implements one.
    if remote_connection.active and hasattr(command_module, 'execute_remote'):
        return command_module.execute_remote
    return command_module.execute
|
Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return:
|
def connect(self, url=None):
    """
    Connect to the bugzilla instance with the given url. This is
    called by __init__ if a URL is passed. Or it can be called manually
    at any time with a passed URL.
    This will also read any available config files (see readconfig()),
    which may set 'user' and 'password', and others.
    If 'user' and 'password' are both set, we'll run login(). Otherwise
    you'll have to login() yourself before some methods will work.
    """
    # Drop any existing connection before building a new transport.
    if self._transport:
        self.disconnect()
    if url is None and self.url:
        url = self.url
    url = self.fix_url(url)
    self._transport = _RequestsTransport(
        url, self._cookiejar, sslverify=self._sslverify, cert=self.cert)
    self._transport.user_agent = self.user_agent
    self._proxy = _BugzillaServerProxy(url, self.tokenfile,
                                       self._transport)
    self.url = url
    # we've changed URLs - reload config
    self.readconfig()
    # Credentials may have come from the config files just read.
    if (self.user and self.password):
        log.info("user and password present - doing login()")
        self.login()
    if self.api_key:
        log.debug("using API key")
        self._proxy.use_api_key(self.api_key)
    # Probe the server version so feature detection can be set up.
    version = self._proxy.Bugzilla.version()["version"]
    log.debug("Bugzilla version string: %s", version)
    self._set_bz_version(version)
|
Connect to the bugzilla instance with the given url. This is
called by __init__ if a URL is passed. Or it can be called manually
at any time with a passed URL.
This will also read any available config files (see readconfig()),
which may set 'user' and 'password', and others.
If 'user' and 'password' are both set, we'll run login(). Otherwise
you'll have to login() yourself before some methods will work.
|
def _number_of_line(member_tuple):
"""Try to return the number of the first line of the definition of a
member of a module."""
member = member_tuple[1]
try:
return member.__code__.co_firstlineno
except AttributeError:
pass
try:
return inspect.findsource(member)[1]
except BaseException:
pass
for value in vars(member).values():
try:
return value.__code__.co_firstlineno
except AttributeError:
pass
return 0
|
Try to return the number of the first line of the definition of a
member of a module.
|
def _GetSectionNames(self, pefile_object):
"""Retrieves all PE section names.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[str]: names of the sections.
"""
section_names = []
for section in pefile_object.sections:
section_name = getattr(section, 'Name', b'')
# Ensure the name is decoded correctly.
try:
section_name = '{0:s}'.format(section_name.decode('unicode_escape'))
except UnicodeDecodeError:
section_name = '{0:s}'.format(repr(section_name))
section_names.append(section_name)
return section_names
|
Retrieves all PE section names.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[str]: names of the sections.
|
def service_messages(self, short_name):
    """Get the messages stored for a service.
    Args:
        short_name (string): The short name of the service to get messages for
    Returns:
        list(ServiceMessage): A list of the ServiceMessages stored for this service
    Raises:
        ArgumentError: if no service with that short name is registered
    """
    try:
        service = self.services[short_name]
    except KeyError:
        raise ArgumentError("Unknown service name", short_name=short_name)
    # Snapshot the messages so callers cannot mutate internal state.
    return list(service['state'].messages)
|
Get the messages stored for a service.
Args:
short_name (string): The short name of the service to get messages for
Returns:
list(ServiceMessage): A list of the ServiceMessages stored for this service
|
def is_null(*symbols):
    """ True if no nodes or all the given nodes are either
    None, NOP or empty blocks. For blocks this applies recursively
    """
    from symbols.symbol_ import Symbol

    def _is_null_node(sym):
        # None counts as null; anything that is not a Symbol does not.
        if sym is None:
            return True
        if not isinstance(sym, Symbol):
            return False
        if sym.token == 'NOP':
            return True
        if sym.token == 'BLOCK':
            # A block is null only if all its children are null.
            return is_null(*sym.children)
        return False

    return all(_is_null_node(sym) for sym in symbols)
|
True if no nodes or all the given nodes are either
None, NOP or empty blocks. For blocks this applies recursively
|
def logical_name(self):
    """The logical name of the seat.
    This is an identifier to group sets of devices within the compositor.
    Returns:
        str: The logical name of this seat.
    """
    name_ptr = self._libinput.libinput_seat_get_logical_name(self._handle)
    # The C library hands back a NUL-terminated char*; copy and decode it.
    return string_at(name_ptr).decode()
|
The logical name of the seat.
This is an identifier to group sets of devices within the compositor.
Returns:
str: The logical name of this seat.
|
def stream_interactions(self):
    """Generate a temporal ordered stream of interactions.

    Yields
    ------
    tuple
        4-tuples of (node, node, op, timestamp), in ascending
        timestamp order.
    """
    for timestamp in sorted(self.time_to_edge):
        for interaction in self.time_to_edge[timestamp]:
            yield (interaction[0], interaction[1], interaction[2], timestamp)
|
Generate a temporal ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns a 4-tuples of (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)]
|
def add(self, *tasks):
    """ Interfaces the GraphNode `add` method

    Unwraps each task to its underlying node, forwards them to
    ``self.node.add`` and returns self so calls can be chained.
    """
    self.node.add(*(task.node for task in tasks))
    return self
|
Interfaces the GraphNode `add` method
|
def _get_multiparts(response):
    """
    Split a multipart HTTP response body into its individual parts.
    From this
    'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8'
    get this
    --874e43d27ec6d83f30f37841bdaf90c7
    and use it to split the body; returns the list of part payloads.
    """
    # Extract the boundary token from the Content-Type header.
    boundary = None
    for part in response.headers.get('Content-Type', '').split(';'):
        if 'boundary=' in part:
            boundary = '--{}'.format(part.split('=', 1)[1].strip('\"'))
            break
    if not boundary:
        raise ParseError("Was not able to find the boundary between objects in a multipart response")
    if response.content is None:
        return []
    response_string = response.content
    if six.PY3:
        # Python3 returns bytes, decode for string operations
        response_string = response_string.decode('latin-1')
    # help bad responses be more multipart compliant
    whole_body = response_string.strip('\r\n')
    # NOTE(review): str.strip(boundary) removes any *characters* found in
    # the boundary string from both ends — not the boundary as a prefix —
    # so legitimate leading/trailing body characters can be eaten.
    # Confirm this leniency is intended before changing it.
    no_front_boundary = whole_body.strip(boundary)
    # The boundary comes with some characters
    multi_parts = []
    for part in no_front_boundary.split(boundary):
        multi_parts.append(part.strip('\r\n'))
    return multi_parts
|
From this
'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8'
get this
--874e43d27ec6d83f30f37841bdaf90c7
|
def victim_pivot(self, victim_resource):
    """Pivot point on Victims for this resource.

    Returns a copy of this resource whose request URI is nested under the
    provided victim resource's URI, so all *resources* (group, indicators,
    task, etc) associated with that victim can be retrieved, e.g.::

        /v2/victims/{resourceId}/groups/{resourceType}
        /v2/victims/{resourceId}/indicators/{resourceType}/{uniqueId}

    Args:
        victim_resource: The victim resource (pivot) to nest under.
    """
    pivoted = self.copy()
    pivoted._request_uri = '{}/{}'.format(
        victim_resource.request_uri, pivoted._request_uri)
    return pivoted
|
Pivot point on Victims for this resource.
This method will return all *resources* (group, indicators, task,
etc) for this resource that are associated with the provided victim id.
**Example Endpoints URI's**
+--------------+---------------------------------------------------------------+
| HTTP Method | API Endpoint URI's |
+==============+===============================================================+
| GET | /v2/victims/{resourceId}/groups/{resourceType} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/groups/{resourceType}/{uniqueId} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/indicators/{resourceType} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/indicators/{resourceType}/{uniqueId} |
+--------------+---------------------------------------------------------------+
Args:
resource_id (integer): The resource pivot id (victim id).
|
def getall(self, key, default=_marker):
    """Return a list of all values matching *key*.

    Args:
        key: Header name; normalized for lookup via ``self._title``.
        default: Value returned when no entry matches. If omitted,
            a missing key raises ``KeyError`` instead.

    Returns:
        list: All values stored under *key*, in insertion order.

    Raises:
        KeyError: If *key* is absent and no *default* was given.
    """
    identity = self._title(key)
    res = [v for i, k, v in self._impl._items if i == identity]
    if res:
        return res
    # No matches: fall back to the caller-supplied default, if any.
    # (The previous version re-tested ``not res`` here redundantly.)
    if default is not _marker:
        return default
    raise KeyError('Key not found: %r' % key)
|
Return a list of all values matching the key.
|
def compile_pattern_list(self, patterns):
    '''Compile a pattern-string, or a list of pattern-strings, into a list
    of compiled regular expressions (plus the EOF/TIMEOUT sentinels).

    Each entry may be a string, EOF, TIMEOUT, or an already-compiled
    regex. ``patterns`` may also be None, which yields an empty list
    (useful when waiting only for an EOF or TIMEOUT condition).

    This is what expect() uses before delegating to expect_list()::

        cpl = self.compile_pattern_list(pl)
        return self.expect_list(cpl, timeout)

    When calling expect() inside a loop it is more efficient to compile
    the pattern list once up front and call expect_list() directly::

        cpl = self.compile_pattern_list(my_pattern)
        while some_condition:
            ...
            i = self.expect_list(cpl, timeout)
            ...
    '''
    if patterns is None:
        return []
    if not isinstance(patterns, list):
        patterns = [patterns]
    # '.' should match newlines too; honour the ignorecase option.
    flags = re.DOTALL | re.IGNORECASE if self.ignorecase else re.DOTALL
    compiled = []
    for pattern in patterns:
        if isinstance(pattern, self.allowed_string_types):
            coerced = self._coerce_expect_string(pattern)
            compiled.append(re.compile(coerced, flags))
        elif pattern is EOF or pattern is TIMEOUT:
            # Sentinels pass through unchanged.
            compiled.append(pattern)
        elif isinstance(pattern, type(re.compile(''))):
            compiled.append(pattern)
        else:
            self._pattern_type_err(pattern)
    return compiled
|
This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
|
def _clearPrices(self):
    """ Clears prices according to auction type.

    DISCRIMINATIVE: each offer/bid is cleared at its own ``price``.
    FIRST_PRICE: each offer/bid is cleared at its ``lmbda`` value.

    Raises:
        ValueError: If ``self.auctionType`` is neither of the above.
    """
    for offbid in self.offers + self.bids:
        if self.auctionType == DISCRIMINATIVE:
            offbid.clearedPrice = offbid.price
        elif self.auctionType == FIRST_PRICE:
            offbid.clearedPrice = offbid.lmbda
        else:
            # Previously raised a bare ValueError with no context.
            raise ValueError(
                "Unsupported auction type: %r" % (self.auctionType,))
|
Clears prices according to auction type.
|
def collect(self):
    """
    Overrides the Collector.collect method
    """
    # Bail out early if psutil could not be imported at module level.
    if psutil is None:
        self.log.error('Unable to import module psutil')
        return {}
    # Publish every stat of every configured port as '<port>.<stat>'.
    for port_name, port_cfg in self.ports.iteritems():
        port_number = int(port_cfg['number'])
        port_stats = get_port_stats(port_number)
        for stat_name, stat_value in port_stats.iteritems():
            self.publish('%s.%s' % (port_name, stat_name), stat_value)
|
Overrides the Collector.collect method
|
def enumerate_dynamic_imports(tokens):
    """
    Returns a list of module names imported via ``import`` statements in
    *tokens* (a sequence of ``tokenize`` tokens), in first-seen order and
    without duplicates.

    .. note:: Despite the historical docstring, this returns a flat list
       of module names, not a ``{name: [modules]}`` mapping.

    Example:
        >>> enumerate_dynamic_imports(tokens)
        ['zlib', 'base64']
    """
    imported_modules = []
    import_line = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
        elif token_string == "import":
            # An import statement starts a line when it is the very first
            # token or directly follows a NEWLINE.  (The old code indexed
            # tokens[index-1] inside try/except IndexError, but a negative
            # index never raises IndexError, so the first-token case was
            # silently mishandled.)
            if index == 0 or tokens[index - 1][0] == tokenize.NEWLINE:
                import_line = True
        elif import_line:
            if token_type == tokenize.NAME and tokens[index + 1][1] != 'as':
                if token_string not in reserved_words:
                    if token_string not in imported_modules:
                        imported_modules.append(token_string)
    return imported_modules
|
Returns a dictionary of all dynamically imported modules (those inside of
classes or functions) in the form of {<func or class name>: [<modules>]}
Example:
>>> enumerate_dynamic_modules(tokens)
{'myfunc': ['zlib', 'base64']}
|
def handle_moban_file(moban_file, options):
    """
    Act upon the default moban file: validate it, dispatch to the handler
    for its declared version, then persist the template hashes.
    """
    configurations = load_data(None, moban_file)
    if configurations is None:
        raise exceptions.MobanfileGrammarException(
            constants.ERROR_INVALID_MOBAN_FILE % moban_file
        )
    # At least one of the 'targets'/'copy' sections must be present.
    has_targets = constants.LABEL_TARGETS in configurations
    has_copy = constants.LABEL_COPY in configurations
    if not (has_targets or has_copy):
        raise exceptions.MobanfileGrammarException(
            constants.ERROR_NO_TARGETS % moban_file
        )
    check_none(configurations, moban_file)
    version = configurations.get(
        constants.MOBAN_VERSION, constants.DEFAULT_MOBAN_VERSION
    )
    if version != constants.DEFAULT_MOBAN_VERSION:
        raise exceptions.MobanfileGrammarException(
            constants.MESSAGE_FILE_VERSION_NOT_SUPPORTED % version
        )
    mobanfile.handle_moban_file_v1(configurations, options)
    HASH_STORE.save_hashes()
|
act upon default moban file
|
def execute_proc(procname, args=()):
    """
    Execute a stored procedure. Returns the number of affected rows.
    """
    ctx = Context.current()
    with ctx.mdr:
        cursor = ctx.execute_proc(procname, args)
        # Capture the row count before the cursor is closed.
        affected = cursor.rowcount
        _safe_close(cursor)
    return affected
|
Execute a stored procedure. Returns the number of affected rows.
|
def remove_formatting_codes(line, irc=False):
    """Remove girc control codes from the given line.

    Control sequences begin with ``$``:

    * ``$$``      -- escaped dollar; one literal ``$`` is kept
    * ``$cN[,M]`` -- colour code with up to two digits per fore/background
    * ``$c[...]`` -- colour code with a bracketed name list, skipped whole
    * ``${...}``  -- named code; ``{$}`` expands to a literal ``$``
    * any other ``$X`` single-character code is dropped

    If *irc* is true the line is first passed through ``escape`` so raw
    IRC formatting bytes become ``$``-style codes before stripping.
    """
    if irc:
        line = escape(line)
    new_line = ''
    # Consume the input one character at a time; ``line`` shrinks as we go.
    while len(line) > 0:
        try:
            if line[0] == '$':
                line = line[1:]
                if line[0] == '$':
                    # Escaped dollar: keep one literal '$'.
                    new_line += '$'
                    line = line[1:]
                elif line[0] == 'c':
                    # Colour code: up to two foreground digits, optional
                    # ',' plus up to two background digits.
                    line = line[1:]
                    if line[0].isdigit():
                        line = line[1:]
                        if line[0].isdigit():
                            line = line[1:]
                        if line[0] == ',':
                            line = line[1:]
                            if line[0].isdigit():
                                line = line[1:]
                                if line[0].isdigit():
                                    line = line[1:]
                    elif line[0] == ',':
                        # Background-only form: '$c,N[N]'.
                        line = line[1:]
                        if line[0].isdigit():
                            line = line[1:]
                            if line[0].isdigit():
                                line = line[1:]
                    if line[0] == '[':
                        # Skip a bracketed colour-name list wholesale.
                        while line[0] != ']':
                            line = line[1:]
                        line = line[1:]
                elif line[0] == '{':
                    if line[:3] == '{$}':
                        # '{$}' is the escaped-dollar named code.
                        new_line += '$'
                        line = line[3:]
                        continue
                    # Otherwise drop everything up to the closing '}'.
                    while line[0] != '}':
                        line = line[1:]
                    line = line[1:]
                else:
                    # Unknown single-character code: drop it.
                    line = line[1:]
            else:
                new_line += line[0]
                line = line[1:]
        except IndexError:
            # Ran off the end mid-code; the while condition ends the loop.
            continue
    return new_line
|
Remove girc control codes from the given line.
|
def _init_metadata(self):
    """Initialize the metadata dictionaries for the inline-region and
    choice-id elements of this form (stub)."""
    # All element ids share the form's authority and namespace.
    authority = self.my_osid_object_form._authority
    namespace = self.my_osid_object_form._namespace
    self._inline_regions_metadata = {
        'element_id': Id(authority, namespace, 'inline_regions'),
        'element_label': 'set of inline regions',
        'instructions': 'submit correct choice for answer for each region',
        'required': True,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_object_values': [{}],
        'syntax': 'OBJECT',
    }
    self._choice_ids_metadata = {
        'element_id': Id(authority, namespace, 'choice_ids'),
        'element_label': 'response set with inline regions',
        'instructions': 'submit correct choice for answer for each region',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_object_values': [[]],
        'syntax': 'OBJECT',
    }
    self._choice_id_metadata = {
        'element_id': Id(authority, namespace, 'choice_id'),
        'element_label': 'response set',
        'instructions': 'submit correct choice for answer',
        'required': True,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_id_values': [''],
        'syntax': 'ID',
        'id_set': [],
    }
|
stub
|
def modify(connect_spec, dn, directives):
    '''Modify an entry in an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param directives:
        Iterable of ``(op, attr, vals)`` tuples describing the changes,
        where ``op`` is one of ``'add'``, ``'delete'`` (all values if
        ``vals`` is empty), or ``'replace'``; ``attr`` names the
        attribute; and ``vals`` is an iterable of values.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.modify "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
        directives="('add', 'example', ['example_val'])"
    '''
    conn = connect(connect_spec)
    # Build the modlist for modify_s(), copying each directive's value
    # iterable into a list so the caller's objects are never mutated.
    modlist = []
    for op, attr, vals in directives:
        mod_op = getattr(ldap, 'MOD_' + op.upper())
        values = list(vals)
        if attr == 'unicodePwd':
            # Active Directory expects a specially-encoded password value.
            values = [_format_unicode_password(x) for x in values]
        modlist.append((mod_op, attr, values))
    modlist = salt.utils.data.decode(modlist, to_str=True, preserve_tuples=True)
    try:
        conn.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
|
Modify an entry in an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param directives:
Iterable of directives that indicate how to modify the entry.
Each directive is a tuple of the form ``(op, attr, vals)``,
where:
* ``op`` identifies the modification operation to perform.
One of:
* ``'add'`` to add one or more values to the attribute
* ``'delete'`` to delete some or all of the values from the
attribute. If no values are specified with this
operation, all of the attribute's values are deleted.
Otherwise, only the named values are deleted.
* ``'replace'`` to replace all of the attribute's values
with zero or more new values
* ``attr`` names the attribute to modify
* ``vals`` is an iterable of values to add or delete
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.modify "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
directives="('add', 'example', ['example_val'])"
|
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
    """Expand python variables in a string.

    The depth argument indicates how many frames above the caller should
    be walked to look for the local namespace where to expand variables.
    The global namespace for expansion is always the user's interactive
    namespace.
    """
    # Merge the interactive namespace with the caller's locals; the
    # caller's frame sits depth+1 levels above this one.
    namespace = self.user_ns.copy()
    namespace.update(sys._getframe(depth + 1).f_locals)
    namespace.pop('self', None)
    try:
        return formatter.format(cmd, **namespace)
    except Exception:
        # If the formatter couldn't handle the string, leave it unchanged.
        return cmd
|
Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
|
def trash(self, request, **kwargs):
    """Pseudo-deletes a `Content` instance and removes it from the ElasticSearch index.

    The content is not actually deleted; it is merely hidden by being
    removed from the ES index (``indexed = False``).

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    content = self.get_object()
    # Clearing the flag drops the object from the search index on save.
    content.indexed = False
    content.save()
    LogEntry.objects.log(request.user, content, "Trashed")
    return Response({"status": "Trashed"})
|
Pseudo-deletes a `Content` instance and removes it from the ElasticSearch index.
Content is not actually deleted; it is merely hidden by being removed from the ES index.
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response`
|
def genl_msg_parser(ops, who, nlh, pp):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L85.

    Positional arguments:
    ops -- nl_cache_ops class instance.
    who -- sockaddr_nl class instance.
    nlh -- nlmsghdr class instance.
    pp -- nl_parser_param class instance.

    Returns:
    Integer, cmd_msg_parser() output.
    """
    # Generic-netlink ops must be attached before messages can be parsed.
    if ops.co_genl is None:
        raise BUG
    result = cmd_msg_parser(who, nlh, ops.co_genl, ops, pp)
    return int(result)
|
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L85.
Positional arguments:
ops -- nl_cache_ops class instance.
who -- sockaddr_nl class instance.
nlh -- nlmsghdr class instance.
pp -- nl_parser_param class instance.
Returns:
Integer, cmd_msg_parser() output.
|
def parse_basic_type_str(old_normalizer):
    """
    Modifies a normalizer to automatically parse the incoming type string.

    If that type string is unparsable, or does not represent a basic
    (i.e. non-tuple) type, the wrapped normalizer leaves its input
    untouched.
    """
    @functools.wraps(old_normalizer)
    def new_normalizer(type_str, data):
        try:
            abi_type = parse(type_str)
        except ParseError:
            # Unparsable type string: pass through unchanged.
            return type_str, data
        if isinstance(abi_type, BasicType):
            return old_normalizer(abi_type, type_str, data)
        # Non-basic (tuple) type: pass through unchanged.
        return type_str, data
    return new_normalizer
|
Modifies a normalizer to automatically parse the incoming type string. If
that type string does not represent a basic type (i.e. non-tuple type) or is
not parsable, the normalizer does nothing.
|
def cat_trials(x3d):
    """Concatenate trials along the time axis.

    Parameters
    ----------
    x3d : array, shape (t, m, n)
        Segmented input data with t trials, m signals, and n samples.

    Returns
    -------
    x2d : array, shape (m, t * n)
        Trials are concatenated along the second axis.

    See also
    --------
    cut_segments : Cut segments from continuous data.

    Examples
    --------
    >>> x = np.random.randn(6, 4, 150)
    >>> y = cat_trials(x)
    >>> y.shape
    (4, 900)
    """
    data = atleast_3d(x3d)
    t, m, n = data.shape
    # Put the signal axis first, then flatten (trial, sample) in C order:
    # for each signal, trial 0's samples come first, then trial 1's, etc.
    return data.transpose(1, 0, 2).reshape(m, t * n)
|
Concatenate trials along time axis.
Parameters
----------
x3d : array, shape (t, m, n)
Segmented input data with t trials, m signals, and n samples.
Returns
-------
x2d : array, shape (m, t * n)
Trials are concatenated along the second axis.
See also
--------
cut_segments : Cut segments from continuous data.
Examples
--------
>>> x = np.random.randn(6, 4, 150)
>>> y = cat_trials(x)
>>> y.shape
(4, 900)
|
def from_tuples_array(tuples):
    """
    Creates a new StringValueMap from a list of key-value pairs called tuples.
    The method is similar to [[fromTuples]] but tuples are passed as array instead of parameters.

    :param tuples: a list of values where odd elements are keys and the following even elements are values
    :return: a newly created StringValueMap.
    """
    result = StringValueMap()
    # ``is None`` instead of ``== None`` (PEP 8); empty input -> empty map.
    if tuples is None or len(tuples) == 0:
        return result
    # Walk the flat list two elements at a time; a trailing key without a
    # value is ignored, matching the original while-loop behaviour.
    for index in range(0, len(tuples) - 1, 2):
        key = StringConverter.to_string(tuples[index])
        value = StringConverter.to_nullable_string(tuples[index + 1])
        result.put(key, value)
    return result
|
Creates a new StringValueMap from a list of key-value pairs called tuples.
The method is similar to [[fromTuples]] but tuples are passed as array instead of parameters.
:param tuples: a list of values where odd elements are keys and the following even elements are values
:return: a newly created StringValueMap.
|
def stop(self):
    """Send a TX_DELETE message to cancel this task.

    This deletes the transmission entry for the CAN message with the
    configured CAN identifier. A TX_DELETE command consists of the
    {[bcm_msg_head]} header only.
    """
    log.debug("Stopping periodic task")
    frame = build_bcm_tx_delete_header(self.can_id_with_flags, self.flags)
    send_bcm(self.bcm_socket, frame)
|
Send a TX_DELETE message to cancel this task.
This will delete the entry for the transmission of the CAN-message
with the specified can_id CAN identifier. The message length for the command
TX_DELETE is {[bcm_msg_head]} (only the header).
|
def write(self, target, *args, **kwargs):
    """Write this `SegmentList` to a file.

    Additional arguments and keywords depend on the output format; see
    the online documentation for full details of each format.

    Parameters
    ----------
    target : `str`
        output filename
    """
    # Delegate format detection and serialization to the I/O registry.
    return io_registry.write(self, target, *args, **kwargs)
|
Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----
|
def subdivide(network, pores, shape, labels=[]):
    r'''
    Trims the given pores and replaces them by cubic networks with the
    sent shape.

    Parameters
    ----------
    network : OpenPNM Network Object
    pores : array_like
        The first group of pores to be replaced
    shape : array_like
        The shape of cubic networks in the target locations
    labels : list, optional
        Labels applied to the newly added pores; defaults to
        ``['pore.subdivided_<new_net.name>']``.

    Notes
    -----
    - It works only for cubic networks.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 6, 5], spacing=0.001)
    >>> pn.Np
    150
    >>> nano_pores = [2, 13, 14, 15]
    >>> op.topotools.subdivide(network=pn, pores=nano_pores, shape=[4, 7, 3],
    ...                        labels='nano')
    >>> pn.Np
    482
    '''
    # NOTE(review): ``labels=[]`` is a mutable default, but it is only
    # ever rebound (never mutated in place) below, so it is harmless here.
    mro = network._mro()
    if 'Cubic' not in mro:
        raise Exception('Subdivide is only supported for Cubic Networks')
    from openpnm.network import Cubic
    pores = network._parse_indices(pores)
    # Checks to find boundary pores in the selected pores
    if 'pore.boundary' in network.labels():
        if (sp.in1d(pores, network.pores('boundary'))).any():
            raise Exception('boundary pores cannot be subdivided!')
    # A one-shot flag: a network may only be subdivided once.
    if not hasattr(network, '_subdivide_flag'):
        network._subdivide_flag = True
    else:
        raise Exception('The network has subdivided pores, so the method \
                        does not support another subdivision')
    # Assigning right shape and division
    if sp.size(shape) != 2 and sp.size(shape) != 3:
        raise Exception('Subdivide not implemented for Networks other than 2D \
                        and 3D')
    elif sp.size(shape) == 3 and 1 not in shape:
        div = sp.array(shape, ndmin=1)
        single_dim = None
    else:
        # 2D case (or 3D with a singleton axis): find the flat dimension
        # and force a division of 1 along it.
        single_dim = sp.where(sp.array(network.shape) == 1)[0]
        if sp.size(single_dim) == 0:
            single_dim = None
        if sp.size(shape) == 3:
            div = sp.array(shape, ndmin=1)
        else:
            div = sp.zeros(3, dtype=sp.int32)
            if single_dim is None:
                dim = 2
            else:
                dim = single_dim
            div[dim] = 1
            # Fill the remaining (non-singleton) axes with the 2D shape.
            div[-sp.array(div, ndmin=1, dtype=bool)] = sp.array(shape, ndmin=1)
    # Creating small network and handling labels
    networkspacing = network.spacing
    new_netspacing = networkspacing/div
    new_net = Cubic(shape=div, spacing=new_netspacing)
    main_labels = ['left', 'right', 'front', 'back', 'top', 'bottom']
    if single_dim is not None:
        label_groups = sp.array([['front', 'back'],
                                 ['left', 'right'],
                                 ['top', 'bottom']])
        non_single_labels = label_groups[sp.array([0, 1, 2]) != single_dim]
    # Temporary 'pore.surface_<face>' labels mark the stitching faces.
    for l in main_labels:
        new_net['pore.surface_' + l] = False
        network['pore.surface_' + l] = False
        if single_dim is None:
            new_net['pore.surface_' + l][new_net.pores(labels=l)] = True
        else:
            for ind in [0, 1]:
                loc = (non_single_labels[ind] == l)
                temp_pores = new_net.pores(non_single_labels[ind][loc])
                new_net['pore.surface_' + l][temp_pores] = True
    old_coords = sp.copy(new_net['pore.coords'])
    if labels == []:
        labels = ['pore.subdivided_' + new_net.name]
    for P in pores:
        # Shifting the new network to the right location and attaching it to
        # the main network
        shift = network['pore.coords'][P] - networkspacing/2
        new_net['pore.coords'] += shift
        Pn = network.find_neighbor_pores(pores=P)
        try:
            Pn_new_net = network.pores(labels)
        except KeyError:
            Pn_new_net = []
        Pn_old_net = Pn[~sp.in1d(Pn, Pn_new_net)]
        Np1 = network.Np
        extend(pore_coords=new_net['pore.coords'],
               throat_conns=new_net['throat.conns'] + Np1,
               labels=labels, network=network)
        # Moving the temporary labels to the big network
        for l in main_labels:
            network['pore.surface_'+l][Np1:] = new_net['pore.surface_'+l]
        # Stitching the old pores of the main network to the new extended pores
        surf_pores = network.pores('surface_*')
        surf_coord = network['pore.coords'][surf_pores]
        for neighbor in Pn:
            neighbor_coord = network['pore.coords'][neighbor]
            dist = [round(sp.inner(neighbor_coord-x, neighbor_coord-x),
                          20) for x in surf_coord]
            nearest_neighbor = surf_pores[dist == sp.amin(dist)]
            if neighbor in Pn_old_net:
                coplanar_labels = network.labels(pores=nearest_neighbor)
                new_neighbors = network.pores(coplanar_labels,
                                              mode='xnor')
                # This might happen to the edge of the small network
                if sp.size(new_neighbors) == 0:
                    # NOTE(review): this rebinds ``labels`` (the new-pore
                    # label list used above and in later iterations) --
                    # looks unintentional; confirm upstream.
                    labels = network.labels(pores=nearest_neighbor,
                                            mode='xnor')
                    common_label = [l for l in labels if 'surface_' in l]
                    new_neighbors = network.pores(common_label)
            elif neighbor in Pn_new_net:
                new_neighbors = nearest_neighbor
            connect_pores(network=network, pores1=neighbor,
                          pores2=new_neighbors, labels=labels)
        # Removing temporary labels
        for l in main_labels:
            network['pore.surface_' + l] = False
        new_net['pore.coords'] = sp.copy(old_coords)
    label_faces(network=network)
    for l in main_labels:
        del network['pore.surface_'+l]
    trim(network=network, pores=pores)
    ws = network.project.workspace
    ws.close_project(new_net.project)
|
r'''
It trims the given pores and replaces them with cubic networks of the specified shape.
Parameters
----------
network : OpenPNM Network Object
pores : array_like
The first group of pores to be replaced
shape : array_like
The shape of cubic networks in the target locations
Notes
-----
- It works only for cubic networks.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 6, 5], spacing=0.001)
>>> pn.Np
150
>>> nano_pores = [2, 13, 14, 15]
>>> op.topotools.subdivide(network=pn, pores=nano_pores, shape=[4, 7, 3],
... labels='nano')
>>> pn.Np
482
|
def print_objective(x):
    """Calculate the objective value and prints it."""
    total = 0
    for minp, maxp in rectangles:
        # Project x onto the rectangle, then accumulate the distance to
        # the projection (zero when x lies inside the rectangle).
        x_projected = np.minimum(np.maximum(x, minp), maxp)
        total += (x - x_projected).norm()
    print('Point = [{:.4f}, {:.4f}], Value = {:.4f}'.format(x[0], x[1], total))
|
Calculate the objective value and prints it.
|
def collapse_pane(self, side):
    """
    Toggle collapsing the left or right panes.
    """
    # TODO: this is too tied to one configuration, need to figure
    # out how to generalize this
    hsplit = self.w['hpnl']
    lsize, msize, rsize = hsplit.get_sizes()
    if self._lsize is None:
        # First toggle: remember the initial pane sizes for restoring.
        self._lsize, self._rsize = lsize, rsize
    self.logger.debug("left=%d mid=%d right=%d" % (
        lsize, msize, rsize))
    if side == 'right':
        if rsize < 10:
            # Pane is collapsed: restore its remembered size.
            rsize = self._rsize
            msize -= rsize
        else:
            # Pane is open: remember its size and collapse it.
            self._rsize = rsize
            msize += rsize
            rsize = 0
    elif side == 'left':
        if lsize < 10:
            # Pane is collapsed: restore its remembered size.
            lsize = self._lsize
            msize -= lsize
        else:
            # Pane is open: remember its size and collapse it.
            self._lsize = lsize
            msize += lsize
            lsize = 0
    hsplit.set_sizes([lsize, msize, rsize])
|
Toggle collapsing the left or right panes.
|
def icon(self):
    """
    Returns the icon filepath for this plugin.

    :return     <str>
    """
    raw = self._icon
    if not raw:
        return ''
    # Expand environment variables and '~' before resolving the path.
    resolved = os.path.expandvars(os.path.expanduser(raw))
    if resolved.startswith('.'):
        # Relative icon paths are resolved against the plugin's own file.
        base_path = os.path.dirname(self.filepath())
        resolved = os.path.abspath(os.path.join(base_path, resolved))
    return resolved
|
Returns the icon filepath for this plugin.
:return <str>
|
def get_registry_records_by_keyword(keyword=None):
    """Get all the registry records (names and values) whose name
    contains the specified keyword or, if keyword is None, return
    all registry items

    :param keyword: The keyword that has to be contained in the record name
    :type keyword: str or None
    :returns: Dictionary mapping the names of the found records to its values
    """
    portal_reg = ploneapi.portal.get_tool(name="portal_registry")
    found_registers = {}
    # Lower-case the keyword once, not once per record; a None keyword
    # matches everything (the two original branches had identical bodies).
    keyword_lower = keyword.lower() if keyword is not None else None
    for record in portal_reg.records:
        if keyword_lower is None or keyword_lower in record.lower():
            found_registers[record] = api.get_registry_record(record)
    return found_registers
|
Get all the registry records (names and values) whose name
contains the specified keyword or, if keyword is None, return
all registry items
:param keyword: The keyword that has to be contained in the record name
:type keyword: str or None
:returns: Dictionary mapping the names of the found records to its values
|
def _updateVariantAnnotationSets(self, variantFile, dataUrl):
    """
    Updates the variant annotation set associated with this variant using
    information in the specified pysam variantFile.

    Detects the annotation source (SnpEff, or VEP v77/v82) from the VCF
    header and, if one is found, builds and attaches an
    HtslibVariantAnnotationSet populated from the file.

    :param variantFile: an open pysam VariantFile whose header is scanned.
    :param dataUrl: the file's URL/path; used only in error messages.
    :raises ValueError: for unsupported VEP versions, or when CSQ/ANN
        INFO fields are present but the annotation source is unrecognised.
    """
    # TODO check the consistency of this between VCF files.
    if not self.isAnnotated():
        annotationType = None
        # Scan GENERIC header records for known annotator signatures.
        for record in variantFile.header.records:
            if record.type == "GENERIC":
                if record.key == "SnpEffVersion":
                    annotationType = ANNOTATIONS_SNPEFF
                elif record.key == "VEP":
                    # The VEP header value begins with the version token.
                    version = record.value.split()[0]
                    # TODO we need _much_ more sophisticated processing
                    # of VEP versions here. When do they become
                    # incompatible?
                    if version == "v82":
                        annotationType = ANNOTATIONS_VEP_V82
                    elif version == "v77":
                        annotationType = ANNOTATIONS_VEP_V77
                    else:
                        # TODO raise a proper typed exception there with
                        # the file name as an argument.
                        raise ValueError(
                            "Unsupported VEP version {} in '{}'".format(
                                version, dataUrl))
        if annotationType is None:
            infoKeys = variantFile.header.info.keys()
            # CSQ/ANN INFO fields imply annotations produced by a tool
            # we did not recognise above.
            if 'CSQ' in infoKeys or 'ANN' in infoKeys:
                # TODO likewise, we want a properly typed exception that
                # we can throw back to the repo manager UI and display
                # as an import error.
                raise ValueError(
                    "Unsupported annotations in '{}'".format(dataUrl))
        if annotationType is not None:
            vas = HtslibVariantAnnotationSet(self, self.getLocalId())
            vas.populateFromFile(variantFile, annotationType)
            self.addVariantAnnotationSet(vas)
|
Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile.
|
def list_keyvaults(access_token, subscription_id, rgname):
    '''Lists key vaults in the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. 200 OK.
    '''
    # Assemble the ARM REST endpoint for the KeyVault resource provider.
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourcegroups/' + rgname
        + '/providers/Microsoft.KeyVault/vaults'
        + '?api-version=' + KEYVAULT_API
    )
    return do_get_next(endpoint, access_token)
|
Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK.
|
def do_help(self, line):
    """Display interactive help describing the perfdump CLI query syntax.

    NOTE: uses Python 2 ``print`` statements, like the rest of this file.
    """
    print ""
    print "Perfdump CLI provides a handful of simple ways to query your"
    print "performance data."
    print ""
    print "The simplest queries are of the form:"
    print ""
    print "\t[slowest|fastest] [tests|setups]"
    print ""
    print "For example:"
    print ""
    print "\tperfdump > slowest tests"
    print ""
    print "Prints the slowest 10 tests"
    print ""
    print "Additional grouping of results can be request."
    print ""
    print "\tperfdump > slowest tests groupby file"
    print ""
    print "Grouping options include:"
    print ""
    print "\tfile | module | class | function"
    print ""
|
Displays help information.
|
def title(self) -> str:
    """Get/Set title string of this document."""
    element = _find_tag(self.head, 'title')
    # A missing (or falsy) <title> element yields an empty title.
    return element.textContent if element else ''
|
Get/Set title string of this document.
|
def get(self, mode, metric):
    """Get the history for the given metric and mode.

    Returns a (copied) list of recorded values, or an empty list (after
    logging) when either the mode or the metric has no history.  The
    previous version only guarded the mode and raised KeyError for a
    missing metric, contradicting its own log message.
    """
    if mode not in self._values or metric not in self._values[mode]:
        logging.info("Metric %s not found for mode %s", metric, mode)
        return []
    return list(self._values[mode][metric])
|
Get the history for the given metric and mode.
|
def serial_udb_extra_f4_send(self, sue_ROLL_STABILIZATION_AILERONS, sue_ROLL_STABILIZATION_RUDDER, sue_PITCH_STABILIZATION, sue_YAW_STABILIZATION_RUDDER, sue_YAW_STABILIZATION_AILERON, sue_AILERON_NAVIGATION, sue_RUDDER_NAVIGATION, sue_ALTITUDEHOLD_STABILIZED, sue_ALTITUDEHOLD_WAYPOINT, sue_RACING_MODE, force_mavlink1=False):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F4: format

    sue_ROLL_STABILIZATION_AILERONS : Serial UDB Extra Roll Stabilization with Ailerons Enabled (uint8_t)
    sue_ROLL_STABILIZATION_RUDDER   : Serial UDB Extra Roll Stabilization with Rudder Enabled (uint8_t)
    sue_PITCH_STABILIZATION         : Serial UDB Extra Pitch Stabilization Enabled (uint8_t)
    sue_YAW_STABILIZATION_RUDDER    : Serial UDB Extra Yaw Stabilization using Rudder Enabled (uint8_t)
    sue_YAW_STABILIZATION_AILERON   : Serial UDB Extra Yaw Stabilization using Ailerons Enabled (uint8_t)
    sue_AILERON_NAVIGATION          : Serial UDB Extra Navigation with Ailerons Enabled (uint8_t)
    sue_RUDDER_NAVIGATION           : Serial UDB Extra Navigation with Rudder Enabled (uint8_t)
    sue_ALTITUDEHOLD_STABILIZED     : Serial UDB Extra Type of Altitude Hold when in Stabilized Mode (uint8_t)
    sue_ALTITUDEHOLD_WAYPOINT       : Serial UDB Extra Type of Altitude Hold when in Waypoint Mode (uint8_t)
    sue_RACING_MODE                 : Serial UDB Extra Firmware racing mode enabled (uint8_t)
    '''
    # Encode the fields into a SERIAL_UDB_EXTRA F4 message and transmit it.
    return self.send(self.serial_udb_extra_f4_encode(sue_ROLL_STABILIZATION_AILERONS, sue_ROLL_STABILIZATION_RUDDER, sue_PITCH_STABILIZATION, sue_YAW_STABILIZATION_RUDDER, sue_YAW_STABILIZATION_AILERON, sue_AILERON_NAVIGATION, sue_RUDDER_NAVIGATION, sue_ALTITUDEHOLD_STABILIZED, sue_ALTITUDEHOLD_WAYPOINT, sue_RACING_MODE), force_mavlink1=force_mavlink1)
|
Backwards compatible version of SERIAL_UDB_EXTRA F4: format
sue_ROLL_STABILIZATION_AILERONS : Serial UDB Extra Roll Stabilization with Ailerons Enabled (uint8_t)
sue_ROLL_STABILIZATION_RUDDER : Serial UDB Extra Roll Stabilization with Rudder Enabled (uint8_t)
sue_PITCH_STABILIZATION : Serial UDB Extra Pitch Stabilization Enabled (uint8_t)
sue_YAW_STABILIZATION_RUDDER : Serial UDB Extra Yaw Stabilization using Rudder Enabled (uint8_t)
sue_YAW_STABILIZATION_AILERON : Serial UDB Extra Yaw Stabilization using Ailerons Enabled (uint8_t)
sue_AILERON_NAVIGATION : Serial UDB Extra Navigation with Ailerons Enabled (uint8_t)
sue_RUDDER_NAVIGATION : Serial UDB Extra Navigation with Rudder Enabled (uint8_t)
sue_ALTITUDEHOLD_STABILIZED : Serial UDB Extra Type of Altitude Hold when in Stabilized Mode (uint8_t)
                sue_ALTITUDEHOLD_WAYPOINT : Serial UDB Extra Type of Altitude Hold when in Waypoint Mode (uint8_t)
sue_RACING_MODE : Serial UDB Extra Firmware racing mode enabled (uint8_t)
|
def filepaths(path, exclude=(), hidden=True, empty=True):
    """
    Return list of absolute, sorted file paths

    path: Path to file or directory
    exclude: List of file name patterns to exclude
    hidden: Whether to include hidden files
    empty: Whether to include empty files

    Raise PathNotFoundError if path doesn't exist.
    """
    if not os.path.exists(path):
        raise error.PathNotFoundError(path)
    elif not os.access(path, os.R_OK,
                       effective_ids=os.access in os.supports_effective_ids):
        raise error.ReadError(errno.EACCES, path)
    if os.path.isfile(path):
        return [path]
    # Local renamed from ``filepaths`` to avoid shadowing this function.
    found = []
    for dirpath, dirnames, filenames in os.walk(path):
        # Ignore hidden directory
        # NOTE(review): this skips the directory's own files but does not
        # prune ``dirnames``, so subdirectories are still walked; whether
        # is_hidden() catches those depends on its semantics -- confirm.
        if not hidden and is_hidden(dirpath):
            continue
        for filename in filenames:
            # Ignore hidden file
            if not hidden and is_hidden(filename):
                continue
            filepath = os.path.join(dirpath, filename)
            # Ignore excluded file
            if any(is_match(filepath, pattern) for pattern in exclude):
                continue
            # Ignore empty file (the old redundant else-after-continue
            # is flattened here; behaviour is unchanged).
            if empty or os.path.getsize(os.path.realpath(filepath)) > 0:
                found.append(filepath)
    return sorted(found, key=lambda fp: fp.casefold())
|
Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist.
|
def save_failed_dump(self):
    """
    Save dump of failed request for debugging.

    This method is called when a fatal network exception is raised.
    The saved dump could be used for debugging the reason of the failure.
    """
    # try/except for safety, to not break live spiders
    try:
        # FIXME
        is_urllib3 = self.transport.__class__.__name__ == 'Urllib3Transport'
        if is_urllib3 and not getattr(self.transport, '_response', None):
            # Urllib3 transport with no response captured: nothing to parse.
            self.doc = None
        else:
            self.doc = self.transport.prepare_response(self)
        self.copy_request_data()
        self.save_dumps()
    except Exception as ex:  # pylint: disable=broad-except
        logger.error('', exc_info=ex)
|
Save dump of failed request for debugging.
This method is called when a fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.