repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Stanford-Online/edx-platform | lms/djangoapps/support/views/certificate.py | 23 | 1319 | """
Certificate tool in the student support app.
"""
import urllib
from django.utils.decorators import method_decorator
from django.views.generic import View
from edxmako.shortcuts import render_to_response
from support.decorators import require_support_permission
class CertificatesSupportView(View):
    """
    Support-team tool for inspecting and re-issuing user certificates.

    Certificates may need to be regenerated when the original generation
    went wrong, for example:

    * The user's name was misspelled on the certificate.
    * The user later earned a higher grade and wants it reflected on the
      certificate and dashboard.
    * The user received an honor code certificate because their
      verification expired before certificates were generated.

    The heavy lifting happens client-side through API calls made directly
    to the certificates app; this view only renders the page shell.
    """

    @method_decorator(require_support_permission)
    def get(self, request):
        """Render the certificates support view. """
        user_query = request.GET.get("user", "")
        course_query = request.GET.get("course_id", "")
        # Round-trip through quote_plus/unquote to normalize the
        # user-supplied filter string before rendering.
        context = {
            "user_filter": urllib.unquote(urllib.quote_plus(user_query)),
            "course_filter": course_query,
        }
        return render_to_response("support/certificates.html", context)
| agpl-3.0 |
ojengwa/oh-mainline | vendor/packages/docutils/docutils/parsers/rst/roles.py | 108 | 14739 | # $Id: roles.py 7514 2012-09-14 14:27:12Z milde $
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Copyright: This module has been placed in the public domain.
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text beings.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
from docutils.utils.code_analyzer import Lexer, LexerError
# Role applied when interpreted text carries no explicit role.
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
# Populated by register_canonical_role().
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
# Populated by register_local_role() and by cached lookups in role().
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
    """
    Locate and return a role function from its language-dependent name,
    along with a list of system messages.

    If the role is not found in the current language, check English.
    Return a 2-tuple: role function (``None`` if the named role cannot be
    found) and a list of system messages.

    :Parameters:
      - `role_name`: role name as used in the document (may be empty).
      - `language_module`: language module to look the name up in.
      - `lineno`: line number, attached to any generated messages.
      - `reporter`: the document reporter used to build system messages.
    """
    normname = role_name.lower()
    messages = []
    msg_text = []
    # Fast path: the local name has already been registered/cached.
    if normname in _roles:
        return _roles[normname], messages
    if role_name:
        canonicalname = None
        try:
            canonicalname = language_module.roles[normname]
        # `except ... as` form: valid on Python 2.6+ and required on
        # Python 3 (the old comma form is a syntax error there).
        except AttributeError as error:
            msg_text.append('Problem retrieving role entry from language '
                            'module %r: %s.' % (language_module, error))
        except KeyError:
            msg_text.append('No role entry for "%s" in module "%s".'
                            % (role_name, language_module.__name__))
    else:
        # Empty role name: fall back to the configured default role.
        canonicalname = DEFAULT_INTERPRETED_ROLE
    # If we didn't find it, try English as a fallback.
    if not canonicalname:
        try:
            canonicalname = _fallback_language_module.roles[normname]
            msg_text.append('Using English fallback for role "%s".'
                            % role_name)
        except KeyError:
            msg_text.append('Trying "%s" as canonical role name.'
                            % role_name)
            # The canonical name should be an English name, but just in case:
            canonicalname = normname
    # Collect any messages that we generated.
    if msg_text:
        message = reporter.info('\n'.join(msg_text), line=lineno)
        messages.append(message)
    # Look the role up in the registry, and return it.
    if canonicalname in _role_registry:
        role_fn = _role_registry[canonicalname]
        # Cache under the local name so future lookups hit the fast path.
        register_local_role(normname, role_fn)
        return role_fn, messages
    else:
        return None, messages  # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
    """
    Register an interpreted text role under its canonical (English) name.

    :Parameters:
      - `name`: the canonical name of the interpreted role.
      - `role_fn`: the role function; see the module docstring for its
        expected signature and attributes.
    """
    set_implicit_options(role_fn)
    _role_registry[name] = role_fn
def register_local_role(name, role_fn):
    """
    Register an interpreted text role under a local or language-dependent
    name.

    :Parameters:
      - `name`: the local or language-dependent name of the role.
      - `role_fn`: the role function; see the module docstring for its
        expected signature and attributes.
    """
    set_implicit_options(role_fn)
    _roles[name] = role_fn
def set_implicit_options(role_fn):
    """
    Ensure a role function supports the implicit "class" option, unless
    it explicitly declares otherwise.
    """
    options = getattr(role_fn, 'options', None)
    if options is None:
        role_fn.options = {'class': directives.class_option}
    elif 'class' not in options:
        options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
    """Register a role that simply wraps `node_class` around the text."""
    wrapper = GenericRole(canonical_name, node_class)
    register_canonical_role(canonical_name, wrapper)
class GenericRole:
    """
    Generic interpreted text role: the interpreted text is wrapped in a
    single instance of the node class supplied at construction time.
    """

    def __init__(self, role_name, node_class):
        self.name = role_name
        self.node_class = node_class

    def __call__(self, role, rawtext, text, lineno, inliner,
                 options={}, content=[]):
        set_classes(options)
        node = self.node_class(rawtext, utils.unescape(text), **options)
        return [node], []
class CustomRole:
    """
    Wrapper for custom interpreted text roles (created via the "role"
    directive): delegates to a base role, merging in the options and
    content supplied at definition time.
    """
    def __init__(self, role_name, base_role, options={}, content=[]):
        """
        :Parameters:
          - `role_name`: the name the custom role is registered under.
          - `base_role`: the role function this custom role delegates to.
          - `options`: directive options merged into every call.
          - `content`: directive content prepended to every call's content.
        """
        self.name = role_name
        self.base_role = base_role
        # Mirror the base role's customization attributes so the "role"
        # directive can interrogate this wrapper the same way.
        self.options = None
        if hasattr(base_role, 'options'):
            self.options = base_role.options
        self.content = None
        if hasattr(base_role, 'content'):
            self.content = base_role.content
        self.supplied_options = options
        self.supplied_content = content
    def __call__(self, role, rawtext, text, lineno, inliner,
                 options={}, content=[]):
        # Call-time options override definition-time options.
        opts = self.supplied_options.copy()
        opts.update(options)
        cont = list(self.supplied_content)
        if cont and content:
            # Separate definition-time content from call-time content.
            # (The original `cont += '\n'` only worked because list +=
            # string iterates the string; append the element explicitly.)
            cont.append('\n')
        cont.extend(content)
        return self.base_role(role, rawtext, text, lineno, inliner,
                              options=opts, content=cont)
def generic_custom_role(role, rawtext, text, lineno, inliner,
                        options={}, content=[]):
    """Base role for custom roles when no other base role is specified:
    wraps the text in a `nodes.inline` element."""
    # Once nested inline markup is implemented, this and other methods should
    # recursively call inliner.nested_parse().
    set_classes(options)
    return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
# Each role below simply wraps the interpreted text in the
# corresponding docutils node class (see GenericRole).
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
                       options={}, content=[]):
    """Reference a Python Enhancement Proposal by number, e.g. ``:pep:`8``."""
    try:
        pepnum = int(text)
        if not 0 <= pepnum <= 9999:
            raise ValueError
    except ValueError:
        msg = inliner.reporter.error(
            'PEP number must be a number from 0 to 9999; "%s" is invalid.'
            % text, line=lineno)
        problem = inliner.problematic(rawtext, rawtext, msg)
        return [problem], [msg]
    # Base URL mainly used by inliner.pep_reference; so this is correct:
    settings = inliner.document.settings
    ref = settings.pep_base_url + settings.pep_file_url_template % pepnum
    set_classes(options)
    node = nodes.reference(rawtext, 'PEP ' + utils.unescape(text),
                           refuri=ref, **options)
    return [node], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
                       options={}, content=[]):
    """Reference an IETF Request For Comments by number, e.g. ``:rfc:`2822``."""
    try:
        rfcnum = int(text)
        if rfcnum < 1:
            raise ValueError
    except ValueError:
        msg = inliner.reporter.error(
            'RFC number must be a number greater than or equal to 1; '
            '"%s" is invalid.' % text, line=lineno)
        problem = inliner.problematic(rawtext, rawtext, msg)
        return [problem], [msg]
    # Base URL mainly used by inliner.rfc_reference, so this is correct:
    settings = inliner.document.settings
    ref = settings.rfc_base_url + inliner.rfc_url % rfcnum
    set_classes(options)
    node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
                           **options)
    return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Pass text through untouched to the writer for a given output format."""
    # Guard: the raw role can be globally disabled in the settings.
    if not inliner.document.settings.raw_enabled:
        msg = inliner.reporter.warning('raw (and derived) roles disabled')
        problem = inliner.problematic(rawtext, rawtext, msg)
        return [problem], [msg]
    # Guard: a format must have been attached via the "role" directive.
    if 'format' not in options:
        msg = inliner.reporter.error(
            'No format (Writer name) is associated with this role: "%s".\n'
            'The "raw" role cannot be used directly.\n'
            'Instead, use the "role" directive to create a new role with '
            'an associated format.' % role, line=lineno)
        problem = inliner.problematic(rawtext, rawtext, msg)
        return [problem], [msg]
    set_classes(options)
    node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
    node.source, node.line = inliner.reporter.get_source_and_line(lineno)
    return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Mark text as a code sample, optionally syntax-highlighted.

    The "language" option selects the lexer; the resulting tokens are
    emitted as `nodes.inline` children carrying the tokens' CSS classes.
    """
    set_classes(options)
    language = options.get('language', '')
    classes = ['code']
    if 'classes' in options:
        classes.extend(options['classes'])
    if language and language not in classes:
        classes.append(language)
    try:
        tokens = Lexer(utils.unescape(text, 1), language,
                       inliner.document.settings.syntax_highlight)
    # `except ... as` form: valid on Python 2.6+ and required on Python 3.
    except LexerError as error:
        msg = inliner.reporter.warning(error)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    node = nodes.literal(rawtext, '', classes=classes)
    # Analyze content and add a node for every token.  (The loop variable
    # is renamed so it no longer shadows the `classes` list built above.)
    for token_classes, value in tokens:
        if token_classes:
            node += nodes.inline(value, value, classes=token_classes)
        else:
            # insert as Text to decrease the verbosity of the output
            node += nodes.Text(value, value)
    return [node], []
code_role.options = {'class': directives.class_option,
                     'language': directives.unchanged}
register_canonical_role('code', code_role)
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Wrap LaTeX math content in a `nodes.math` element.

    The text between the backquotes of `rawtext` is used verbatim so that
    backslash escapes are preserved for the math renderer.
    """
    # Use the raw text between the first pair of backquotes.
    # (The original also computed ``i = rawtext.find('`')`` but never
    # used it; that dead assignment is removed.)
    text = rawtext.split('`')[1]
    node = nodes.math(rawtext, text)
    return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
    """Stand-in for roles that are not yet implemented: report an error."""
    msg = inliner.reporter.error(
        'Interpreted text role "%s" not implemented.' % role, line=lineno)
    problem = inliner.problematic(rawtext, rawtext, msg)
    return [problem], [msg]
# Placeholders: using any of these roles currently produces an error
# via unimplemented_role().
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
                        unimplemented_role)
def set_classes(options):
    """
    Auxiliary function: move ``options['class']`` to ``options['classes']``.

    The "class" entry (if any) is removed and its value stored under
    "classes"; it is an error for both keys to be present at once.
    """
    if 'class' in options:
        assert 'classes' not in options
        options['classes'] = options.pop('class')
| agpl-3.0 |
davenovak/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/rules/gyptest-default.py | 226 | 1431 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
# First program: xcode builds it in its own subdirectory.
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
chdir = 'relocate/src/subdir1' if test.format == 'xcode' else 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
# Second program, same xcode-specific location handling.
expect = """\
Hello from program.c
Hello from function3.in
"""
chdir = 'relocate/src/subdir3' if test.format == 'xcode' else 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
# Rule outputs written next to their inputs (checked in the same
# order as the original script).
for outfile, contents in (
    ('relocate/src/subdir2/file1.out', 'Hello from file1.in\n'),
    ('relocate/src/subdir2/file2.out', 'Hello from file2.in\n'),
    ('relocate/src/subdir2/file1.out2', 'Hello from file1.in\n'),
    ('relocate/src/subdir2/file2.out2', 'Hello from file2.in\n'),
    ('relocate/src/external/file1.external_rules.out',
     'Hello from file1.in\n'),
    ('relocate/src/external/file2.external_rules.out',
     'Hello from file2.in\n'),
):
  test.must_match(outfile, contents)
test.pass_test()
| gpl-3.0 |
cogitate/twitter-zipkin-uuid | doc/src/sphinx/exts/includecode.py | 121 | 5444 | import os
import codecs
from os import path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
class IncludeCode(Directive):
    """
    Include a code example from a file with sections delimited with special comments.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'section': directives.unchanged_required,
        'comment': directives.unchanged_required,
        'marker': directives.unchanged_required,
        'include': directives.unchanged_required,
        'exclude': directives.unchanged_required,
        'hideexcludes': directives.flag,
        'linenos': directives.flag,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
    }
    def run(self):
        """Read the referenced file, filter it by section markers, and
        return it as a single literal block node."""
        document = self.state.document
        arg0 = self.arguments[0]
        # The argument may carry a section suffix: "path/to/file#section".
        (filename, sep, section) = arg0.partition('#')
        if not document.settings.file_insertion_enabled:
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        env = document.settings.env
        # Resolve the path: leading '/' means relative to the source root,
        # otherwise relative to the including document's directory.
        if filename.startswith('/') or filename.startswith(os.sep):
            rel_fn = filename[1:]
        else:
            docdir = path.dirname(env.doc2path(env.docname, base=None))
            rel_fn = path.join(docdir, filename)
        try:
            fn = path.join(env.srcdir, rel_fn)
        except UnicodeDecodeError:
            # the source directory is a bytestring with non-ASCII characters;
            # let's try to encode the rel_fn in the file system encoding
            # NOTE(review): `sys` is not imported in this module, so this
            # fallback path would raise NameError -- confirm and add import.
            rel_fn = rel_fn.encode(sys.getfilesystemencoding())
            fn = path.join(env.srcdir, rel_fn)
        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        try:
            f = codecs.StreamReaderWriter(open(fn, 'U'),
                    codec_info[2], codec_info[3], 'strict')
            lines = f.readlines()
            f.close()
        except (IOError, OSError):
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))]
        comment = self.options.get('comment', '//')
        marker = self.options.get('marker', comment + '#')
        lenm = len(marker)
        if not section:
            section = self.options.get('section')
        include_sections = self.options.get('include', '')
        exclude_sections = self.options.get('exclude', '')
        include = set(include_sections.split(',')) if include_sections else set()
        exclude = set(exclude_sections.split(',')) if exclude_sections else set()
        hideexcludes = 'hideexcludes' in self.options
        if section:
            include |= set([section])
        # Walk the file tracking which marker-delimited sections we are
        # inside: a marker line toggles its section in/out of `within`.
        within = set()
        res = []
        excluding = False
        for line in lines:
            index = line.find(marker)
            if index >= 0:
                section_name = line[index+lenm:].strip()
                if section_name in within:
                    # Closing marker for a currently-open section.
                    within ^= set([section_name])
                    if excluding and not (exclude & within):
                        excluding = False
                else:
                    # Opening marker; may start an excluded region.
                    within |= set([section_name])
                    if not excluding and (exclude & within):
                        excluding = True
                        if not hideexcludes:
                            res.append(' ' * index + comment + ' ' + section_name.replace('-', ' ') + ' ...\n')
            elif not (exclude & within) and (not include or (include & within)):
                res.append(line)
        lines = res
        def countwhile(predicate, iterable):
            # Length of the leading run of items satisfying `predicate`.
            # (Returns None if every item matches; callers only pass
            # non-blank lines, so a non-space character always exists.)
            count = 0
            for x in iterable:
                if predicate(x):
                    count += 1
                else:
                    return count
        # Dedent: shift all lines left by the smallest common indentation.
        # NOTE(review): relies on Python 2 semantics -- filter/map return
        # lists here, and `lines.insert`/`lines.append` below depend on it.
        nonempty = filter(lambda l: l.strip(), lines)
        tabcounts = map(lambda l: countwhile(lambda c: c == ' ', l), nonempty)
        tabshift = min(tabcounts) if tabcounts else 0
        if tabshift > 0:
            lines = map(lambda l: l[tabshift:] if len(l) > tabshift else l, lines)
        prepend = self.options.get('prepend')
        append = self.options.get('append')
        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')
        text = ''.join(lines)
        retnode = nodes.literal_block(text, text, source=fn)
        retnode.line = 1
        retnode.attributes['line_number'] = self.lineno
        language = self.options.get('language')
        if language:
            retnode['language'] = language
        if 'linenos' in self.options:
            retnode['linenos'] = True
        # Rebuild the including document when the source file changes.
        document.settings.env.note_dependency(rel_fn)
        return [retnode]
def setup(app):
    """Sphinx extension entry point: register the ``includecode``
    directive (requires Sphinx >= 1.0)."""
    app.require_sphinx('1.0')
    app.add_directive('includecode', IncludeCode)
| apache-2.0 |
amyvmiwei/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 303 | 9236 | from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
def current_time():
    """
    Retrieve the current time, this function is mocked out in unit testing.
    """
    # Wall-clock seconds since the epoch.
    return time.time()
# Sentinel distinguishing "argument not supplied" from an explicit None.
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout (see Timeout.DEFAULT_TIMEOUT below).
class Timeout(object):
    """
    Utility object for storing timeout values.
    Example usage:
    .. code-block:: python
        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
        pool.request(...) # Etc, etc
    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: integer, float, or None
    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: integer, float, or None
    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.
        Defaults to None.
    :type total: integer, float, or None
    .. note::
        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response. Specifically, Python's DNS resolver does not obey the
        timeout specified on the socket. Other factors that can affect total
        request time include high CPU load, high swap, the program running at a
        low priority level, or other behaviors. The observed running time for
        urllib3 to return a response may be greater than the value passed to
        `total`.
        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not ever trigger, even though the request will
        take several minutes to complete.
        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
    def __init__(self, total=None, connect=_Default, read=_Default):
        # Validate each value up front so misuse fails at construction time.
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        self._start_connect = None
    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)
    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is used
            for clear error messages
        :return: the value
        :raises ValueError: if the type is not an integer or a float, or if it
            is a numeric value less than zero
        """
        # The two sentinels (_Default and DEFAULT_TIMEOUT) and None pass
        # through unchanged; everything else must be a non-negative number.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError: # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        return value
    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value passed
        to this function.
        :param timeout: The legacy timeout value
        :type timeout: integer, float, sentinel default object, or None
        :return: a Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self):
        """ Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)
    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect
    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.
        :return: the elapsed time
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect
    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: the connect timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total
        # Both connect and total set: the shorter one wins.
        return min(self._connect, self.total)
    @property
    def read_timeout(self):
        """ Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: the value to use for the read timeout
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        # Both total and read are real numbers: read gets whatever time is
        # left over from the connect phase, capped at self._read.
        if (self.total is not None and
            self.total is not self.DEFAULT_TIMEOUT and
            self._read is not None and
            self._read is not self.DEFAULT_TIMEOUT):
            # in case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| lgpl-3.0 |
candlepin/rho | src/rho/crypto.py | 2 | 5263 | #
# Copyright (c) 2009 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
""" Configuration Encryption Module """
import os
import os.path
from rho.log import log
# From the python-crypto package
from Crypto.Cipher import AES
from rho.PBKDF2 import PBKDF2
from config import CONFIG_VERSION
class BadKeyException(Exception):
    """Raised when decryption output suggests the supplied key was wrong."""
    pass
class NoSuchFileException(Exception):
    """Raised when the configuration file to read does not exist."""
    pass
class DecryptionException(Exception):
    """Raised when decrypting the configuration file fails."""
    pass
class AESEncrypter(object):
    """
    Simple AES-128 (CFB mode) encryption helper.

    The AES key is derived from the given password and salt via PBKDF2;
    the same password/salt/iv triple is needed to decrypt.
    Based on contribution from Steve Milner.
    """
    def __init__(self, password, salt, iv, key_length=16):
        """
        Creates a new instance of AESEncrypter.
        :Parameters:
           - `password`: user password the key is derived from.
           - `salt`: PBKDF2 salt.
           - `iv`: AES initialization vector.
           - `key_length`: derived key size in bytes (16 = AES-128).
        """
        self.__key_length = key_length
        self.__key = self.__create_key(salt, password)
        if len(self.__key) != self.__key_length:
            raise Exception("Key does not match length: %s" %
                    self.__key_length)
        self.__pad_char = " "
        self.__cipher_obj = AES.new(self.__key, AES.MODE_CFB, iv)
    def __create_key(self, salt, password):
        """
        Derives the AES key from the password and salt using PBKDF2.
        """
        return PBKDF2(password, salt).read(self.__key_length)
    def encrypt(self, data):
        """
        Pads `data` to the cipher block size, then encrypts it.
        """
        padded = self.pad(data, self.__cipher_obj.block_size)
        return self.__cipher_obj.encrypt(padded)
    def decrypt(self, ciphertext):
        """
        Decrypts `ciphertext` and strips the padding (if any data remains).
        """
        plaintext = self.__cipher_obj.decrypt(ciphertext)
        if len(plaintext):
            plaintext = self.unpad(plaintext, self.__cipher_obj.block_size)
        return plaintext
    # Padding scheme: see http://tools.ietf.org/html/rfc3852#section-6.3
    def pad(self, data, length):
        assert 0 < length < 256
        padlen = length - len(data) % length
        assert 0 < padlen <= length
        return data + chr(padlen) * padlen
    def unpad(self, data, length):
        assert 0 < length < 256
        padlen = ord(data[-1])
        assert 0 < padlen <= length
        assert data[-padlen:] == chr(padlen) * padlen
        return data[:-padlen]
    # read-only properties
    pad_char = property(lambda self: self.__pad_char)
    key = property(lambda self: self.__key)
    key_length = property(lambda self: self.__key_length)
def encrypt(plaintext, key, salt, iv):
    """
    Encrypt `plaintext` with an AES key derived from `key` and `salt`,
    using initialization vector `iv`.
    """
    return AESEncrypter(key, salt, iv).encrypt(plaintext)
def decrypt(ciphertext, key, salt, iv):
    """
    Decrypt `ciphertext` with an AES key derived from `key` and `salt`,
    using initialization vector `iv`.
    """
    # TODO: detect decryption failure (probably a bad key) and raise
    # BadKeyException; CFB mode gives no built-in integrity check.
    return AESEncrypter(key, salt, iv).decrypt(ciphertext)
def write_file(filename, plaintext, key):
    """
    Encrypt `plaintext` with the given key and write it to `filename`.

    File layout: 2-byte hex version, 8-byte salt, 16-byte IV, then the
    ciphertext.  An existing file will be overwritten so be careful.
    """
    salt = os.urandom(8)
    iv = os.urandom(16)
    log.debug("version: %s salt: %s iv: %s" % (CONFIG_VERSION, salt, iv))
    # 'with' guarantees the handle is closed even if a write raises
    # (the original leaked the file object on error).
    with open(filename, 'w') as f:
        # a hex version string
        f.write("%2X" % CONFIG_VERSION)
        f.write(salt)
        f.write(iv)
        f.write(encrypt(plaintext, key, salt, iv))
def read_file(filename, password):
    """
    Decrypt contents of file with the given key, and return as a string.

    Assume that we're reading files that we encrypted. (i.e. we're not trying
    to read files encrypted manually with gpg)

    Also note that the password here is the user password, not the actual
    AES key. To get that we must read the first 8 bytes of the file to get
    the correct salt to use to convert the password to the key.

    Returns the decrypted plaintext string.

    :raises NoSuchFileException: if *filename* does not exist.
    :raises DecryptionException: if the contents cannot be decrypted
        (usually a wrong password).
    """
    if not os.path.exists(filename):
        raise NoSuchFileException()
    # 'with' closes the handle on every path; the old code only reached
    # f.close() on success and leaked the handle when decryption raised.
    with open(filename, 'r') as f:
        # 2 byte version in hex
        ver = f.read(2)
        # 8 byte salt
        salt = f.read(8)
        # 16 byte initialization vector
        iv = f.read(16)
        log.debug("Read version: %s salt: %s iv: %s" % (ver, salt, iv))
        cont = f.read()
    try:
        return decrypt(cont, password, salt, iv)
    except Exception as e:
        log.warn("Exception while decrypting the configuration file: %s" % e)
        raise DecryptionException
| gpl-2.0 |
saurabh6790/trufil_lib | core/doctype/event/event.py | 16 | 5774 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import getdate, cint, add_months, date_diff, add_days, nowdate
# Event weekday flag field names, indexed by datetime.weekday() (Monday=0).
weekdays = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
class DocType:
    """Server-side controller for the Event doctype."""

    def __init__(self, d, dl):
        self.doc, self.doclist = d, dl

    def validate(self):
        """Reject events whose start timestamp falls after their end."""
        starts, ends = self.doc.starts_on, self.doc.ends_on
        if starts and ends and starts > ends:
            webnotes.msgprint(webnotes._("Event End must be after Start"),
                raise_exception=True)
def get_match_conditions():
    """Return a SQL WHERE fragment restricting Events to those the current
    session user may see: public events, events they own, events they are
    listed on (tabEvent User) or events shared with one of their roles
    (tabEvent Role)."""
    return """(tabEvent.event_type='Public' or tabEvent.owner='%(user)s'
        or exists(select * from `tabEvent User` where
            `tabEvent User`.parent=tabEvent.name and `tabEvent User`.person='%(user)s')
        or exists(select * from `tabEvent Role` where
            `tabEvent Role`.parent=tabEvent.name
            and `tabEvent Role`.role in ('%(roles)s')))
        """ % {
        "user": webnotes.session.user,
        # Role names joined into a quoted SQL IN (...) list.
        "roles": "', '".join(webnotes.get_roles(webnotes.session.user))
    }
def send_event_digest():
    """Email every enabled System User a digest of today's reminder-enabled
    calendar events, rendered in each recipient's own language."""
    today = nowdate()
    for user in webnotes.conn.sql("""select name, email, language
        from tabProfile where ifnull(enabled,0)=1
        and user_type='System User' and name not in ('Guest', 'Administrator')""", as_dict=1):
        # for_reminder=True limits the result to events with send_reminder set.
        events = get_events(today, today, user.name, for_reminder=True)
        if events:
            text = ""
            # Switch translations to the recipient's language before rendering.
            webnotes.set_user_lang(user.name, user.language)
            webnotes.load_translations("core", "doctype", "event")
            text = "<h3>" + webnotes._("Events In Today's Calendar") + "</h3>"
            for e in events:
                if e.all_day:
                    e.starts_on = "All Day"
                text += "<h4>%(starts_on)s: %(subject)s</h4><p>%(description)s</p>" % e
            text += '<p style="color: #888; font-size: 80%; margin-top: 20px; padding-top: 10px; border-top: 1px solid #eee;">'\
                + webnotes._("Daily Event Digest is sent for Calendar Events where reminders are set.")+'</p>'
            from webnotes.utils.email_lib import sendmail
            sendmail(recipients=user.email, subject=webnotes._("Upcoming Events for Today"),
                msg = text)
@webnotes.whitelist()
def get_events(start, end, user=None, for_reminder=False):
    """Return all events visible to *user* between *start* and *end*
    (inclusive), expanding recurring events into concrete dated copies.

    :param start: period start, "YYYY-MM-DD" or "YYYY-MM-DD HH:MM:SS"
    :param end: period end, same format
    :param user: profile name; defaults to the session user
    :param for_reminder: if True, only events with send_reminder set
    """
    if not user:
        user = webnotes.session.user
    roles = webnotes.get_roles(user)
    # Base query: events overlapping the window, plus recurring events that
    # started before the window and have not passed repeat_till. Visibility
    # rules mirror get_match_conditions().
    events = webnotes.conn.sql("""select name, subject, description,
        starts_on, ends_on, owner, all_day, event_type, repeat_this_event, repeat_on,
        monday, tuesday, wednesday, thursday, friday, saturday, sunday
        from tabEvent where ((
            (date(starts_on) between date('%(start)s') and date('%(end)s'))
            or (date(ends_on) between date('%(start)s') and date('%(end)s'))
            or (date(starts_on) <= date('%(start)s') and date(ends_on) >= date('%(end)s'))
        ) or (
            date(starts_on) <= date('%(start)s') and ifnull(repeat_this_event,0)=1 and
            ifnull(repeat_till, "3000-01-01") > date('%(start)s')
        ))
        %(reminder_condition)s
        and (event_type='Public' or owner='%(user)s'
        or exists(select * from `tabEvent User` where
            `tabEvent User`.parent=tabEvent.name and person='%(user)s')
        or exists(select * from `tabEvent Role` where
            `tabEvent Role`.parent=tabEvent.name
            and `tabEvent Role`.role in ('%(roles)s')))
        order by starts_on""" % {
            "start": start,
            "end": end,
            "reminder_condition": "and ifnull(send_reminder,0)=1" if for_reminder else "",
            "user": user,
            "roles": "', '".join(roles)
        }, as_dict=1)
    # process recurring events
    # From here on, work with bare dates (drop any time-of-day part).
    start = start.split(" ")[0]
    end = end.split(" ")[0]
    add_events = []
    remove_events = []
    def add_event(e, date):
        # Clone the recurring template onto a concrete date, keeping the
        # original time-of-day portion of starts_on/ends_on.
        new_event = e.copy()
        new_event.starts_on = date + " " + e.starts_on.split(" ")[1]
        if e.ends_on:
            new_event.ends_on = date + " " + e.ends_on.split(" ")[1]
        add_events.append(new_event)
    for e in events:
        if e.repeat_this_event:
            event_start, time_str = e.starts_on.split(" ")
            if e.repeat_on=="Every Year":
                start_year = cint(start.split("-")[0])
                end_year = cint(end.split("-")[0])
                # Keep only "MM-DD" and re-attach each year in the window.
                event_start = "-".join(event_start.split("-")[1:])
                # repeat for all years in period
                for year in range(start_year, end_year+1):
                    date = str(year) + "-" + event_start
                    if date >= start and date <= end:
                        add_event(e, date)
                remove_events.append(e)
            if e.repeat_on=="Every Month":
                # Same day-of-month as the template, in the window's month.
                date = start.split("-")[0] + "-" + start.split("-")[1] + "-" + event_start.split("-")[2]
                # last day of month issue, start from prev month!
                try:
                    getdate(date)
                except ValueError:
                    date = date.split("-")
                    date = date[0] + "-" + str(cint(date[1]) - 1) + "-" + date[2]
                start_from = date
                for i in xrange(int(date_diff(end, start) / 30) + 3):
                    if date >= start and date <= end and date >= event_start:
                        add_event(e, date)
                    date = add_months(start_from, i+1)
                remove_events.append(e)
            if e.repeat_on=="Every Week":
                weekday = getdate(event_start).weekday()
                # monday is 0
                start_weekday = getdate(start).weekday()
                # start from nearest weeday after last monday
                date = add_days(start, weekday - start_weekday)
                for cnt in xrange(int(date_diff(end, start) / 7) + 3):
                    if date >= start and date <= end and date >= event_start:
                        add_event(e, date)
                    date = add_days(date, 7)
                remove_events.append(e)
            if e.repeat_on=="Every Day":
                # Only add days whose weekday flag is set on the event.
                for cnt in xrange(date_diff(end, start) + 1):
                    date = add_days(start, cnt)
                    if date >= event_start and date <= end \
                        and e[weekdays[getdate(date).weekday()]]:
                        add_event(e, date)
                remove_events.append(e)
    # Replace recurring templates with their expanded occurrences.
    for e in remove_events:
        events.remove(e)
    events = events + add_events
    for e in events:
        # remove weekday properties (to reduce message size)
        for w in weekdays:
            del e[w]
    return events
| mit |
thesquelched/libcloud | libcloud/common/cloudsigma.py | 29 | 4014 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public names re-exported by this module.
__all__ = [
    'API_ENDPOINTS_1_0',
    'API_ENDPOINTS_2_0',
    'API_VERSIONS',
    'INSTANCE_TYPES'
]
# API end-points
# Legacy (v1.0) API regions, keyed by region slug.
API_ENDPOINTS_1_0 = {
    'zrh': {
        'name': 'Zurich',
        'country': 'Switzerland',
        'host': 'api.zrh.cloudsigma.com'
    },
    'lvs': {
        'name': 'Las Vegas',
        'country': 'United States',
        'host': 'api.lvs.cloudsigma.com'
    }
}
# Current (v2.0) API regions, keyed by region slug. Note the hosts have
# no "api." prefix, unlike the 1.0 endpoints above.
API_ENDPOINTS_2_0 = {
    'zrh': {
        'name': 'Zurich',
        'country': 'Switzerland',
        'host': 'zrh.cloudsigma.com'
    },
    'sjc': {
        'name': 'San Jose, CA',
        'country': 'United States',
        'host': 'sjc.cloudsigma.com'
    },
    'mia': {
        'name': 'Miami, FL',
        'country': 'United States',
        'host': 'mia.cloudsigma.com'
    },
    'wdc': {
        'name': 'Washington, DC',
        'country': 'United States',
        'host': 'wdc.cloudsigma.com'
    },
    'hnl': {
        'name': 'Honolulu, HI',
        'country': 'United States',
        'host': 'hnl.cloudsigma.com'
    }
}
# Region used when the caller does not specify one.
DEFAULT_REGION = 'zrh'
# Supported API versions.
# Bug fix: the list previously read ['1.0' '2.0'] -- the missing comma made
# Python concatenate the adjacent string literals into a single '1.02.0'
# entry, so neither valid version appeared in the list.
API_VERSIONS = [
    '1.0',  # old and deprecated
    '2.0'
]
DEFAULT_API_VERSION = '2.0'
# CloudSigma doesn't specify special instance types.
# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
# 500 MB to 32000 MB for ram
# and 1 GB to 1024 GB for hard drive size.
# Plans in this file are based on examples listed on http://www.cloudsigma
# .com/en/pricing/price-schedules
#
# Per the ranges above: 'cpu' is in MHz, 'memory' in MB, 'disk' in GB.
# 'bandwidth' is always None (not modelled).
INSTANCE_TYPES = [
    {
        'id': 'micro-regular',
        'name': 'Micro/Regular instance',
        'cpu': 1100,
        'memory': 640,
        'disk': 10 + 3,
        'bandwidth': None,
    },
    {
        'id': 'micro-high-cpu',
        'name': 'Micro/High CPU instance',
        'cpu': 2200,
        'memory': 640,
        'disk': 80,
        'bandwidth': None,
    },
    {
        'id': 'standard-small',
        'name': 'Standard/Small instance',
        'cpu': 1100,
        'memory': 1741,
        'disk': 50,
        'bandwidth': None,
    },
    {
        'id': 'standard-large',
        'name': 'Standard/Large instance',
        'cpu': 4400,
        'memory': 7680,
        'disk': 250,
        'bandwidth': None,
    },
    {
        'id': 'standard-extra-large',
        'name': 'Standard/Extra Large instance',
        'cpu': 8800,
        'memory': 15360,
        'disk': 500,
        'bandwidth': None,
    },
    {
        'id': 'high-memory-extra-large',
        'name': 'High Memory/Extra Large instance',
        'cpu': 7150,
        'memory': 17510,
        'disk': 250,
        'bandwidth': None,
    },
    {
        'id': 'high-memory-double-extra-large',
        'name': 'High Memory/Double Extra Large instance',
        'cpu': 14300,
        'memory': 32768,
        'disk': 500,
        'bandwidth': None,
    },
    {
        'id': 'high-cpu-medium',
        'name': 'High CPU/Medium instance',
        'cpu': 5500,
        'memory': 1741,
        'disk': 150,
        'bandwidth': None,
    },
    {
        'id': 'high-cpu-extra-large',
        'name': 'High CPU/Extra Large instance',
        'cpu': 20000,
        'memory': 7168,
        'disk': 500,
        'bandwidth': None,
    }
]
| apache-2.0 |
a710128/Lesson9 | API/course.py | 1 | 3339 | import re
class CourseException(Exception):
    """Raised for invalid Course construction or usage.

    :param msg: human readable description
    :param err: numeric error code
    """
    def __init__(self, msg, err):
        super(CourseException, self).__init__()
        self.msg = msg
        self.err = err

    def __str__(self):
        return "CourseError : " + self.msg

    def __repr__(self):
        # Bug fix: this previously read self.errcode, which is never set
        # (__init__ stores the code in self.err), so calling repr() on
        # the exception raised AttributeError.
        return '<CourseException msg : "%s", errcode : %d>' % (self.msg, self.err)
def courseTimeParser(timeStr):
    """Parse a schedule string like "1-2,3-4" into [(week, period), ...].

    Parenthesised annotations, e.g. "1-2(odd weeks)", are stripped first;
    empty segments (e.g. from a trailing comma) are ignored.
    """
    assert isinstance(timeStr, str), "Parameter type error"
    cleaned = re.sub('\([^)]*\)', '', timeStr)
    slots = []
    for piece in cleaned.split(','):
        if not piece:
            continue
        week, period = piece.split('-')
        slots.append((int(week), int(period)))
    return slots
class Course:
    """A course section identified by course number (kch) and section
    number (kxh), carrying schedule and descriptive metadata."""

    def __init__(self, **kwargs):
        """Build from either kch= and kxh=, or kid="kch:kxh".

        Optional keyword fields (type-checked against their defaults):
        name, teacher, time (list of (week, period) int tuples), score,
        feature, other.

        :raises CourseException: on a malformed kid (err 0), missing or
            wrongly-typed parameters (err 1).
        """
        if 'kch' in kwargs and 'kxh' in kwargs:
            self.kch = kwargs['kch']
            self.kxh = kwargs['kxh']
        elif 'kid' in kwargs:
            vs = kwargs['kid'].split(':')
            if len(vs) != 2:
                raise CourseException("Wrong Course id parameter", 0)
            self.kch = vs[0]
            self.kxh = vs[1]
        else:
            raise CourseException("Invalid parameters when Course __init__!", 1)
        self.name = ''
        self.teacher = ''
        self.time = []
        self.score = 0
        self.feature = ''
        self.other = ''
        # Defaults double as type templates: each supplied kwarg must be an
        # instance of its default's type, otherwise err 1 is raised.
        params = {
            'name': 'Unknown',
            'teacher': 'Unknown',
            'time': [],
            'score': 0,
            'feature': '',
            'other': ''
        }
        for key in params:
            if key in kwargs:
                if isinstance(kwargs[key], type(params[key])):
                    self.__dict__[key] = kwargs[key]
                else:
                    raise CourseException("Invalid parameters when Course __init__!", 1)
            else:
                self.__dict__[key] = params[key]
        # Every schedule entry must be a (week, period) pair of ints.
        for item in self.time:
            if (not isinstance(item, tuple)) or len(item) != 2 or (not isinstance(item[0], int)) or (not isinstance(item[1], int)):
                raise CourseException("Invalid parameters when Course __init__!", 1)

    def __eq__(self, other):
        # A '*' section number acts as a wildcard: compare course numbers only.
        if self.kxh == '*' or other.kxh == '*':
            return self.kch == other.kch
        return self.kch == other.kch and self.kxh == other.kxh

    def timeClash(self, other):
        """Return True if this course overlaps *other*, which may be a
        single (week, period) tuple or another Course.

        :raises CourseException: err 2 for any other argument type.
        """
        if isinstance(other, tuple):
            for time in self.time:
                if time == other:
                    return True
            return False
        elif isinstance(other, Course):
            for time in self.time:
                if other.timeClash(time):
                    return True
            return False
        else:
            raise CourseException("Invalid parameters when Course timeClash!", 2)

    def __str__(self):
        ret = 'Course: %s:%s; Time : ' % (self.kch, self.kxh)
        first = True
        for wk, pd in self.time:
            if first:
                first = False
            else:
                ret += ','
            ret += '%d-%d' % (wk, pd)
        ret += '; Name: %s; Teacher: %s; Score: %d; Feature: %s; Other: %s' % (self.name, self.teacher, self.score, self.feature, self.other)
        return ret

    def __repr__(self):
        return "<" + self.__str__() + ">"

    def __hash__(self):
        # Hash on the "kch:kxh" identity, consistent with exact __eq__
        # (wildcard sections hash differently from concrete ones).
        return hash(self.kch + ":" + self.kxh)
| mit |
knotwork/old-devcoin-qt | scripts/extract_strings_qt.py | 2 | 2027 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
# Destination for the generated Qt string table.
OUT_CPP="src/qt/bitcoinstrings.cpp"
# An empty msgid, as xgettext emits for the po-file header entry.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples, where each element is the list
    of quoted string fragments making up the entry (continuation lines
    beginning with '"' included).
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one if complete.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line belongs to whichever part we are inside.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    if reading_str:
        entries.append((current_id, current_str))
    return entries
# Source files to scan for _("...") translatable strings.
files = ['src/base58.h', 'src/bignum.h', 'src/db.cpp', 'src/db.h', 'src/externui.h', 'src/headers.h', 'src/init.cpp', 'src/init.h', 'src/irc.cpp', 'src/irc.h', 'src/key.h', 'src/main.cpp', 'src/main.h', 'src/net.cpp', 'src/net.h', 'src/noui.h', 'src/rpc.cpp', 'src/rpc.h', 'src/script.cpp', 'src/script.h', 'src/serialize.h', 'src/strlcpy.h', 'src/uint256.h', 'src/util.cpp', 'src/util.h']
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
# Wrap each non-empty msgid in QT_TRANSLATE_NOOP so Qt linguist can pick
# it up from the generated C++ string table.
f = open(OUT_CPP, 'w')
f.write('#include <QtGlobal>\n')
f.write('// Automatically generated by extract_strings.py\n')
f.write('static const char *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
mrquim/mrquimrepo | script.module.livestreamer/lib/livestreamer/plugins/stream.py | 37 | 2374 | from livestreamer.compat import urlparse
from livestreamer.exceptions import PluginError
from livestreamer.plugin import Plugin
from livestreamer.stream import (AkamaiHDStream, HDSStream, HLSStream,
HTTPStream, RTMPStream)
import ast
import re
# Maps a URL scheme to either a Stream class or a parser callable that
# returns a dict of streams (see StreamURL._get_streams for how each
# entry is invoked).
PROTOCOL_MAP = {
    "akamaihd": AkamaiHDStream,
    "hds": HDSStream.parse_manifest,
    "hls": HLSStream,
    "hlsvariant": HLSStream.parse_variant_playlist,
    "httpstream": HTTPStream,
    "rtmp": RTMPStream,
    "rtmpe": RTMPStream,
    "rtmps": RTMPStream,
    "rtmpt": RTMPStream,
    "rtmpte": RTMPStream
}
# Matches key=value pairs; a value may be a {...}/[...]/(...)/quoted
# python literal or any bare non-whitespace token.
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
class StreamURL(Plugin):
    """Plugin that builds a stream directly from a protocol URL such as
    "hls://..." or "rtmp://...", with optional "key=value" parameters
    appended after spaces."""

    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): the first argument of a classmethod is conventionally
        # named `cls`; it receives the class here despite the name.
        parsed = urlparse(url)
        return parsed.scheme in PROTOCOL_MAP

    def _parse_params(self, params):
        """Parse "k=v k2=v2" into a dict, literal-evaluating each value
        where possible and keeping the raw string otherwise."""
        rval = {}
        matches = re.findall(PARAMS_REGEX, params)
        for key, value in matches:
            try:
                value = ast.literal_eval(value)
            except Exception:
                # Not a python literal; deliberately fall back to the raw
                # string value.
                pass
            rval[key] = value
        return rval

    def _get_streams(self):
        """Build the stream(s) for self.url via PROTOCOL_MAP."""
        parsed = urlparse(self.url)
        cls = PROTOCOL_MAP.get(parsed.scheme)
        if not cls:
            return
        # First token is the URL proper; the rest are key=value parameters.
        split = self.url.split(" ")
        url = split[0]
        urlnoproto = re.match("^\w+://(.+)", url).group(1)
        # Prepend http:// if needed.
        if cls != RTMPStream and not re.match("^http(s)?://", urlnoproto):
            urlnoproto = "http://{0}".format(urlnoproto)
        params = (" ").join(split[1:])
        params = self._parse_params(params)
        if cls == RTMPStream:
            # RTMPStream takes the full rtmp URL inside its params dict.
            params["rtmp"] = url
            for boolkey in ("live", "realtime", "quiet", "verbose", "debug"):
                if boolkey in params:
                    params[boolkey] = bool(params[boolkey])
            stream = cls(self.session, params)
        elif cls == HLSStream.parse_variant_playlist or cls == HDSStream.parse_manifest:
            # Parser callables already return a dict of named streams.
            try:
                streams = cls(self.session, urlnoproto, **params)
            except IOError as err:
                raise PluginError(err)
            return streams
        else:
            stream = cls(self.session, urlnoproto, **params)
        return dict(live=stream)

__plugin__ = StreamURL
| gpl-2.0 |
swayf/pyLoad | module/lib/simplejson/__init__.py | 45 | 18626 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.2.1'
# Names exported by "from simplejson import *".
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Shared module-level encoder reused by dump()/dumps() whenever the caller
# passes only default arguments (the "cached encoder" fast path).
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=True,
    namedtuple_as_object=True,
    tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True, tuple_as_array=True,
        **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    # All-defaults fast path: reuse the shared module-level encoder instead
    # of constructing a new one for every call.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal,
            namedtuple_as_object=namedtuple_as_object,
            tuple_as_array=tuple_as_array,
            **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True,
        tuple_as_array=True,
        **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder
    # All-defaults fast path: reuse the shared module-level encoder.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal,
        namedtuple_as_object=namedtuple_as_object,
        tuple_as_array=tuple_as_array,
        **kw).encode(obj)
# Shared module-level decoder reused by load()/loads() whenever the caller
# passes only default arguments.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
    object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
        **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # NOTE(review): namedtuple_as_object and tuple_as_array are accepted
    # here but not forwarded to loads() -- they appear to be encoder-side
    # options kept for signature symmetry; confirm against upstream.
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # All-defaults fast path: reuse the shared module-level decoder.
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Only forward hooks that were actually supplied, so the decoder's own
    # defaults apply otherwise.
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the package between C-accelerated and pure-python
    implementations at runtime.

    Rebinds the scanner/encoder entry points inside the decoder, encoder
    and scanner submodules, then rebuilds the cached module-level default
    encoder and decoder so they pick up the new implementations.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer C implementations, falling back to the python versions
        # for any symbol the extension does not provide.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    # NOTE(review): unlike the module-level default built at import time,
    # this rebuild omits use_decimal/namedtuple_as_object/tuple_as_array,
    # so the dumps() fast path may behave differently after toggling --
    # confirm against upstream.
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| agpl-3.0 |
marcore/edx-platform | cms/lib/xblock/tagging/migrations/0001_initial.py | 39 | 1187 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the xblock tagging app.

    Creates TagCategories (a named tag category) and TagAvailableValues
    (the values permitted within a category), linked by a foreign key.
    """
    # First migration of the app: no prior migrations to depend on.
    dependencies = [
    ]
    operations = [
        # Values that may be assigned within a tag category.
        migrations.CreateModel(
            name='TagAvailableValues',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        # A tag category, identified by a unique machine name plus a
        # human-readable title.
        migrations.CreateModel(
            name='TagCategories',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, unique=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        # Added after both models exist: each available value belongs to
        # exactly one category.
        migrations.AddField(
            model_name='tagavailablevalues',
            name='category',
            field=models.ForeignKey(to='tagging.TagCategories'),
        ),
    ]
| agpl-3.0 |
hsgui/interest-only | deeplearning/reinforcementlearning/ghostAgents.py | 44 | 3108 | # ghostAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Actions
from game import Directions
import random
from util import manhattanDistance
import util
class GhostAgent(Agent):
    """Base class for ghost agents.

    Chooses an action by sampling from the action distribution supplied
    by a subclass's getDistribution().
    """

    def __init__(self, index):
        self.index = index

    def getAction(self, state):
        """Sample a legal action; stop if the distribution is empty."""
        dist = self.getDistribution(state)
        if not dist:
            return Directions.STOP
        return util.chooseFromDistribution(dist)

    def getDistribution(self, state):
        """Returns a Counter encoding a distribution over actions from the
        provided state.  Subclasses must override this."""
        util.raiseNotDefined()
class RandomGhost(GhostAgent):
    "A ghost that chooses a legal action uniformly at random."

    def getDistribution(self, state):
        # Give every legal action equal weight, then normalize to a
        # probability distribution.
        dist = util.Counter()
        for action in state.getLegalActions(self.index):
            dist[action] = 1.0
        dist.normalize()
        return dist
class DirectionalGhost(GhostAgent):
    """A ghost that prefers to rush Pacman, or flee when scared.

    With probability ``prob_attack`` (``prob_scaredFlee`` while scared) the
    ghost chooses uniformly among the best actions; the remaining
    probability mass is spread uniformly over all legal actions.
    """

    def __init__(self, index, prob_attack=0.8, prob_scaredFlee=0.8):
        self.index = index
        self.prob_attack = prob_attack
        self.prob_scaredFlee = prob_scaredFlee

    def getDistribution(self, state):
        # Read variables from state.
        ghostState = state.getGhostState(self.index)
        legalActions = state.getLegalActions(self.index)
        pos = state.getGhostPosition(self.index)
        isScared = ghostState.scaredTimer > 0

        # Scared ghosts move at half speed.
        speed = 0.5 if isScared else 1

        actionVectors = [Actions.directionToVector(a, speed) for a in legalActions]
        newPositions = [(pos[0] + a[0], pos[1] + a[1]) for a in actionVectors]
        pacmanPosition = state.getPacmanPosition()

        # Distance from each candidate position to Pacman.  NOTE: the loop
        # variable is deliberately *not* named ``pos`` -- under Python 2 a
        # list comprehension leaks its variable into the enclosing scope and
        # the original code silently clobbered the ghost position above.
        distancesToPacman = [manhattanDistance(newPos, pacmanPosition)
                             for newPos in newPositions]

        # Select best actions given the state: flee (maximize distance) when
        # scared, otherwise attack (minimize distance).
        if isScared:
            bestScore = max(distancesToPacman)
            bestProb = self.prob_scaredFlee
        else:
            bestScore = min(distancesToPacman)
            bestProb = self.prob_attack
        bestActions = [action for action, distance
                       in zip(legalActions, distancesToPacman)
                       if distance == bestScore]

        # Construct distribution: best actions share bestProb; every legal
        # action shares the remaining probability mass.
        dist = util.Counter()
        for a in bestActions:
            dist[a] = bestProb / len(bestActions)
        for a in legalActions:
            dist[a] += (1 - bestProb) / len(legalActions)
        dist.normalize()
        return dist
| gpl-2.0 |
gale320/newfies-dialer | newfies/apirest/view_callrequest.py | 4 | 1563 | # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from rest_framework import viewsets
from apirest.callrequest_serializers import CallrequestSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from dialer_cdr.models import Callrequest
from permissions import CustomObjectPermissions
class CallrequestViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows call requests to be viewed or edited.
    """
    model = Callrequest
    queryset = Callrequest.objects.all()
    serializer_class = CallrequestSerializer
    authentication = (BasicAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, CustomObjectPermissions)

    def pre_save(self, obj):
        # Stamp the record with the requesting user before it is saved.
        obj.user = self.request.user

    def get_queryset(self):
        """
        Return every callrequest for a superuser; otherwise only the
        callrequests belonging to the currently authenticated user.
        """
        user = self.request.user
        if user.is_superuser:
            return Callrequest.objects.all()
        return Callrequest.objects.filter(user=user)
| mpl-2.0 |
aldian/tensorflow | tensorflow/python/tools/strip_unused.py | 180 | 3786 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes unneeded nodes from a GraphDef file.
This script is designed to help streamline models, by taking the input and
output nodes that will be used by an application and figuring out the smallest
set of operations that are required to run for those arguments. The resulting
minimal graph is then saved out.
The advantages of running this script are:
- You may be able to shrink the file size.
- Operations that are unsupported on your platform but still present can be
safely removed.
The resulting graph may not be as flexible as the original though, since any
input nodes that weren't explicitly mentioned may not be accessible any more.
An example of command-line usage is:
bazel build tensorflow/python/tools:strip_unused && \
bazel-bin/tensorflow/python/tools/strip_unused \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/stripped_graph.pb \
--input_node_names=input0
--output_node_names=softmax
You can also look at strip_unused_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.tools import strip_unused_lib
FLAGS = None
def main(unused_args):
  """Strip unneeded nodes from the graph described by the parsed FLAGS."""
  strip_unused_lib.strip_unused_from_files(
      FLAGS.input_graph, FLAGS.input_binary, FLAGS.output_graph,
      FLAGS.output_binary, FLAGS.input_node_names, FLAGS.output_node_names,
      FLAGS.placeholder_type_enum)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Accept --flag=true / --flag=false style booleans.
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--input_graph',
      type=str,
      default='',
      help='TensorFlow \'GraphDef\' file to load.')
  parser.add_argument(
      '--input_binary',
      nargs='?',
      const=True,
      type='bool',
      default=False,
      help='Whether the input files are in binary format.')
  parser.add_argument(
      '--output_graph',
      type=str,
      default='',
      help='Output \'GraphDef\' file name.')
  parser.add_argument(
      '--output_binary',
      nargs='?',
      const=True,
      type='bool',
      default=True,
      help='Whether to write a binary format graph.')
  parser.add_argument(
      '--input_node_names',
      type=str,
      default='',
      help='The name of the input nodes, comma separated.')
  parser.add_argument(
      '--output_node_names',
      type=str,
      default='',
      help='The name of the output nodes, comma separated.')
  parser.add_argument(
      '--placeholder_type_enum',
      type=int,
      default=dtypes.float32.as_datatype_enum,
      help='The AttrValue enum to use for placeholders.')
  # Unknown flags are forwarded untouched to app.run via argv.
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
wuhengzhi/chromium-crosswalk | chrome/common/extensions/docs/server2/path_canonicalizer.py | 78 | 4806 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _Normalize(file_name, splittext=False):
normalized = file_name
if splittext:
normalized = posixpath.splitext(file_name)[0]
normalized = normalized.replace('.', '').replace('-', '').replace('_', '')
return normalized.lower()
def _CommonNormalizedPrefix(first_file, second_file):
  '''Return the longest common prefix of the two file names after each has
  been reduced to its normalized form via _Normalize.
  '''
  normalized_pair = (_Normalize(first_file), _Normalize(second_file))
  return posixpath.commonprefix(normalized_pair)
class PathCanonicalizer(object):
  '''Transforms paths into their canonical forms. Since the docserver has had
  many incarnations - e.g. there didn't use to be apps/ - there may be old
  paths lying around the webs. We try to redirect those to where they are now.
  '''
  def __init__(self,
               file_system,
               object_store_creator,
               strip_extensions):
    # |strip_extensions| is a list of file extensions (e.g. .html) that should
    # be stripped for a path's canonical form.
    # Cache is keyed per file-system identity so different file systems do
    # not share canonical-path data.
    self._cache = object_store_creator.Create(
        PathCanonicalizer, category=file_system.GetIdentity())
    self._file_system = file_system
    self._strip_extensions = strip_extensions
  def _LoadCache(self):
    # Returns a Future of (canonical_paths, simplified_paths_map),
    # computing both by walking the file system on first use and storing
    # them in the object store for subsequent calls.
    def load(cached):
      # |canonical_paths| is the pre-calculated set of canonical paths.
      # |simplified_paths_map| is a lazily populated mapping of simplified file
      # names to a list of full paths that contain them. For example,
      #   - browseraction: [extensions/browserAction.html]
      #   - storage: [apps/storage.html, extensions/storage.html]
      canonical_paths, simplified_paths_map = (
          cached.get('canonical_paths'), cached.get('simplified_paths_map'))
      if canonical_paths is None:
        # Cache miss: both values are always written together, so neither
        # should be present.
        assert simplified_paths_map is None
        canonical_paths = set()
        simplified_paths_map = defaultdict(list)
        for base, dirs, files in self._file_system.Walk(''):
          for path in dirs + files:
            path_without_ext, ext = posixpath.splitext(path)
            canonical_path = posixpath.join(base, path_without_ext)
            # Keep the extension unless it is one we canonically strip;
            # the site verification file must keep its name exactly.
            if (ext not in self._strip_extensions or
                path == SITE_VERIFICATION_FILE):
              canonical_path += ext
            canonical_paths.add(canonical_path)
            simplified_paths_map[_Normalize(path, splittext=True)].append(
                canonical_path)
        # Store |simplified_paths_map| sorted. Ties in length are broken by
        # taking the shortest, lexicographically smallest path.
        for path_list in simplified_paths_map.itervalues():
          path_list.sort(key=lambda p: (len(p), p))
        self._cache.SetMulti({
          'canonical_paths': canonical_paths,
          'simplified_paths_map': simplified_paths_map,
        })
      else:
        assert simplified_paths_map is not None
      return canonical_paths, simplified_paths_map
    return self._cache.GetMulti(('canonical_paths',
                                 'simplified_paths_map')).Then(load)
  def Canonicalize(self, path):
    '''Returns the canonical path for |path|.
    '''
    canonical_paths, simplified_paths_map = self._LoadCache().Get()
    # Path may already be the canonical path.
    if path in canonical_paths:
      return path
    # Path not found. Our single heuristic: find |base| in the directory
    # structure with the longest common prefix of |path|.
    _, base = SplitParent(path)
    # Paths with a non-extension dot separator lose information in
    # _SimplifyFileName, so we try paths both with and without the dot to
    # maximize the possibility of finding the right path.
    potential_paths = (
        simplified_paths_map.get(_Normalize(base), []) +
        simplified_paths_map.get(_Normalize(base, splittext=True), []))
    if potential_paths == []:
      # There is no file with anything close to that name.
      return path
    # The most likely canonical file is the one with the longest common prefix
    # with |path|. This is slightly weaker than it could be; |path| is
    # compared without symbols, not the simplified form of |path|,
    # which may matter.
    max_prefix = potential_paths[0]
    max_prefix_length = len(_CommonNormalizedPrefix(max_prefix, path))
    for path_for_file in potential_paths[1:]:
      prefix_length = len(_CommonNormalizedPrefix(path_for_file, path))
      if prefix_length > max_prefix_length:
        max_prefix, max_prefix_length = path_for_file, prefix_length
    return max_prefix
  def Refresh(self):
    # Pre-warm the cache; returns the same Future as _LoadCache.
    return self._LoadCache()
| bsd-3-clause |
LICEF/edx-platform | lms/djangoapps/courseware/management/commands/metadata_to_json.py | 30 | 2740 | """
A script to walk a course xml tree, generate a dictionary of all the metadata,
and print it out as a json dict.
"""
import sys
import json
from collections import OrderedDict
from path import path
from django.core.management.base import BaseCommand
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.x_module import policy_key
def import_course(course_dir, verbose=True):
    """Load the single course contained in `course_dir` from an XML
    modulestore.

    Returns the course descriptor, or None when the directory does not
    contain exactly one course.  Import errors are reported to stderr.
    """
    course_dir = path(course_dir)
    # No default class--want to complain if it doesn't find plugins for any
    # module.
    store = XMLModuleStore(course_dir.dirname(),
                           default_class=None,
                           course_dirs=[course_dir.basename()])

    courses = store.get_courses()
    num_courses = len(courses)
    if num_courses != 1:
        sys.stderr.write('ERROR: Expect exactly 1 course. Loaded {n}: {lst}\n'.format(
            n=num_courses, lst=courses))
        return None

    course = courses[0]
    errors = store.get_course_errors(course.id)
    if errors:
        def str_of_err(tpl):
            (msg, exc_str) = tpl
            return '{msg}\n{exc}'.format(msg=msg, exc=exc_str)
        sys.stderr.write('ERRORs during import: {0}\n'.format(
            '\n'.join(map(str_of_err, errors))))

    return course
def node_metadata(node):
    """Return a dict containing only the exportable metadata fields that
    are actually set on `node`."""
    # make a copy
    to_export = ('format', 'display_name',
                 'graceperiod', 'showanswer', 'rerandomize',
                 'start', 'due', 'graded', 'hide_from_toc',
                 'ispublic', 'xqa_key')

    # NOTE(review): `own_metadata` is not imported anywhere in this module,
    # so this line raises NameError at runtime -- presumably it should be
    # imported from the xmodule modulestore package; confirm and add the
    # missing import.
    orig = own_metadata(node)
    d = {k: orig[k] for k in to_export if k in orig}
    return d
def get_metadata(course):
    """Walk the course tree and return an OrderedDict mapping each node's
    policy key to its exportable metadata, in pre-order."""
    metadata = OrderedDict()
    # Treat the list as a stack; pushing children reversed means the first
    # child is popped (and therefore recorded) first.
    stack = [course]
    while stack:
        node = stack.pop()
        metadata[policy_key(node.location)] = node_metadata(node)
        stack.extend(reversed(node.get_children()))
    return metadata
def print_metadata(course_dir, output):
course = import_course(course_dir)
if course:
meta = get_metadata(course)
result = json.dumps(meta, indent=4)
if output:
with file(output, 'w') as f:
f.write(result)
else:
print result
class Command(BaseCommand):
    # Django management command entry point; `help` is what manage.py shows.
    help = """Imports specified course.xml and prints its
    metadata as a json dict.
    Usage: metadata_to_json PATH-TO-COURSE-DIR OUTPUT-PATH
    if OUTPUT-PATH isn't given, print to stdout.
    """
    def handle(self, *args, **options):
        """Validate positional arguments and dispatch to print_metadata."""
        n = len(args)
        # Require one or two positional arguments; otherwise echo usage.
        if n < 1 or n > 2:
            print Command.help
            return
        # Second argument (output path) is optional; None means stdout.
        output_path = args[1] if n > 1 else None
        print_metadata(args[0], output_path)
| agpl-3.0 |
robwarm/gpaw-symm | gpaw/cluster.py | 1 | 6122 | """Extensions to the ase Atoms class
"""
import numpy as np
from ase import Atoms
from ase.io import read, write
from ase.data import covalent_radii
from ase.calculators.neighborlist import NeighborList
class Cluster(Atoms):
    """A class for cluster structures
    to enable simplified manipulation"""

    def __init__(self, *args, **kwargs):
        # Free-form extra per-structure data; accessed via get()/set().
        self.data = {}

        if len(args) > 0:
            filename = args[0]
            if isinstance(filename, str):
                # Cluster('file.xyz'): initialise by reading the file.
                self.read(filename, kwargs.get('filetype'))
                return
        else:
            Atoms.__init__(self, [])

        if kwargs.get('filename') is not None:
            # Cluster(..., filename='file.xyz'): build the Atoms object
            # first, then replace its contents with the file's.
            filename = kwargs.pop('filename')
            Atoms.__init__(self, *args, **kwargs)
            self.read(filename, kwargs.get('filetype'))
        else:
            Atoms.__init__(self, *args, **kwargs)

    def extreme_positions(self):
        """Return the extreme positions of the structure as a (2, 3) array:
        row 0 holds the per-axis minima, row 1 the per-axis maxima."""
        pos = self.get_positions()
        return np.array([np.minimum.reduce(pos), np.maximum.reduce(pos)])

    def find_connected(self, index, dmax=None, scale=1.5):
        """Find the atoms connected to self[index] and return them.

        If dmax is not None:
        Atoms are defined to be connected if they are nearer than dmax
        to each other.

        If dmax is None:
        Atoms are defined to be connected if they are nearer than the
        sum of their covalent radii * scale to each other.
        """
        # set neighbor lists
        neighborlist = []
        if dmax is None:
            # define neighbors according to covalent radii
            radii = scale * covalent_radii[self.get_atomic_numbers()]
            for atom in self:
                positions = self.positions - atom.position
                distances = np.sqrt(np.sum(positions**2, axis=1))
                radius = scale * covalent_radii[atom.number]
                neighborlist.append(np.where(distances < radii + radius)[0])
        else:
            # define neighbors according to distance
            nl = NeighborList([0.5 * dmax] * len(self), skin=0)
            nl.update(self)
            for i, atom in enumerate(self):
                neighborlist.append(list(nl.get_neighbors(i)[0]))

        # Transitive closure: keep sweeping until no new atom is added.
        connected = list(neighborlist[index])
        isolated = False
        while not isolated:
            isolated = True
            for i in connected:
                for j in neighborlist[i]:
                    if j not in connected:
                        connected.append(j)
                        isolated = False

        atoms = Cluster()
        for i in connected:
            atoms.append(self[i])

        return atoms

    def minimal_box(self, border=0, h=None, multiple=4):
        """The box needed to fit the structure in.

        The structure is moved to fit into the box [(0,x),(0,y),(0,z)]
        with x,y,z > 0 (fitting the ASE constriction).
        The border argument can be used to add a border of empty space
        around the structure.

        If h is set, the box is extended to ensure that box/h is
        a multiple of 'multiple'.
        This ensures that GPAW uses the desired h.

        The shift applied to the structure is returned.
        """
        if len(self) == 0:
            return None

        extr = self.extreme_positions()

        # add borders (isinstance replaces the old type(...)==type([]) check)
        if isinstance(border, list):
            b = border
        else:
            b = [border, border, border]
        for c in range(3):
            extr[0][c] -= b[c]
            extr[1][c] += b[c] - extr[0][c]  # shifted already

        # check for multiple of 4
        if h is not None:
            if not hasattr(h, '__len__'):
                h = np.array([h, h, h])
            for c in range(3):
                # apply the same as in paw.py
                L = extr[1][c]  # shifted already
                N = np.ceil(L / h[c] / multiple) * multiple
                # correct L
                dL = N * h[c] - L
                # move accordingly
                extr[1][c] += dL  # shifted already
                extr[0][c] -= dL / 2.

        # move lower corner to (0, 0, 0)
        shift = tuple(-1. * np.array(extr[0]))
        self.translate(shift)
        self.set_cell(tuple(extr[1]))

        return shift

    def get(self, name):
        """General get: prefer a get_<name>() accessor, fall back to
        self.data, and return None for unknown names."""
        attr = 'get_' + name
        if hasattr(self, attr):
            # Bug fix: this used to call getattr(self, attr)(data) with an
            # undefined name `data` (NameError) and discarded the result.
            return getattr(self, attr)()
        elif name in self.data:
            return self.data[name]
        else:
            return None

    def set(self, name, data):
        """General set: prefer a set_<name>(data) mutator, fall back to
        storing the value in self.data."""
        attr = 'set_' + name
        if hasattr(self, attr):
            getattr(self, attr)(data)
        else:
            self.data[name] = data

    def read(self, filename, format=None):
        """Read the structure from some file. The type can be given
        or it will be guessed from the filename."""
        self.__init__(read(filename, format=format))
        return len(self)

    def write(self, filename=None, format=None, repeat=None):
        """Write the structure to file.

        Parameters
        ----------
        format: string
          can be given or it will be guessed from the filename
        repeat: array, eg.: [1,0,1]
          can be used to repeat the structure
        """
        if filename is None:
            if format is None:
                raise RuntimeError('Please specify either filename or format.')
            else:
                filename = self.get_name() + '.' + format

        if repeat is None:
            out = self
        else:
            # Tile the structure along the cell vectors.
            out = Cluster([])
            cell = self.get_cell().diagonal()
            for i in range(repeat[0] + 1):
                for j in range(repeat[1] + 1):
                    for k in range(repeat[2] + 1):
                        copy = self.copy()
                        copy.translate(np.array([i, j, k]) * cell)
                        out += copy

        write(filename, out, format)
| gpl-3.0 |
Jorge-Rodriguez/ansible | test/units/modules/network/f5/test_bigip_remote_syslog.py | 21 | 6862 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_remote_syslog import ApiParameters
from library.modules.bigip_remote_syslog import ModuleParameters
from library.modules.bigip_remote_syslog import ModuleManager
from library.modules.bigip_remote_syslog import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_remote_syslog import ApiParameters
from ansible.modules.network.f5.bigip_remote_syslog import ModuleParameters
from ansible.modules.network.f5.bigip_remote_syslog import ModuleManager
from ansible.modules.network.f5.bigip_remote_syslog import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Absolute path to the JSON fixtures shipped next to this test module.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Per-path cache, populated lazily by load_fixture().
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixtures/<name>, parsed as JSON when the file
    contains valid JSON, caching the result for subsequent calls."""
    full_path = os.path.join(fixture_path, name)
    if full_path not in fixture_data:
        with open(full_path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            # Not JSON; keep the raw string contents.
            pass
        fixture_data[full_path] = contents
    return fixture_data[full_path]
class TestParameters(unittest.TestCase):
    """Unit tests for the module-argument parameter adapter."""
    def test_module_parameters(self):
        # Raw module arguments as a playbook would supply them.
        args = dict(
            remote_host='10.10.10.10',
            remote_port=514,
            local_ip='1.1.1.1'
        )
        p = ModuleParameters(params=args)
        # Values should round-trip unchanged through the adapter.
        assert p.remote_host == '10.10.10.10'
        assert p.remote_port == 514
        assert p.local_ip == '1.1.1.1'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager against canned device fixtures, with all
    device I/O mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_remote_syslog(self, *args):
        """Adding a brand new remote host reports a change."""
        set_module_args(dict(
            remote_host='1.1.1.1',
            server='localhost',
            password='password',
            user='admin'
        ))

        fixture = load_fixture('load_tm_sys_syslog_1.json')
        current = fixture['remoteServers']

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.update_on_device = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()

        assert results['changed'] is True

    def test_create_remote_syslog_idempotent(self, *args):
        """Re-adding a host that is already configured is a no-op."""
        set_module_args(dict(
            name='remotesyslog1',
            remote_host='10.10.10.10',
            server='localhost',
            password='password',
            user='admin'
        ))

        fixture = load_fixture('load_tm_sys_syslog_1.json')
        current = fixture['remoteServers']

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)

        results = mm.exec_module()

        assert results['changed'] is False

    def test_update_remote_port(self, *args):
        """Changing the remote port updates the device and reports it."""
        set_module_args(dict(
            remote_host='10.10.10.10',
            remote_port=800,
            server='localhost',
            password='password',
            user='admin'
        ))

        fixture = load_fixture('load_tm_sys_syslog_1.json')
        current = fixture['remoteServers']

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        mm.update_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['remote_port'] == 800

    def test_update_local_ip(self, *args):
        """Changing the local IP updates the device and reports it."""
        set_module_args(dict(
            remote_host='10.10.10.10',
            local_ip='2.2.2.2',
            server='localhost',
            password='password',
            user='admin'
        ))

        fixture = load_fixture('load_tm_sys_syslog_1.json')
        current = fixture['remoteServers']

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        mm.update_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['local_ip'] == '2.2.2.2'

    def test_update_no_name_dupe_host(self, *args):
        """An ambiguous (duplicated) hostname must raise F5ModuleError."""
        set_module_args(dict(
            remote_host='10.10.10.10',
            local_ip='2.2.2.2',
            server='localhost',
            password='password',
            user='admin'
        ))

        fixture = load_fixture('load_tm_sys_syslog_2.json')
        current = fixture['remoteServers']

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=True)
        mm.read_current_from_device = Mock(return_value=current)
        mm.update_on_device = Mock(return_value=True)

        with pytest.raises(F5ModuleError) as ex:
            mm.exec_module()
        # Fix: str(ex) stringifies pytest's ExceptionInfo wrapper (location
        # info), not the exception message; inspect ex.value instead.
        assert "Multiple occurrences of hostname" in str(ex.value)
| gpl-3.0 |
cggh/DQXServer | responders/recordinfo.py | 1 | 1698 | # This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <info@cggh.org>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>
import DQXDbTools
import DQXUtils
from DQXDbTools import DBCOLESC
from DQXDbTools import DBTBESC
import config
def response(returndata):
    """Fetch the single record matching the encoded query and attach it to
    the response payload as a column-name -> string-value mapping.  Sets
    'Error' instead when no row matches."""
    table_name = returndata['tbname']
    encoded_query = returndata['qry']

    # Optional target database; None selects the default.
    database_name = returndata.get('database')

    with DQXDbTools.DBCursor(returndata, database_name, read_timeout=config.TIMEOUT) as cur:
        whc = DQXDbTools.WhereClause()
        whc.ParameterPlaceHolder = '%s' #NOTE!: MySQL PyODDBC seems to require this nonstardard coding
        whc.Decode(encoded_query)
        whc.CreateSelectStatement()

        sqlquery = "SELECT * FROM {0} WHERE {1}".format(
            DBTBESC(table_name),
            whc.querystring_params
        )

        if DQXDbTools.LogRequests:
            DQXUtils.LogServer('###QRY:'+sqlquery)
            DQXUtils.LogServer('###PARAMS:'+str(whc.queryparams))

        cur.execute(sqlquery, whc.queryparams)
        therow = cur.fetchone()
        if therow is None:
            returndata['Error'] = 'Record not found'
        else:
            # Pair each cursor-description entry with its row value.
            returndata['Data'] = {
                column[0]: str(value)
                for column, value in zip(cur.description, therow)
            }

    return returndata
| agpl-3.0 |
BeegorMif/HTPC-Manager | lib/html5lib/sanitizer.py | 100 | 14472 | import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""

    # --- whitelists: any element/attribute/protocol/CSS token not listed
    # --- below is stripped or escaped by sanitize_token()/sanitize_css()
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']

    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
        'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
        'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
        'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
        'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
        'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
        'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
        'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
        'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
        'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
        'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
        'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
        'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
        'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
        'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
        'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
        'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']

    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
        'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
        'fill-opacity', 'fill-rule', 'font-family', 'font-size',
        'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
        'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
        'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
        'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
        'opacity', 'orient', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
        'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
        'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
        'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
        'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
        'transform', 'type', 'u1', 'u2', 'underline-position',
        'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
        'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
        'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
        'y1', 'y2', 'zoomAndPan']

    # attributes whose values are URIs and must pass the protocol whitelist
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
        'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references (scrubbed below)
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
        'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
        'mask', 'stroke']

    # SVG elements whose xlink:href must stay document-local (start with '#')
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
        'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
        'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
        'set', 'use']

    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
        'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
        'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
        'ssh', 'sftp', 'rtsp', 'afs' ]

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff()&lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize one tokenizer token in place and return it.

        Allowed tags keep only whitelisted attributes (with URI-protocol,
        url(...)-reference, xlink:href and style filtering); disallowed tags
        are turned into escaped "Characters" text; comments are dropped
        (implicitly returns None); all other token types pass through.
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in tokenTypes.keys():
            token_type = tokenTypes[token_type]

        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                if token.has_key("data"):
                    # reversed iteration so the first occurrence of a
                    # duplicated attribute wins in the resulting dict
                    attrs = dict([(name,val) for name,val in
                                  token["data"][::-1]
                                  if name in self.allowed_attributes])
                    for attr in self.attr_val_is_uri:
                        if not attrs.has_key(attr):
                            continue
                        # strip control/space chars that can hide a scheme
                        val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                               unescape(attrs[attr])).lower()
                        #remove replacement characters from unescaped characters
                        val_unescaped = val_unescaped.replace(u"\ufffd", "")
                        if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
                            (val_unescaped.split(':')[0] not in
                             self.allowed_protocols)):
                            del attrs[attr]
                    for attr in self.svg_attr_val_allows_ref:
                        if attr in attrs:
                            # drop non-local url(...) references
                            attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                                 ' ',
                                                 unescape(attrs[attr]))
                    if (token["name"] in self.svg_allow_local_href and
                        'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                            attrs['xlink:href'])):
                        del attrs['xlink:href']
                    if attrs.has_key('style'):
                        attrs['style'] = self.sanitize_css(attrs['style'])
                    token["data"] = [[name,val] for name,val in attrs.items()]
                return token
            else:
                # disallowed element: re-serialize it as escaped text
                if token_type == tokenTypes["EndTag"]:
                    token["data"] = "</%s>" % token["name"]
                elif token["data"]:
                    attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
                    token["data"] = "<%s%s>" % (token["name"],attrs)
                else:
                    token["data"] = "<%s>" % token["name"]
                if token.get("selfClosing"):
                    token["data"]=token["data"][:-1] + "/>"

                if token["type"] in tokenTypes.keys():
                    token["type"] = "Characters"
                else:
                    token["type"] = tokenTypes["Characters"]

                del token["name"]
                return token
        elif token_type == tokenTypes["Comment"]:
            # comments are removed entirely (falls through, returns None)
            pass
        else:
            return token

    def sanitize_css(self, style):
        """Return *style* with only whitelisted CSS properties/keywords kept."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''

        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin',
                                                'padding']:
                # shorthand properties: every keyword must be acceptable
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes markup as it streams tokens."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Case-matching defaults differ from HTMLTokenizer's because we only
        # emit lowercase html anyway; this solution doesn't seem ideal.
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)

    def __iter__(self):
        """Yield sanitized tokens, skipping those rejected outright."""
        for raw_token in HTMLTokenizer.__iter__(self):
            cleaned = self.sanitize_token(raw_token)
            if cleaned:
                yield cleaned
| gpl-3.0 |
lthall/Leonard_ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/dma_resolver.py | 15 | 20646 | #!/usr/bin/env python
import sys, fnmatch
import importlib
# peripheral types that can be shared, wildcard patterns
SHARED_MAP = ["I2C*", "USART*_TX", "UART*_TX", "SPI*", "TIM*_UP", "TIM*_CH*"]
# peripherals that collided during assignment (appended to by check_possibility)
ignore_list = []
# DMA map: loaded from the MCU module's DMA_Map, or generated for DMAMUX MCUs
dma_map = None
# verbose tracing; set from the -D/--debug command line option
debug = False
def check_possibility(periph, dma_stream, curr_dict, dma_map, check_list, cannot_use_stream, forbidden_map):
    """Return True if periph can take dma_stream given the assignments in curr_dict.

    If another peripheral already holds dma_stream, recursively try to move it
    onto one of its alternative streams, updating curr_dict on success.
    check_list remembers conflict pairs already attempted (terminates the
    recursion); cannot_use_stream lists streams excluded further up the
    recursion; the global ignore_list records peripherals that collided.
    """
    global ignore_list
    if debug:
        print('............ Checking ', periph, dma_stream, 'without', cannot_use_stream)
    for other_periph in curr_dict:
        if other_periph != periph:
            if curr_dict[other_periph] == dma_stream:
                # stream already taken; sharing with a forbidden partner is fatal
                if other_periph in forbidden_map[periph]:
                    if debug:
                        print('.................... Forbidden', periph, other_periph)
                    return False
                if debug:
                    print('.................... Collision', other_periph, dma_stream)
                ignore_list.append(periph)
                check_str = "%s(%d,%d) %s(%d,%d)" % (
                    other_periph, curr_dict[other_periph][0],
                    curr_dict[other_periph][1], periph, dma_stream[0],
                    dma_stream[1])
                #check if we did this before
                if check_str in check_list:
                    return False
                check_list.append(check_str)
                if debug:
                    print("Trying to Resolve Conflict: ", check_str)
                #check if we can resolve by swapping with other periphs
                for streamchan in dma_map[other_periph]:
                    stream = (streamchan[0], streamchan[1])
                    if stream != curr_dict[other_periph] and \
                       stream not in cannot_use_stream and \
                       check_possibility(other_periph, stream, curr_dict, dma_map, check_list,
                                         cannot_use_stream+[(dma_stream)], forbidden_map):
                        curr_dict[other_periph] = stream
                        if debug:
                            print ('....................... Resolving', other_periph, stream)
                        return True
                if debug:
                    print ('....................... UnSolved !!!!!!!!', periph, dma_stream)
                return False
    if debug:
        print ('....................... Solved ..........', periph, dma_stream)
    return True
def can_share(periph, noshare_list):
    '''check if a peripheral is in the SHARED_MAP list'''
    # an explicit noshare entry always wins over SHARED_MAP
    if any(fnmatch.fnmatch(periph, pattern) for pattern in noshare_list):
        return False
    if any(fnmatch.fnmatch(periph, pattern) for pattern in SHARED_MAP):
        return True
    if debug:
        print("%s can't share" % periph)
    return False
# list of peripherals on H7 that are on DMAMUX2 and BDMA
have_DMAMUX = False  # set True in write_dma_header() when the MCU module has no static DMA_Map
DMAMUX2_peripherals = [ 'I2C4', 'SPI6', 'ADC3' ]
def dmamux_channel(key):
    '''return DMAMUX channel for H7'''
    # BDMA-served peripherals live on DMAMUX2
    if any(p in key for p in DMAMUX2_peripherals):
        return 'STM32_DMAMUX2_' + key
    # default to DMAMUX1
    return 'STM32_DMAMUX1_' + key
def dma_name(key):
    '''return 'DMA' or 'BDMA' based on peripheral name'''
    if not have_DMAMUX:
        return "DMA"
    uses_bdma = any(p in key for p in DMAMUX2_peripherals)
    return 'BDMA' if uses_bdma else 'DMA'
def chibios_dma_define_name(key):
    '''return define name needed for board.h for ChibiOS'''
    dma_key = key + '_' + dma_name(key)
    # map peripheral-name prefixes to the ChibiOS define prefix
    prefix_table = (
        (('ADC',), 'STM32_ADC_%s_'),
        (('SPI',), 'STM32_SPI_%s_'),
        (('I2C',), 'STM32_I2C_%s_'),
        (('USART', 'UART'), 'STM32_UART_%s_'),
        (('SDIO', 'SDMMC'), 'STM32_SDC_%s_'),
        (('TIM',), 'STM32_TIM_%s_'),
    )
    for prefixes, fmt in prefix_table:
        if key.startswith(prefixes):
            return fmt % dma_key
    print("Error: Unknown key type %s" % key)
    sys.exit(1)
def get_list_index(peripheral, priority_list):
    '''return index into priority_list for a peripheral'''
    for idx, pattern in enumerate(priority_list):
        if fnmatch.fnmatch(peripheral, pattern):
            return idx
    # default to max priority
    return len(priority_list)
def get_sharing_priority(periph_list, priority_list):
    '''get priority of a list of peripherals we could share with'''
    # lowest index wins; an empty list yields the max-priority default
    candidates = [len(priority_list)]
    candidates.extend(get_list_index(p, priority_list) for p in periph_list)
    return min(candidates)
def generate_DMAMUX_map_mask(peripheral_list, channel_mask, noshare_list, dma_exclude, stream_ofs):
    '''
    generate a dma map suitable for a board with a DMAMUX
    In principle any peripheral can use any stream, but we need to
    ensure that a peripheral doesn't try to use the same stream as its
    partner (eg. a RX/TX pair)
    '''
    # dma_map: peripheral -> list of (controller, stream) pairs
    # idsets: peripheral -> set of raw stream ids (0..15) it holds
    dma_map = {}
    idsets = {}
    # first unshareable peripherals
    # bit i of 'available' means stream id i is free;
    # id i maps to controller (i//8)+1, stream i%8
    available = channel_mask
    for p in peripheral_list:
        dma_map[p] = []
        idsets[p] = set()
    for p in peripheral_list:
        if can_share(p, noshare_list) or p in dma_exclude:
            continue
        for i in range(16):
            mask = (1<<i)
            if available & mask != 0:
                available &= ~mask
                dma = (i // 8) + 1
                stream = i % 8
                dma_map[p].append((dma,stream))
                idsets[p].add(i)
                break
    if debug:
        print('dma_map1: ', dma_map)
        print('available: 0x%04x' % available)
    # now shareable
    # 'idx' rotates the starting stream so shareable peripherals spread out
    idx = 0
    for p in peripheral_list:
        if not can_share(p, noshare_list) or p in dma_exclude:
            continue
        base = idx % 16
        for i in range(16):
            found = None
            for ii in list(range(base,16)) + list(range(0,base)):
                if (1<<ii) & available == 0:
                    continue
                dma = (ii // 8) + 1
                stream = ii % 8
                if (dma,stream) in dma_map[p]:
                    # this peripheral is already using the stream
                    continue
                # prevent attempts to share with other half of same peripheral
                if p.endswith('RX'):
                    other = p[:-2] + 'TX'
                elif p.endswith('TX'):
                    other = p[:-2] + 'RX'
                else:
                    other = None
                if other is not None and other in idsets and ii in idsets[other]:
                    if len(idsets[p]) >= len(idsets[other]) and len(idsets[other]) > 0:
                        continue
                    # steal the stream from the partner, which has more options
                    idsets[other].remove(ii)
                    dma_map[other].remove((dma,stream))
                found = ii
                break
            if found is None:
                continue
            base = (found+1) % 16
            dma = (found // 8) + 1
            stream = found % 8
            dma_map[p].append((dma,stream))
            idsets[p].add(found)
        idx = (idx+1) % 16
    if stream_ofs != 0:
        # add in stream_ofs to cope with STM32G4
        for p in dma_map.keys():
            for (dma,stream) in dma_map[p]:
                map2 = []
                map2.append((dma,stream+stream_ofs))
                dma_map[p] = map2
    if debug:
        print('dma_map: ', dma_map)
        print('idsets: ', idsets)
        print('available: 0x%04x' % available)
    return dma_map
def generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude, stream_ofs):
    '''
    generate a dma map suitable for a board with a DMAMUX1 and DMAMUX2
    '''
    # split peripherals by which mux serves them: BDMA-driven ones use DMAMUX2
    dmamux2_peripherals = [p for p in peripheral_list if dma_name(p) == 'BDMA']
    dmamux1_peripherals = [p for p in peripheral_list if dma_name(p) != 'BDMA']
    map1 = generate_DMAMUX_map_mask(dmamux1_peripherals, 0xFFFF, noshare_list, dma_exclude, stream_ofs)
    # there are 8 BDMA channels, but an issue has been found where if I2C4 and SPI6
    # use neighboring channels then we sometimes lose a BDMA completion interrupt. To
    # avoid this we set the BDMA available mask to 0x33, which forces the channels not to be
    # adjacent. This issue was found on a CUAV-X7, with H743 RevV.
    map2 = generate_DMAMUX_map_mask(dmamux2_peripherals, 0x55, noshare_list, dma_exclude, stream_ofs)
    # translate entries from map2 to "DMA controller 3", which is used for BDMA
    for p in map2:
        map2[p] = [(3, stream) for (_controller, stream) in map2[p]]
    both = map1
    both.update(map2)
    if debug:
        print('dma_map_both: ', both)
    return both
def sharing_allowed(p1, p2):
    '''return true if sharing is allowed between p1 and p2'''
    if p1 == p2:
        return True
    # don't allow RX and TX of same peripheral to share
    # NOTE(review): this check is one-directional (p1=RX, p2=TX only);
    # check_sharing() tests every ordered pair, which covers the reverse
    rx_tx_pair = (p1.endswith('_RX') and p2.endswith('_TX') and
                  p1[:-2] == p2[:-2])
    if rx_tx_pair:
        return False
    # don't allow sharing of two TIMn_UP channels as DShot code can't cope
    both_tim_up = (p1.startswith("TIM") and p1.endswith("_UP") and
                   p2.startswith("TIM") and p2.endswith("_UP"))
    return not both_tim_up
def check_sharing(shared):
    '''check if DMA channel sharing is OK'''
    for p in shared:
        # don't share UART RX with anything
        if p.endswith("_RX") and (p.startswith("UART") or p.startswith("USART")):
            print("Illegal sharing of %s" % p)
            return False
        # don't share ADC with anything
        if p.startswith("ADC"):
            print("Illegal sharing of %s" % p)
            return False
        # every ordered pair must be mutually allowed
        for other in shared:
            if not sharing_allowed(p, other):
                print("Illegal sharing of %s and %s" % (p, other))
                return False
    return True
def forbidden_list(p, peripheral_list):
    '''given a peripheral, form a list of other peripherals we may not share with'''
    return [other for other in peripheral_list if not sharing_allowed(p, other)]
def write_dma_header(f, peripheral_list, mcu_type, dma_exclude=[],
                     dma_priority='', dma_noshare=[]):
    '''write out a DMA resolver header file

    Writes the generated #define lines to file object f.  Returns
    (unassigned, ordered_timers): the peripherals that could not be given a
    DMA stream, and timer names ordered by the stream id of their UP channel.
    '''
    global dma_map, have_DMAMUX, has_bdshot
    timer_ch_periph = []
    has_bdshot = False
    # form a list of DMA priorities
    priority_list = dma_priority.split()
    # sort by priority
    peripheral_list = sorted(peripheral_list, key=lambda x: get_list_index(x, priority_list))
    # form a list of peripherals that can't share
    noshare_list = dma_noshare[:]
    try:
        lib = importlib.import_module(mcu_type)
        if hasattr(lib, "DMA_Map"):
            dma_map = lib.DMA_Map
        else:
            return [], []
    except ImportError:
        print("Unable to find module for MCU %s" % mcu_type)
        sys.exit(1)
    if dma_map is None:
        # no static map for this MCU: it has a DMAMUX, so generate one
        have_DMAMUX = True
        # ensure we don't assign dma for TIMx_CH as we share that with TIMx_UP
        timer_ch_periph = [periph for periph in peripheral_list if "_CH" in periph]
        dma_exclude += timer_ch_periph
        if mcu_type.startswith("STM32G4"):
            stream_ofs = 1
        else:
            stream_ofs = 0
        dma_map = generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude, stream_ofs)
    print("Writing DMA map")
    unassigned = []
    curr_dict = {}
    # build a map from peripheral name to a list of peripherals that it cannot share with
    forbidden_map = {}
    for p in peripheral_list:
        forbidden_map[p] = forbidden_list(p, peripheral_list)
    # first pass: give every peripheral its own stream where possible
    for periph in peripheral_list:
        if "_CH" in periph:
            has_bdshot = True # the list contains a CH port
        if periph in dma_exclude:
            continue
        assigned = False
        check_list = []
        if not periph in dma_map:
            print("Unknown peripheral function %s in DMA map for %s" %
                  (periph, mcu_type))
            sys.exit(1)
        if debug:
            print('\n\n.....Starting lookup for', periph)
        for streamchan in dma_map[periph]:
            if debug:
                print('........Possibility for', periph, streamchan)
            stream = (streamchan[0], streamchan[1])
            if check_possibility(periph, stream, curr_dict, dma_map,
                                 check_list, [], forbidden_map):
                curr_dict[periph] = stream
                if debug:
                    print ('....................... Setting', periph, stream)
                assigned = True
                break
        if assigned == False:
            unassigned.append(periph)
    if debug:
        print('curr_dict: ', curr_dict)
        print('unassigned: ', unassigned)
    # now look for shared DMA possibilities
    # stream_assign: stream -> list of peripherals currently using it
    stream_assign = {}
    for k in curr_dict.keys():
        p = curr_dict[k]
        if not p in stream_assign:
            stream_assign[p] = [k]
        else:
            stream_assign[p].append(k)
    unassigned_new = unassigned[:]
    for periph in unassigned:
        share_possibility = []
        for streamchan in dma_map[periph]:
            stream = (streamchan[0], streamchan[1])
            share_ok = True
            for periph2 in stream_assign[stream]:
                if not can_share(periph, noshare_list) or not can_share(periph2, noshare_list) or periph2 in forbidden_map[periph]:
                    share_ok = False
            if share_ok:
                share_possibility.append(stream)
        if share_possibility:
            # sort the possible sharings so minimise impact on high priority streams
            share_possibility = sorted(share_possibility, key=lambda x: get_sharing_priority(stream_assign[x], priority_list))
            # and take the one with the least impact (lowest value for highest priority stream share)
            stream = share_possibility[-1]
            if debug:
                print("Sharing %s on %s with %s" % (periph, stream,
                                                    stream_assign[stream]))
            curr_dict[periph] = stream
            stream_assign[stream].append(periph)
            unassigned_new.remove(periph)
    unassigned = unassigned_new
    # validate every shared stream; an illegal combination is fatal
    for key in sorted(curr_dict.keys()):
        stream = curr_dict[key]
        if len(stream_assign[stream]) > 1:
            if not check_sharing(stream_assign[stream]):
                sys.exit(1)
    if debug:
        print(stream_assign)
    f.write("\n\n// auto-generated DMA mapping from dma_resolver.py\n")
    if unassigned:
        f.write(
            "\n// Note: The following peripherals can't be resolved for DMA: %s\n\n"
            % unassigned)
    ordered_up_channels = []
    # produce a list of timers ordered by the DMA streamid of the UP channel
    # this is so that the dshot code can take out the UP DMA locks in the same order as I2C and SPI
    for key in curr_dict.keys():
        if "_UP" in key:
            ordered_up_channels.append(key)
    def order_by_streamid(key):
        # sort key: stream id first, then the timer number (key is "TIMn_UP")
        stream = curr_dict[key]
        return (stream[0] * 8 + stream[1]) * 20 + int(key[3:-3])
    ordered_up_channels = sorted(ordered_up_channels, key=order_by_streamid)
    ordered_timers = []
    for key in ordered_up_channels:
        ordered_timers.append(key[0:-3])
    for key in sorted(curr_dict.keys()):
        stream = curr_dict[key]
        shared = ''
        if len(stream_assign[stream]) > 1:
            shared = ' // shared %s' % ','.join(stream_assign[stream])
        if curr_dict[key] == "STM32_DMA_STREAM_ID_ANY":
            f.write("#define %-30s STM32_DMA_STREAM_ID_ANY\n" % (chibios_dma_define_name(key)+'STREAM'))
            f.write("#define %-30s %s\n" % (chibios_dma_define_name(key)+'CHAN', dmamux_channel(key)))
            continue
        else:
            dma_controller = curr_dict[key][0]
            if dma_controller == 3:
                # for BDMA we use 3 in the resolver
                dma_controller = 1
        f.write("#define %-30s STM32_DMA_STREAM_ID(%u, %u)%s\n" %
                (chibios_dma_define_name(key)+'STREAM', dma_controller,
                 curr_dict[key][1], shared))
        if have_DMAMUX and "_UP" in key:
            # share the dma with rest of the _CH ports
            for ch in range(1,5):
                chkey = key.replace('_UP', '_CH{}'.format(ch))
                if chkey not in timer_ch_periph:
                    continue
                f.write("#define %-30s STM32_DMA_STREAM_ID(%u, %u)%s\n" %
                        (chibios_dma_define_name(chkey)+'STREAM', dma_controller,
                         curr_dict[key][1], shared))
        for streamchan in dma_map[key]:
            if stream == (streamchan[0], streamchan[1]):
                if have_DMAMUX:
                    chan = dmamux_channel(key)
                else:
                    chan = streamchan[2]
                f.write("#define %-30s %s\n" %
                        (chibios_dma_define_name(key)+'CHAN', chan))
                if have_DMAMUX and "_UP" in key:
                    # share the devid with rest of the _CH ports
                    for ch in range(1,5):
                        chkey = key.replace('_UP', '_CH{}'.format(ch))
                        if chkey not in timer_ch_periph:
                            continue
                        f.write("#define %-30s %s\n" %
                                (chibios_dma_define_name(chkey)+'CHAN',
                                 chan.replace('_UP', '_CH{}'.format(ch))))
                break
    # now generate UARTDriver.cpp DMA config lines
    f.write("\n\n// generated UART DMA configuration lines\n")
    for u in range(1, 9):
        key = None
        if 'USART%u_TX' % u in peripheral_list:
            key = 'USART%u' % u
        if 'UART%u_TX' % u in peripheral_list:
            key = 'UART%u' % u
        if 'USART%u_RX' % u in peripheral_list:
            key = 'USART%u' % u
        if 'UART%u_RX' % u in peripheral_list:
            key = 'UART%u' % u
        if key is None:
            continue
        if have_DMAMUX:
            # use DMAMUX ID as channel number
            dma_rx_chn = dmamux_channel(key + "_RX")
            dma_tx_chn = dmamux_channel(key + "_TX")
        else:
            dma_rx_chn = "STM32_UART_%s_RX_%s_CHAN" % (key, dma_name(key))
            dma_tx_chn = "STM32_UART_%s_TX_%s_CHAN" % (key, dma_name(key))
        f.write("#define STM32_%s_RX_DMA_CONFIG " % key)
        if key + "_RX" in curr_dict:
            f.write(
                "true, STM32_UART_%s_RX_%s_STREAM, %s\n" % (key, dma_name(key), dma_rx_chn))
        else:
            f.write("false, 0, 0\n")
        f.write("#define STM32_%s_TX_DMA_CONFIG " % key)
        if key + "_TX" in curr_dict:
            f.write(
                "true, STM32_UART_%s_TX_%s_STREAM, %s\n" % (key, dma_name(key), dma_tx_chn))
        else:
            f.write("false, 0, 0\n")
    # now generate SPI DMA streams lines
    f.write("\n\n// generated SPI DMA configuration lines\n")
    for u in range(1, 9):
        if 'SPI%u_TX' % u in peripheral_list and 'SPI%u_RX' % u in peripheral_list:
            key = 'SPI%u' % u
        else:
            continue
        f.write('#define STM32_SPI_%s_DMA_STREAMS STM32_SPI_%s_TX_%s_STREAM, STM32_SPI_%s_RX_%s_STREAM\n' % (
            key, key, dma_name(key), key, dma_name(key)))
    return unassigned, ordered_timers
if __name__ == '__main__':
    import optparse

    parser = optparse.OptionParser("dma_resolver.py")
    parser.add_option("-M", "--mcu", default=None, help='MCU type')
    parser.add_option(
        "-D", "--debug", action='store_true', help='enable debug')
    parser.add_option(
        "-P",
        "--peripherals",
        default=None,
        help='peripheral list (comma separated)')

    opts, args = parser.parse_args()

    if opts.peripherals is None:
        print("Please provide a peripheral list with -P")
        sys.exit(1)

    if opts.mcu is None:
        # bug fix: the message previously said "-<"; the option flag is -M
        print("Please provide a MCU type with -M")
        sys.exit(1)

    debug = opts.debug

    plist = opts.peripherals.split(',')
    mcu_type = opts.mcu

    # bug fix: use a context manager so dma.h is flushed and closed even if
    # write_dma_header() raises (previously the handle was never closed)
    with open("dma.h", "w") as f:
        write_dma_header(f, plist, mcu_type)
| gpl-3.0 |
troukny/NetGen | configtool/setuptools-3.6/setuptools/tests/test_sdist.py | 332 | 17816 | # -*- coding: utf-8 -*-
"""sdist tests"""
import locale
import os
import shutil
import sys
import tempfile
import unittest
import unicodedata
import re
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
from setuptools.compat import StringIO, unicode
from setuptools.tests.py26compat import skipIf
from setuptools.command.sdist import sdist, walk_revctrl
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools import svn_utils
SETUP_ATTRS = {
'name': 'sdist_test',
'version': '0.0',
'packages': ['sdist_test'],
'package_data': {'sdist_test': ['*.txt']}
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
# Latin-1-encoded filename (bytes on Py3, raw str on Py2) used to exercise
# handling of non-UTF-8 paths in the manifest tests.
if sys.version_info >= (3,):
    LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
    LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
    """Redirect stdout/stderr into throwaway buffers, saving the real streams."""
    global old_stdout, old_stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
def unquiet():
    """Restore the real stdout/stderr saved by quiet()."""
    sys.stdout = old_stdout
    sys.stderr = old_stderr
# Fake byte literals for Python <= 2.5
def b(s, encoding='utf-8'):
    """Fake byte literal: encode on Python 3, pass the str through on Python 2."""
    running_py3 = sys.version_info >= (3,)
    return s.encode(encoding) if running_py3 else s
# Convert to POSIX path
def posix(path):
    """Convert OS-specific path separators to '/', for str or bytes paths."""
    is_bytes_path = sys.version_info >= (3,) and not isinstance(path, str)
    if is_bytes_path:
        return path.replace(os.sep.encode('ascii'), b('/'))
    return path.replace(os.sep, '/')
# HFS Plus uses decomposed UTF-8
def decompose(path):
    """Return *path* in NFD (decomposed) form, as HFS Plus stores UTF-8."""
    if isinstance(path, unicode):
        return unicodedata.normalize('NFD', path)
    try:
        normalized = unicodedata.normalize('NFD', path.decode('utf-8'))
        return normalized.encode('utf-8')
    except UnicodeError:
        return path  # Not UTF-8
class TestSdistTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'sdist_test')
os.mkdir(test_pkg)
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
# Just touch the files; their contents are irrelevant
open(os.path.join(test_pkg, fname), 'w').close()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# squelch output
quiet()
try:
cmd.run()
finally:
unquiet()
manifest = cmd.filelist.files
self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
# Add UTF-8 filename and write manifest
quiet()
try:
mm.run()
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
u_contents = contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
if sys.version_info >= (3,):
self.assertTrue(posix(filename) in u_contents)
else:
self.assertTrue(posix(filename) in contents)
# Python 3 only
if sys.version_info >= (3,):

    def test_write_manifest_allows_utf8_filenames(self):
        """write_manifest() must accept and emit UTF-8 byte filenames
        (regression test for issue #303).
        """
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        mm = manifest_maker(dist)
        mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
        os.mkdir('sdist_test.egg-info')

        # UTF-8 filename (as bytes)
        filename = os.path.join(b('sdist_test'), b('smörbröd.py'))

        # Add filename and write manifest
        quiet()
        try:
            mm.run()
            u_filename = filename.decode('utf-8')
            mm.filelist.files.append(u_filename)
            # Re-write manifest
            mm.write_manifest()
        finally:
            unquiet()

        # Fix: 'rbU' is invalid on Python 3.11+ ('U' was removed and never
        # legally combined with 'b'); 'rb' reads identical bytes everywhere.
        manifest = open(mm.manifest, 'rb')
        contents = manifest.read()
        manifest.close()

        # The manifest should be UTF-8 encoded
        try:
            contents.decode('UTF-8')
        except UnicodeDecodeError:
            e = sys.exc_info()[1]
            self.fail(e)

        # The manifest should contain the UTF-8 filename
        self.assertTrue(posix(filename) in contents)

        # The filelist should have been updated as well
        self.assertTrue(u_filename in mm.filelist.files)
def test_write_manifest_skips_non_utf8_filenames(self):
    """Filenames that are not valid UTF-8 (surrogate-escaped Latin-1) must
    be dropped from the written manifest (regression test for issue #303).
    """
    dist = Distribution(SETUP_ATTRS)
    dist.script_name = 'setup.py'
    mm = manifest_maker(dist)
    mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
    os.mkdir('sdist_test.egg-info')

    # Latin-1 filename (not valid UTF-8)
    filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)

    # Add filename with surrogates and write manifest
    quiet()
    try:
        mm.run()
        u_filename = filename.decode('utf-8', 'surrogateescape')
        mm.filelist.files.append(u_filename)
        # Re-write manifest
        mm.write_manifest()
    finally:
        unquiet()

    # Fix: 'rbU' is invalid on Python 3.11+ ('U' was removed and never
    # legally combined with 'b'); 'rb' reads identical bytes everywhere.
    manifest = open(mm.manifest, 'rb')
    contents = manifest.read()
    manifest.close()

    # The manifest should be UTF-8 encoded
    try:
        contents.decode('UTF-8')
    except UnicodeDecodeError:
        e = sys.exc_info()[1]
        self.fail(e)

    # The Latin-1 filename should have been skipped
    self.assertFalse(posix(filename) in contents)

    # The filelist should have been updated as well
    self.assertFalse(u_filename in mm.filelist.files)
def test_manifest_is_read_with_utf8_encoding(self):
    # Test for #303.
    # A UTF-8 filename appended to SOURCES.txt must survive read_manifest().
    dist = Distribution(SETUP_ATTRS)
    dist.script_name = 'setup.py'
    cmd = sdist(dist)
    cmd.ensure_finalized()

    # Create manifest
    quiet()
    try:
        cmd.run()
    finally:
        unquiet()

    # Add UTF-8 filename to manifest (raw bytes, appended in binary mode)
    filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
    cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
    manifest = open(cmd.manifest, 'ab')
    manifest.write(b('\n')+filename)
    manifest.close()

    # The file must exist to be included in the filelist
    open(filename, 'w').close()

    # Re-read manifest
    cmd.filelist.files = []
    quiet()
    try:
        cmd.read_manifest()
    finally:
        unquiet()

    # The filelist should contain the UTF-8 filename
    if sys.version_info >= (3,):
        # read_manifest yields str on Python 3; compare in decoded form
        filename = filename.decode('utf-8')
    self.assertTrue(filename in cmd.filelist.files)
# Python 3 only
if sys.version_info >= (3,):

    def test_read_manifest_skips_non_utf8_filenames(self):
        # Test for #303.
        # A Latin-1 (non-UTF-8) manifest entry must be silently skipped,
        # not crash read_manifest with UnicodeDecodeError.
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = sdist(dist)
        cmd.ensure_finalized()

        # Create manifest
        quiet()
        try:
            cmd.run()
        finally:
            unquiet()

        # Add Latin-1 filename to manifest (raw bytes, binary append)
        filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
        cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
        manifest = open(cmd.manifest, 'ab')
        manifest.write(b('\n')+filename)
        manifest.close()

        # The file must exist to be included in the filelist
        open(filename, 'w').close()

        # Re-read manifest
        cmd.filelist.files = []
        quiet()
        try:
            try:
                cmd.read_manifest()
            except UnicodeDecodeError:
                # Decoding failure here is exactly the regression being tested
                e = sys.exc_info()[1]
                self.fail(e)
        finally:
            unquiet()

        # The Latin-1 filename should have been skipped
        filename = filename.decode('latin-1')
        self.assertFalse(filename in cmd.filelist.files)
@skipIf(sys.version_info >= (3,) and locale.getpreferredencoding() != 'UTF-8',
        'Unittest fails if locale is not utf-8 but the manifests is recorded correctly')
def test_sdist_with_utf8_encoded_filename(self):
    # Test for #303.
    # A UTF-8-named source file must end up in the sdist filelist; the
    # expected decoded form depends on platform and Python version.
    dist = Distribution(SETUP_ATTRS)
    dist.script_name = 'setup.py'
    cmd = sdist(dist)
    cmd.ensure_finalized()

    # UTF-8 filename
    filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
    open(filename, 'w').close()

    quiet()
    try:
        cmd.run()
    finally:
        unquiet()

    if sys.platform == 'darwin':
        # HFS+ stores names NFD-normalized; normalize before comparing
        filename = decompose(filename)

    if sys.version_info >= (3,):
        fs_enc = sys.getfilesystemencoding()

        if sys.platform == 'win32':
            if fs_enc == 'cp1252':
                # Python 3 mangles the UTF-8 filename
                filename = filename.decode('cp1252')
                self.assertTrue(filename in cmd.filelist.files)
            else:
                filename = filename.decode('mbcs')
                self.assertTrue(filename in cmd.filelist.files)
        else:
            filename = filename.decode('utf-8')
            self.assertTrue(filename in cmd.filelist.files)
    else:
        # Python 2: bytes round-trip unchanged
        self.assertTrue(filename in cmd.filelist.files)
def test_sdist_with_latin1_encoded_filename(self):
    # Test for #303.
    # Latin-1 (non-UTF-8) filenames: inclusion/skipping is platform- and
    # version-dependent; each branch below pins the expected behavior.
    dist = Distribution(SETUP_ATTRS)
    dist.script_name = 'setup.py'
    cmd = sdist(dist)
    cmd.ensure_finalized()

    # Latin-1 filename
    filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
    open(filename, 'w').close()
    self.assertTrue(os.path.isfile(filename))

    quiet()
    try:
        cmd.run()
    finally:
        unquiet()

    if sys.version_info >= (3,):
        # not all windows systems have a default FS encoding of cp1252
        if sys.platform == 'win32':
            # Latin-1 is similar to Windows-1252 however
            # on mbcs filesys it is not in latin-1 encoding
            fs_enc = sys.getfilesystemencoding()
            if fs_enc == 'mbcs':
                filename = filename.decode('mbcs')
            else:
                filename = filename.decode('latin-1')

            self.assertTrue(filename in cmd.filelist.files)
        else:
            # The Latin-1 filename should have been skipped
            filename = filename.decode('latin-1')
            self.assertFalse(filename in cmd.filelist.files)
    else:
        # No conversion takes place under Python 2 and the file
        # is included. We shall keep it that way for BBB.
        self.assertTrue(filename in cmd.filelist.files)
class TestDummyOutput(environment.ZippedEnvironment):
    """Run ``setup.py sdist`` on a canned 'dummy' project and verify that
    every line of its output matches an expected pattern.
    """

    def setUp(self):
        self.datafile = os.path.join('setuptools', 'tests',
                                     'svn_data', "dummy.zip")
        self.dataname = "dummy"
        super(TestDummyOutput, self).setUp()

    def _run(self):
        """Run sdist in a subprocess; fail on a non-zero exit code or on
        any output line not matched by one of the allowed regexes.

        Returns:
            str: the captured subprocess output.
        """
        code, data = environment.run_setup_py(["sdist"],
                                              pypath=self.old_cwd,
                                              data_stream=0)
        if code:
            info = "DIR: " + os.path.abspath('.')
            info += "\n SDIST RETURNED: %i\n\n" % code
            info += data
            raise AssertionError(info)

        datalines = data.splitlines()

        # Regexes for every line sdist is allowed to print.
        possible = (
            "running sdist",
            "running egg_info",
            "creating dummy\.egg-info",
            "writing dummy\.egg-info",
            "writing top-level names to dummy\.egg-info",
            "writing dependency_links to dummy\.egg-info",
            "writing manifest file 'dummy\.egg-info",
            "reading manifest file 'dummy\.egg-info",
            "reading manifest template 'MANIFEST\.in'",
            "writing manifest file 'dummy\.egg-info",
            "creating dummy-0.1.1",
            "making hard links in dummy-0\.1\.1",
            "copying files to dummy-0\.1\.1",
            "copying \S+ -> dummy-0\.1\.1",
            "copying dummy",
            "copying dummy\.egg-info",
            "hard linking \S+ -> dummy-0\.1\.1",
            "hard linking dummy",
            "hard linking dummy\.egg-info",
            "Writing dummy-0\.1\.1",
            "creating dist",
            "creating 'dist",
            "Creating tar archive",
            "running check",
            "adding 'dummy-0\.1\.1",
            "tar .+ dist/dummy-0\.1\.1\.tar dummy-0\.1\.1",
            "gzip .+ dist/dummy-0\.1\.1\.tar",
            "removing 'dummy-0\.1\.1' \\(and everything under it\\)",
        )
        print(" DIR: " + os.path.abspath('.'))
        for line in datalines:
            found = False
            for pattern in possible:
                if re.match(pattern, line):
                    print(" READ: " + line)
                    found = True
                    break
            if not found:
                # Fix: the error message was misspelled "Unexpexected"
                raise AssertionError("Unexpected: %s\n-in-\n%s"
                                     % (line, data))

        return data

    def test_sources(self):
        self._run()
class TestSvn(environment.ZippedEnvironment):
    """Exercise walk_revctrl() against canned Subversion working copies."""

    def setUp(self):
        version = svn_utils.SvnInfo.get_svn_version()
        if not version:  # None or Empty
            # No svn binary available; leave the environment unset so the
            # skipIf guard on the test method takes effect.
            return

        # Keep only (major, minor) for data-file selection below.
        self.base_version = tuple([int(x) for x in version.split('.')][:2])

        if not self.base_version:
            raise ValueError('No SVN tools installed')
        elif self.base_version < (1, 3):
            raise ValueError('Insufficient SVN Version %s' % version)
        elif self.base_version >= (1, 9):
            # trying the latest version
            self.base_version = (1, 8)

        self.dataname = "svn%i%i_example" % self.base_version
        self.datafile = os.path.join('setuptools', 'tests',
                                     'svn_data', self.dataname + ".zip")
        super(TestSvn, self).setUp()

    # Fix: skip message was misspelled "No SVN to text"
    @skipIf(not test_svn._svn_check, "No SVN to test, in the first place")
    def test_walksvn(self):
        # SVN >= 1.6 fixtures use spaces in directory names.
        if self.base_version >= (1, 6):
            folder2 = 'third party2'
            folder3 = 'third party3'
        else:
            folder2 = 'third_party2'
            folder3 = 'third_party3'

        # TODO is this right
        expected = set([
            os.path.join('a file'),
            os.path.join(folder2, 'Changes.txt'),
            os.path.join(folder2, 'MD5SUMS'),
            os.path.join(folder2, 'README.txt'),
            os.path.join(folder3, 'Changes.txt'),
            os.path.join(folder3, 'MD5SUMS'),
            os.path.join(folder3, 'README.txt'),
            os.path.join(folder3, 'TODO.txt'),
            os.path.join(folder3, 'fin'),
            os.path.join('third_party', 'README.txt'),
            os.path.join('folder', folder2, 'Changes.txt'),
            os.path.join('folder', folder2, 'MD5SUMS'),
            os.path.join('folder', folder2, 'WatashiNiYomimasu.txt'),
            os.path.join('folder', folder3, 'Changes.txt'),
            os.path.join('folder', folder3, 'fin'),
            os.path.join('folder', folder3, 'MD5SUMS'),
            os.path.join('folder', folder3, 'oops'),
            os.path.join('folder', folder3, 'WatashiNiYomimasu.txt'),
            os.path.join('folder', folder3, 'ZuMachen.txt'),
            os.path.join('folder', 'third_party', 'WatashiNiYomimasu.txt'),
            os.path.join('folder', 'lalala.txt'),
            os.path.join('folder', 'quest.txt'),
            # The example will have a deleted file
            # (or should) but shouldn't return it
        ])
        self.assertEqual(set(x for x in walk_revctrl()), expected)
def test_suite():
    """Collect this module's tests for the legacy unittest entry point."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| gpl-3.0 |
neoareslinux/neutron | neutron/agent/dhcp/config.py | 2 | 3128 | # Copyright 2015 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# oslo.config option definitions for the neutron DHCP agent.
# NOTE: _() is neutron's gettext translation hook, injected as a builtin
# by the i18n setup — TODO confirm against the project's i18n module.

# Core agent behavior: resync cadence, driver selection, metadata serving.
DHCP_AGENT_OPTS = [
    cfg.IntOpt('resync_interval', default=5,
               help=_("Interval to resync.")),
    cfg.StrOpt('dhcp_driver',
               default='neutron.agent.linux.dhcp.Dnsmasq',
               help=_("The driver used to manage the DHCP server.")),
    cfg.BoolOpt('enable_isolated_metadata', default=False,
                help=_("Support Metadata requests on isolated networks.")),
    cfg.BoolOpt('force_metadata', default=False,
                help=_("Force to use DHCP to get Metadata on all networks.")),
    cfg.BoolOpt('enable_metadata_network', default=False,
                help=_("Allows for serving metadata requests from a "
                       "dedicated network. Requires "
                       "enable_isolated_metadata = True")),
    cfg.IntOpt('num_sync_threads', default=4,
               help=_('Number of threads to use during sync process.'))
]

# Generic DHCP server settings shared by all drivers.
DHCP_OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
]

# Options specific to the dnsmasq backend.
DNSMASQ_OPTS = [
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings with this file')),
    cfg.ListOpt('dnsmasq_dns_servers',
                help=_('Comma-separated list of the DNS servers which will be '
                       'used as forwarders.'),
                deprecated_name='dnsmasq_dns_server'),
    cfg.BoolOpt('dhcp_delete_namespaces', default=True,
                help=_("Delete namespace after removing a dhcp server."
                       "This option is deprecated and "
                       "will be removed in a future release."),
                deprecated_for_removal=True),
    cfg.StrOpt('dnsmasq_base_log_dir',
               help=_("Base log dir for dnsmasq logging. "
                      "The log contains DHCP and DNS log information and "
                      "is useful for debugging issues with either DHCP or "
                      "DNS. If this section is null, disable dnsmasq log.")),
    cfg.IntOpt(
        'dnsmasq_lease_max',
        default=(2 ** 24),
        help=_('Limit number of leases to prevent a denial-of-service.')),
    cfg.BoolOpt('dhcp_broadcast_reply', default=False,
                help=_("Use broadcast in DHCP replies")),
]
| apache-2.0 |
htzy/bigfour | common/lib/calc/calc/functions.py | 279 | 1521 | """
Provide the mathematical functions that numpy doesn't.
Specifically, the secant/cosecant/cotangents and their inverses and
hyperbolic counterparts
"""
import numpy
# Normal Trig
def sec(arg):
    """Secant: the reciprocal of the cosine."""
    cos_value = numpy.cos(arg)
    return 1 / cos_value


def csc(arg):
    """Cosecant: the reciprocal of the sine."""
    sin_value = numpy.sin(arg)
    return 1 / sin_value


def cot(arg):
    """Cotangent: the reciprocal of the tangent."""
    tan_value = numpy.tan(arg)
    return 1 / tan_value
# Inverse Trig
# http://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_among_the_inverse_trigonometric_functions
def arcsec(val):
    """Inverse secant, via arccos(1/val)."""
    return numpy.arccos(1. / val)


def arccsc(val):
    """Inverse cosecant, via arcsin(1/val)."""
    return numpy.arcsin(1. / val)


def arccot(val):
    """Inverse cotangent.

    Branch chosen on the sign of the real part so the result lies in
    (-pi/2, pi/2].
    """
    base = numpy.arctan(val)
    if numpy.real(val) < 0:
        return -numpy.pi / 2 - base
    return numpy.pi / 2 - base
# Hyperbolic Trig
def sech(arg):
    """Hyperbolic secant: the reciprocal of cosh."""
    cosh_value = numpy.cosh(arg)
    return 1 / cosh_value


def csch(arg):
    """Hyperbolic cosecant: the reciprocal of sinh."""
    sinh_value = numpy.sinh(arg)
    return 1 / sinh_value


def coth(arg):
    """Hyperbolic cotangent: the reciprocal of tanh."""
    tanh_value = numpy.tanh(arg)
    return 1 / tanh_value
# And their inverses
def arcsech(val):
    """Inverse hyperbolic secant, via arccosh(1/val)."""
    return numpy.arccosh(1. / val)


def arccsch(val):
    """Inverse hyperbolic cosecant, via arcsinh(1/val)."""
    return numpy.arcsinh(1. / val)


def arccoth(val):
    """Inverse hyperbolic cotangent, via arctanh(1/val)."""
    return numpy.arctanh(1. / val)
| agpl-3.0 |
alexdglover/shill-isms | venv/lib/python2.7/site-packages/gunicorn/reloader.py | 98 | 1533 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import re
import sys
import time
import threading
class Reloader(threading.Thread):
    """Daemon thread that polls loaded source files and invokes a callback
    when one of them changes on disk.
    """

    def __init__(self, extra_files=None, interval=1, callback=None):
        super(Reloader, self).__init__()
        # Daemon thread: must not block interpreter shutdown.
        self.setDaemon(True)
        self._extra_files = set(extra_files or ())
        self._extra_files_lock = threading.RLock()
        self._interval = interval
        self._callback = callback

    def add_extra_file(self, filename):
        # Thread-safe registration of an additional file to watch.
        with self._extra_files_lock:
            self._extra_files.add(filename)

    def get_files(self):
        # Map compiled-module paths (.pyc/.pyo) back to their .py source.
        # NOTE(review): modules whose __file__ is None would make re.sub
        # raise TypeError here — confirm whether that can occur in practice.
        fnames = [
            re.sub('py[co]$', 'py', module.__file__)
            for module in list(sys.modules.values())
            if hasattr(module, '__file__')
        ]

        with self._extra_files_lock:
            fnames.extend(self._extra_files)

        return fnames

    def run(self):
        # Poll forever, comparing each file's mtime against the last one seen.
        mtimes = {}
        while True:
            for filename in self.get_files():
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # File vanished or is unreadable; skip this round.
                    continue

                old_time = mtimes.get(filename)
                if old_time is None:
                    # First sighting: record a baseline, don't fire.
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    if self._callback:
                        self._callback(filename)
            time.sleep(self._interval)
| mit |
hudora/huDjango | hudjango/management/commands/couchdb-init.py | 1 | 1234 | # encoding: utf-8
import couchdb
from optparse import make_option
from hudjango.management.couchdb.support import CouchDBBaseCommand
from django.core.management.base import CommandError
class Command(CouchDBBaseCommand):
    # NOTE(review): this module is Python 2 only — it uses the
    # `except Exc, name` syntax and the `print` statement.
    help = """ Creates a new couchdb database. """
    option_list = CouchDBBaseCommand.option_list + (
        make_option('--purge', action='store_true', help='Delete existing database [default: %default]'),
    )

    def handle(self, *args, **options):
        """Create the database named by the single positional argument,
        optionally deleting an existing one first when --purge is given.
        """
        # get the name of the database to create
        if len(args) != 1:
            raise CommandError("You need to specify exactly one argument as database name")
        database = args[0]
        # drop a possibly existing database if the user wants us to.
        couch = self._server(options)
        if options['purge']:
            try:
                couch.delete(database)
            except couchdb.client.ResourceNotFound:
                # Nothing to purge; creating from scratch is fine.
                pass
        # then create the new database
        try:
            couch.create(database)
        except couchdb.client.PreconditionFailed, exception:
            # Database already exists (and --purge was not given).
            raise CommandError("%s: %s" % (database, str(exception)))
        # NOTE(review): "succesfully" is misspelled in this user-facing
        # message; left unchanged here as it is runtime output.
        print "database '%s' created succesfully" % database
| bsd-2-clause |
vicky2135/lucious | oscar/lib/python2.7/site-packages/factory/containers.py | 2 | 11658 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 Mark Sandstrom
# Copyright (c) 2011-2015 Raphaël Barrois
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import logging
from . import declarations
from . import errors
from . import utils
logger = logging.getLogger(__name__)
class LazyStub(object):
    """A generic container that only allows getting attributes.

    Attributes are set at instantiation time, values are computed lazily.

    Attributes:
        __initialized (bool): whether this object's __init__ has run. If set,
            setting any attribute will be prevented.
        __attrs (dict): maps attribute name to their declaration
        __values (dict): maps attribute name to computed value
        __pending (str list): names of the attributes whose value is being
            computed. This allows to detect cyclic lazy attribute definition.
        __containers (LazyStub list): "parents" of the LazyStub being built.
            This allows to have the field of a field depend on the value of
            another field
        __model_class (type): the model class to build.
    """

    # Class-level default: while False, __setattr__ behaves normally so
    # __init__ can populate the instance; flipped to True at the end of
    # __init__ to freeze the object.
    __initialized = False

    def __init__(self, attrs, containers=(), model_class=object, log_ctx=None):
        self.__attrs = attrs
        self.__values = {}
        self.__pending = []
        self.__containers = containers
        self.__model_class = model_class
        self.__log_ctx = log_ctx or '%s.%s' % (model_class.__module__, model_class.__name__)
        self.factory_parent = containers[0] if containers else None
        # Must be last: freezes the instance (see __setattr__).
        self.__initialized = True

    def __repr__(self):
        return '<LazyStub for %s.%s>' % (self.__model_class.__module__, self.__model_class.__name__)

    def __str__(self):
        return '<LazyStub for %s with %s>' % (
            self.__model_class.__name__, list(self.__attrs.keys()))

    def __fill__(self):
        """Fill this LazyStub, computing values of all defined attributes.

        Returns:
            dict: map of attribute name => computed value
        """
        res = {}
        logger.debug(
            "LazyStub: Computing values for %s(%s)",
            self.__log_ctx, utils.log_pprint(kwargs=self.__attrs),
        )
        # getattr triggers the lazy evaluation in __getattr__ below.
        for attr in self.__attrs:
            res[attr] = getattr(self, attr)
        logger.debug(
            "LazyStub: Computed values, got %s(%s)",
            self.__log_ctx, utils.log_pprint(kwargs=res),
        )
        return res

    def __getattr__(self, name):
        """Retrieve an attribute's value.

        This will compute it if needed, unless it is already on the list of
        attributes being computed.
        """
        if name in self.__pending:
            # Re-entered while already computing `name`: cyclic definition.
            raise errors.CyclicDefinitionError(
                "Cyclic lazy attribute definition for %s; cycle found in %r." %
                (name, self.__pending))
        elif name in self.__values:
            # Already computed: return the cached value.
            return self.__values[name]
        elif name in self.__attrs:
            val = self.__attrs[name]
            if isinstance(val, LazyValue):
                # Track the in-progress computation for cycle detection.
                self.__pending.append(name)
                try:
                    val = val.evaluate(self, self.__containers)
                finally:
                    last = self.__pending.pop()
                    assert name == last
            self.__values[name] = val
            return val
        else:
            raise AttributeError(
                "The parameter %s is unknown. Evaluated attributes are %r, "
                "definitions are %r." % (name, self.__values, self.__attrs))

    def __setattr__(self, name, value):
        """Prevent setting attributes once __init__ is done."""
        if not self.__initialized:
            return super(LazyStub, self).__setattr__(name, value)
        else:
            raise AttributeError('Setting of object attributes is not allowed')
class DeclarationStack(object):
    """An ordered stack of declaration layers.

    Layers are merged in `ordering` order, so a later layer's entries take
    precedence over an earlier layer's when flattened via current().
    """

    def __init__(self, ordering):
        self.ordering = ordering
        self.layers = {name: {} for name in self.ordering}

    def __getitem__(self, key):
        return self.layers[key]

    def __setitem__(self, key, value):
        # Only layers declared in the ordering may be replaced.
        assert key in self.ordering
        self.layers[key] = value

    def current(self):
        """Return the flattened declarations dict, later layers winning."""
        merged = {}
        for layer_name in self.ordering:
            merged.update(self.layers[layer_name])
        return merged
class ParameterResolver(object):
    """Resolve a factory's parameter declarations onto a declaration stack."""

    def __init__(self, parameters, deps):
        self.parameters = parameters
        self.deps = deps
        self.declaration_stack = None
        self.resolved = set()

    def resolve_one(self, name):
        """Compute one field if needed, resolving its dependencies first."""
        if name in self.resolved:
            return
        # Depth-first: every dependency must be computed before `name`.
        for dependency in self.deps.get(name, ()):
            self.resolve_one(dependency)
        self.compute(name)
        self.resolved.add(name)

    def compute(self, name):
        """Actually compute the value for a given name."""
        value = self.parameters[name]
        if isinstance(value, declarations.ComplexParameter):
            # Complex parameters may expand into several overrides.
            overrides = value.compute(name, self.declaration_stack.current())
        else:
            overrides = {name: value}

        self.declaration_stack['overrides'].update(overrides)

    def resolve(self, declaration_stack):
        """Resolve parameters for a given declaration stack.

        Modifies the stack in-place.
        """
        self.declaration_stack = declaration_stack
        for parameter_name in self.parameters:
            self.resolve_one(parameter_name)
class LazyValue(object):
    """Base class for values that are computed on demand from a stub."""

    def evaluate(self, obj, containers=()):  # pragma: no cover
        """Compute the value, using the given object."""
        raise NotImplementedError("This is an abstract method.")
class DeclarationWrapper(LazyValue):
    """Lazy wrapper delegating evaluation to an OrderedDeclaration.

    Attributes:
        declaration (declarations.OrderedDeclaration): the wrapped declaration
        sequence (int): sequence counter passed through on evaluation
        create (bool): whether the surrounding build is a 'create'
        extra (dict): overridden defaults for the declaration
    """

    def __init__(self, declaration, sequence, create, extra=None, **kwargs):
        super(DeclarationWrapper, self).__init__(**kwargs)
        self.declaration = declaration
        self.sequence = sequence
        self.create = create
        self.extra = extra

    def evaluate(self, obj, containers=()):
        """Lazily evaluate the attached OrderedDeclaration.

        Args:
            obj (LazyStub): the object being built
            containers (object list): the chain of containers of the object
                being built, its immediate holder being first.
        """
        wrapped = self.declaration
        return wrapped.evaluate(
            self.sequence,
            obj,
            create=self.create,
            extra=self.extra,
            containers=containers,
        )

    def __repr__(self):
        return '<%s for %r>' % (self.__class__.__name__, self.declaration)
class AttributeBuilder(object):
    """Builds attributes from a factory and extra data.

    Attributes:
        factory (base.Factory): the Factory for which attributes are being
            built
        _declarations (DeclarationDict): the attribute declarations for the
            factory
        _subfields (dict): dict mapping an attribute name to a dict of
            overridden default values for the related SubFactory.
    """

    def __init__(self, factory, extra=None, log_ctx=None, **kwargs):
        super(AttributeBuilder, self).__init__(**kwargs)

        if not extra:
            extra = {}

        self.factory = factory
        # '__containers' is a reserved key carrying the parent-stub chain.
        self._containers = extra.pop('__containers', ())
        initial_declarations = dict(factory._meta.declarations)
        self._log_ctx = log_ctx

        # Parameters
        # ----------
        self._declarations = self.merge_declarations(initial_declarations, extra)

        # Subfields
        # ---------
        attrs_with_subfields = [
            k for k, v in initial_declarations.items()
            if self.has_subfields(v)
        ]

        # Extract subfields; THIS MODIFIES self._declarations.
        self._subfields = utils.multi_extract_dict(
            attrs_with_subfields, self._declarations)

    def has_subfields(self, value):
        # Only ParameteredAttribute declarations accept nested overrides.
        return isinstance(value, declarations.ParameteredAttribute)

    def merge_declarations(self, initial, extra):
        """Compute the final declarations, taking into account parameter-based overrides."""
        # Precedence order:
        # - Start with class-level declarations
        # - Add overrides from parameters
        # - Finally, use callsite-level declarations & values
        declaration_stack = DeclarationStack(['initial', 'overrides', 'extra'])
        declaration_stack['initial'] = initial.copy()
        declaration_stack['extra'] = extra.copy()

        # Actually compute the final stack
        resolver = ParameterResolver(
            parameters=self.factory._meta.parameters,
            deps=self.factory._meta.parameters_dependencies,
        )
        resolver.resolve(declaration_stack)

        return declaration_stack.current()

    def build(self, create, force_sequence=None):
        """Build a dictionary of attributes.

        Args:
            create (bool): whether to 'build' or 'create' the subfactories.
            force_sequence (int or None): if set to an int, use this value for
                the sequence counter; don't advance the related counter.
        """
        # Setup factory sequence.
        if force_sequence is None:
            sequence = self.factory._generate_next_sequence()
        else:
            sequence = force_sequence

        # Parse attribute declarations, wrapping SubFactory and
        # OrderedDeclaration.
        wrapped_attrs = {}
        for k, v in self._declarations.items():
            if isinstance(v, declarations.OrderedDeclaration):
                # Defer evaluation until LazyStub.__fill__ pulls the value.
                v = DeclarationWrapper(
                    v,
                    sequence=sequence,
                    create=create,
                    extra=self._subfields.get(k, {}),
                )
            wrapped_attrs[k] = v

        stub = LazyStub(
            wrapped_attrs, containers=self._containers,
            model_class=self.factory, log_ctx=self._log_ctx)
        return stub.__fill__()
class StubObject(object):
    """Plain attribute container used to hold stubbed factory results."""
    pass
| bsd-3-clause |
iwaseyusuke/ryu | ryu/tests/unit/ofproto/test_oxm.py | 29 | 5609 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ryu.ofproto.ofproto_v1_3 as ofp
class Test_OXM(unittest.TestCase):
    """Round-trip tests for OpenFlow 1.3 OXM match-field (de)serialization."""

    def _test_encode(self, user, on_wire):
        # user (field, value) -> (oxm number, value, mask) -> wire bytes
        (f, uv) = user
        (n, v, m) = ofp.oxm_from_user(f, uv)
        buf = bytearray()
        ofp.oxm_serialize(n, v, m, buf, 0)
        self.assertEqual(on_wire, buf)

    def _test_decode(self, user, on_wire):
        # wire bytes -> (number, value, mask) -> user (field, value)
        (n, v, m, l) = ofp.oxm_parse(on_wire, 0)
        self.assertEqual(len(on_wire), l)
        (f, uv) = ofp.oxm_to_user(n, v, m)
        self.assertEqual(user, (f, uv))

    def _test_encode_header(self, user, on_wire):
        # field name -> oxm number -> header bytes only
        f = user
        n = ofp.oxm_from_user_header(f)
        buf = bytearray()
        ofp.oxm_serialize_header(n, buf, 0)
        self.assertEqual(on_wire, buf)

    def _test_decode_header(self, user, on_wire):
        # header bytes -> oxm number -> field name
        (n, l) = ofp.oxm_parse_header(on_wire, 0)
        self.assertEqual(len(on_wire), l)
        f = ofp.oxm_to_user_header(n)
        self.assertEqual(user, f)

    def _test(self, user, on_wire, header_bytes):
        # Full round-trip; header-only checks are skipped for masked values
        # and for unknown ('field_N') fields.
        self._test_encode(user, on_wire)
        self._test_decode(user, on_wire)
        if isinstance(user[1], tuple):  # has mask?
            return
        user_header = user[0]
        on_wire_header = on_wire[:header_bytes]
        self._test_decode_header(user_header, on_wire_header)
        if user_header.startswith('field_'):
            return  # not supported
        self._test_encode_header(user_header, on_wire_header)

    def test_basic_nomask(self):
        user = ('ipv4_src', '192.0.2.1')
        on_wire = (
            b'\x80\x00\x16\x04'
            b'\xc0\x00\x02\x01'
        )
        self._test(user, on_wire, 4)

    def test_basic_mask(self):
        user = ('ipv4_src', ('192.0.2.1', '255.255.0.0'))
        on_wire = (
            b'\x80\x00\x17\x08'
            b'\xc0\x00\x02\x01'
            b'\xff\xff\x00\x00'
        )
        self._test(user, on_wire, 4)

    def test_exp_nomask(self):
        user = ('_dp_hash', 0x12345678)
        on_wire = (
            b'\xff\xff\x00\x08'
            b'\x00\x00\x23\x20'  # Nicira
            b'\x12\x34\x56\x78'
        )
        self._test(user, on_wire, 8)

    def test_exp_mask(self):
        user = ('_dp_hash', (0x12345678, 0x7fffffff))
        on_wire = (
            b'\xff\xff\x01\x0c'
            b'\x00\x00\x23\x20'  # Nicira
            b'\x12\x34\x56\x78'
            b'\x7f\xff\xff\xff'
        )
        self._test(user, on_wire, 8)

    def test_exp_nomask_2(self):
        user = ('tcp_flags', 0x876)
        on_wire = (
            b'\xff\xff\x54\x06'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x08\x76'
        )
        self._test(user, on_wire, 8)

    def test_exp_mask_2(self):
        user = ('tcp_flags', (0x876, 0x7ff))
        on_wire = (
            b'\xff\xff\x55\x08'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x08\x76'
            b'\x07\xff'
        )
        self._test(user, on_wire, 8)

    def test_exp_nomask_3(self):
        user = ('actset_output', 0x98765432)
        on_wire = (
            b'\xff\xff\x56\x08'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x98\x76\x54\x32'
        )
        self._test(user, on_wire, 8)

    def test_exp_mask_3(self):
        user = ('actset_output', (0x98765432, 0xfffffffe))
        on_wire = (
            b'\xff\xff\x57\x0c'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x98\x76\x54\x32'
            b'\xff\xff\xff\xfe'
        )
        self._test(user, on_wire, 8)

    def test_nxm_1_nomask(self):
        user = ('tun_ipv4_src', '192.0.2.1')
        on_wire = (
            b'\x00\x01\x3e\x04'
            b'\xc0\x00\x02\x01'
        )
        self._test(user, on_wire, 4)

    def test_nxm_1_mask(self):
        user = ('tun_ipv4_src', ('192.0.2.1', '255.255.0.0'))
        on_wire = (
            b'\x00\x01\x3f\x08'
            b'\xc0\x00\x02\x01'
            b'\xff\xff\x00\x00'
        )
        self._test(user, on_wire, 4)

    def test_ext_256_nomask(self):
        user = ('pbb_uca', 50)
        on_wire = (
            b'\xff\xff\x00\x07'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x0a\x00'
            b'\x32'
        )
        self._test(user, on_wire, 10)

    def test_ext_256_mask(self):
        user = ('pbb_uca', (50, 51))
        on_wire = (
            b'\xff\xff\x01\x08'
            b'\x4f\x4e\x46\x00'  # ONF
            b'\x0a\x00'
            b'\x32'
            b'\x33'
        )
        self._test(user, on_wire, 10)

    def test_basic_unknown_nomask(self):
        # Unknown field numbers round-trip as base64-encoded payloads.
        user = ('field_100', 'aG9nZWhvZ2U=')
        on_wire = (
            b'\x00\x00\xc8\x08'
            b'hogehoge'
        )
        self._test(user, on_wire, 4)

    def test_basic_unknown_mask(self):
        user = ('field_100', ('aG9nZWhvZ2U=', 'ZnVnYWZ1Z2E='))
        on_wire = (
            b'\x00\x00\xc9\x10'
            b'hogehoge'
            b'fugafuga'
        )
        self._test(user, on_wire, 4)
| apache-2.0 |
sestrella/ansible | lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py | 24 | 9625 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# The DOCUMENTATION/EXAMPLES/RETURN strings below are YAML parsed by
# ansible-doc at runtime; their content is part of the module's contract.
DOCUMENTATION = '''
---
module: azure_rm_sqlfirewallrule
version_added: "2.7"
short_description: Manage Firewall Rule instance
description:
    - Create, update and delete instance of Firewall Rule.

options:
    resource_group:
        description:
            - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        required: True
    server_name:
        description:
            - The name of the server.
        required: True
    name:
        description:
            - The name of the firewall rule.
        required: True
    start_ip_address:
        description:
            - The start IP address of the firewall rule.
            - Must be IPv4 format. Use value C(0.0.0.0) to represent all Azure-internal IP addresses.
    end_ip_address:
        description:
            - The end IP address of the firewall rule.
            - Must be IPv4 format. Must be greater than or equal to I(start_ip_address). Use value C(0.0.0.0) to represent all Azure-internal IP addresses.
    state:
        description:
            - State of the SQL Database. Use C(present) to create or update an SQL Database and C(absent) to delete it.
        default: present
        choices:
            - absent
            - present

extends_documentation_fragment:
    - azure

author:
    - Zim Kalinowski (@zikalino)

'''

EXAMPLES = '''
  - name: Create (or update) Firewall Rule
    azure_rm_sqlfirewallrule:
      resource_group: myResourceGroup
      server_name: firewallrulecrudtest-6285
      name: firewallrulecrudtest-5370
      start_ip_address: 172.28.10.136
      end_ip_address: 172.28.10.138
'''

RETURN = '''
id:
    description:
        - Resource ID.
    returned: always
    type: str
    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/firewallrulecrudtest-628
         5/firewallRules/firewallrulecrudtest-5370"
'''

import time

from ansible.module_utils.azure_rm_common import AzureRMModuleBase

# Azure SDK imports are optional at import time; azure_rm_common reports
# the missing dependency to the user when the module actually runs.
try:
    from msrestazure.azure_exceptions import CloudError
    from msrest.polling import LROPoller
    from azure.mgmt.sql import SqlManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass
class Actions:
    """Enumeration of the possible operations to perform on the resource."""
    # Explicit values, equivalent to unpacking range(4).
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMSqlFirewallRule(AzureRMModuleBase):
    """Configuration class for an Azure RM Firewall Rule resource.

    Implements create/update/delete of a SQL server firewall rule with
    check-mode support; the heavy lifting is delegated to the Azure SDK
    client exposed by the base class as ``self.sql_client``.
    """

    def __init__(self):
        # Argument spec mirrors the options documented in DOCUMENTATION.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            start_ip_address=dict(
                type='str'
            ),
            end_ip_address=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # Module parameters; populated from kwargs in exec_module().
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.start_ip_address = None
        self.end_ip_address = None
        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction
        super(AzureRMSqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method.

        Decides whether the firewall rule must be created, updated, deleted
        or left alone, performs the action (honouring check mode) and
        returns the results dict.
        """
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
        old_response = self.get_firewallrule()
        response = None
        if not old_response:
            self.log("Firewall Rule instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Firewall Rule instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Firewall Rule instance has to be deleted or may be updated")
                # Only explicitly supplied addresses trigger an update.
                if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
                    self.to_do = Actions.Update
                if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
                    self.to_do = Actions.Update
        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Firewall Rule instance")
            if self.check_mode:
                self.results['changed'] = True
                return self.results
            response = self.create_update_firewallrule()
            if not old_response:
                self.results['changed'] = True
            else:
                # Compare the full old/new dicts with the != operator rather
                # than calling __ne__ directly (idiomatic and equivalent).
                self.results['changed'] = old_response != response
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Firewall Rule instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_firewallrule()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_firewallrule():
                time.sleep(20)
        else:
            self.log("Firewall Rule instance unchanged")
            self.results['changed'] = False
            response = old_response
        if response:
            self.results["id"] = response["id"]
        return self.results

    def create_update_firewallrule(self):
        '''
        Creates or updates Firewall Rule with the specified configuration.

        :return: deserialized Firewall Rule instance state dictionary
        '''
        self.log("Creating / Updating the Firewall Rule instance {0}".format(self.name))
        try:
            response = self.sql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
                                                                       server_name=self.server_name,
                                                                       firewall_rule_name=self.name,
                                                                       start_ip_address=self.start_ip_address,
                                                                       end_ip_address=self.end_ip_address)
            # A long-running operation returns a poller; wait for completion.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create the Firewall Rule instance.')
            self.fail("Error creating the Firewall Rule instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_firewallrule(self):
        '''
        Deletes specified Firewall Rule instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Firewall Rule instance {0}".format(self.name))
        try:
            response = self.sql_client.firewall_rules.delete(resource_group_name=self.resource_group,
                                                             server_name=self.server_name,
                                                             firewall_rule_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Firewall Rule instance.')
            self.fail("Error deleting the Firewall Rule instance: {0}".format(str(e)))
        return True

    def get_firewallrule(self):
        '''
        Gets the properties of the specified Firewall Rule.

        :return: deserialized Firewall Rule instance state dictionary,
                 or False when the rule does not exist
        '''
        self.log("Checking if the Firewall Rule instance {0} is present".format(self.name))
        found = False
        try:
            response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
                                                          server_name=self.server_name,
                                                          firewall_rule_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Firewall Rule instance : {0} found".format(response.name))
        except CloudError as e:
            # A CloudError from get() means the rule does not exist.
            self.log('Did not find the Firewall Rule instance.')
        if found:
            return response.as_dict()
        return False
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMSqlFirewallRule()
if __name__ == '__main__':
main()
| gpl-3.0 |
rubencabrera/odoo | addons/website/models/test_models.py | 335 | 1386 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
class test_converter(orm.Model):
    # Dummy model with one column of each basic field type, used to exercise
    # the website field-converter rendering end to end.
    _name = 'website.converter.test'
    # disable translation export for those brilliant field labels and values
    _translate = False
    _columns = {
        'char': fields.char(),
        'integer': fields.integer(),
        'float': fields.float(),
        'numeric': fields.float(digits=(16, 2)),  # fixed-precision variant
        'many2one': fields.many2one('website.converter.test.sub'),
        'binary': fields.binary(),
        'date': fields.date(),
        'datetime': fields.datetime(),
        # Integer-keyed selection (French labels are intentional test data).
        'selection': fields.selection([
            (1, "réponse A"),
            (2, "réponse B"),
            (3, "réponse C"),
            (4, "réponse D"),
        ]),
        # String-keyed selection.
        'selection_str': fields.selection([
            ('A', "Qu'il n'est pas arrivé à Toronto"),
            ('B', "Qu'il était supposé arriver à Toronto"),
            ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', "La réponse D"),
        ], string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
                  u"qu'il fait une escale technique à St Claude, on dit:"),
        'html': fields.html(),
        'text': fields.text(),
    }
class test_converter_sub(orm.Model):
    # Target model for the many2one column of website.converter.test.
    _name = 'website.converter.test.sub'
    _columns = {
        'name': fields.char(),
    }
| agpl-3.0 |
hgrall/merite | archives/grall/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
  """Minimal memoizing callable wrapper.

  Results are cached per positional-argument tuple, so arguments must be
  hashable; keyword arguments are not supported.
  """
  def __init__(self, func):
    self.func = func
    self.cache = {}

  def __call__(self, *args):
    # Compute and store on a miss; nothing is cached when func raises.
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """A user-facing error.

  Raised for problems that should be reported to the user as a plain
  message rather than a traceback; the main entry point catches this
  and displays it.
  """
  pass
def ExceptionAppend(e, msg):
  """Append |msg| to the message of exception |e|, in place."""
  args = e.args
  if not args:
    e.args = (msg,)
    return
  # Merge msg into the first arg; preserve any extra args beyond it.
  combined = '%s %s' % (args[0], msg)
  e.args = (combined,) + args[1:]
def FindQualifiedTargets(target, qualified_list):
  """
  Given a list of qualified targets, return the qualified targets for the
  specified |target|.

  A qualified target looks like 'path/to/file.gyp:target_name#toolset';
  matching is done on the target-name component only.
  """
  return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
  """Split a qualified target into its [build_file, target, toolset] parts.

  A fully qualified target is '<build_file>:<target>#<toolset>'; missing
  pieces come back as None.  rsplit is used to disambiguate the Windows
  drive-letter separator from the build-file separator.
  """
  pieces = target.rsplit(':', 1)
  build_file = pieces[0] if len(pieces) == 2 else None
  target = pieces[-1]
  pieces = target.rsplit('#', 1)
  toolset = pieces[1] if len(pieces) == 2 else None
  target = pieces[0]
  return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolves a qualified target into canonical [build_file, target, toolset]
  form:
  - a fully defined build file, either absolute or relative to the current
    directory
  - a target name
  - a toolset

  build_file is the file relative to which 'target' is defined.
  target is the qualified target.
  toolset is the default toolset for that target.
  """
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file. If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is. Resolve it by
      # interpreting it as relative to build_file. If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file
  if parsed_toolset:
    toolset = parsed_toolset
  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Extracts the build file from the fully qualified target."""
  return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
  """Return the value of the first variable in |var_list| that is set in the
  environment, falling back to |default| when none of them are set."""
  environ = os.environ
  for name in var_list:
    try:
      return environ[name]
    except KeyError:
      continue
  return default
def QualifiedTarget(build_file, target, toolset):
  """Compose a qualified target string.

  "Qualified" means the file that a target was defined in and the target
  name, separated by a colon, suffixed (when |toolset| is truthy) by a #
  and the toolset name: /path/to/file.gyp:target_name#toolset
  """
  parts = [build_file, ':', target]
  if toolset:
    parts += ['#', toolset]
  return ''.join(parts)
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  """Returns a relative path that identifies |path| relative to
  |relative_to|, assuming both are relative to the current directory.

  If |follow_path_symlink| is true (default) and |path| is a symlink, then
  this method returns a path to the real file represented by |path|. If it is
  false, this method returns a path to the symlink. If |path| is not a
  symlink, this option has no effect.
  """
  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)
  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path
  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)
  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
                   path_split[prefix_len:]
  if len(relative_split) == 0:
    # The paths were the same.
    return ''
  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if not path:
    return path
  # Default the top level to the current directory.
  toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath but returns |path| unchanged if it is absolute."""
  if os.path.isabs(path):
    return path
  return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Re-express |path| relative to the current directory.

  Assumes |relative_to| is relative to the current directory and |path| is
  relative to the dirname of |relative_to|; the result is normalized.
  """
  base_dir = os.path.dirname(relative_to)
  joined = os.path.join(base_dir, path)
  return os.path.normpath(joined)
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  argument may be quoted and escaped as necessary to ensure that POSIX shells
  treat the returned value as a literal representing the argument passed to
  this function. Parameter (variable) expansions beginning with $ are allowed
  to remain intact without escaping the $, to allow the argument to contain
  references to variables to be expanded by the shell.
  """
  if not isinstance(argument, str):
    argument = str(argument)
  # Double-quote when the argument contains shell metacharacters or is empty;
  # see the module-level _quote pattern.
  if _quote.search(argument):
    quote = '"'
  else:
    quote = ''
  # Backslash-escape ", \ and ` regardless of quoting; see _escape.
  encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
  return encoded
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Each item is encoded with EncodePOSIXShellArgument and the results are
  joined with single spaces as the argument separator.
  """
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies, excluding |roots|."""
  visited = set()
  stack = list(roots)
  while stack:
    node = stack.pop()
    if node in visited:
      continue
    visited.add(node)
    spec = target_dicts[node]
    # Follow both the live and the original (pre-flattening) edges.
    for key in ('dependencies', 'dependencies_original'):
      stack.extend(spec.get(key, []))
  return list(visited - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list of qualified targets, returns the subset defined in
  the specified build_file."""
  return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified build_file.
  """
  # Targets defined directly in |build_file|, then everything they pull in.
  bftargets = BuildFileTargets(target_list, build_file)
  deptargets = DeepDependencyTargets(target_dicts, bftargets)
  return bftargets + deptargets
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  NOTE(review): the body uses Python 2-only syntax ('except OSError, e' and
  octal literals such as 077/0666); this file targets Python 2.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """
  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)
    def close(self):
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists, creating it if necessary.

  Only the "directory already exists" race is ignored; any other failure
  (e.g. permission denied, read-only filesystem) propagates so the caller
  sees a real error here instead of a confusing one later.
  """
  dirname = os.path.dirname(path)
  if not dirname:
    # |path| has no directory component; nothing to create.
    return
  try:
    os.makedirs(dirname)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
def GetFlavor(params):
  """Returns |params['flavor']| if it's set, the system's default flavor
  otherwise."""
  if 'flavor' in params:
    return params['flavor']
  platform = sys.platform
  # Exact platform names first.
  direct = {'cygwin': 'win', 'win32': 'win', 'darwin': 'mac'}
  if platform in direct:
    return direct[platform]
  # Then prefix matches (sys.platform may carry a version suffix).
  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('netbsd', 'netbsd'),
                         ('aix', 'aix')):
    if platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|.

  Flavors with no entry in the prefix map need no support script and are a
  no-op.
  """
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
  }.get(flavor, None)
  if not prefix:
    return
  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()
  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
  # Make file executable.
  # NOTE(review): 0755 is a Python 2 octal literal; this file targets Python 2.
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return the items of |seq| with duplicates removed, preserving order.

  |idfun|, when given, maps each item to the key used for duplicate
  detection; the first item seen for each key wins.
  """
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    key = idfun(item)
    if key not in seen:
      seen.add(key)
      result.append(item)
  return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  """A set that remembers insertion order, backed by a doubly linked list.

  Internal representation: self.map maps each key to its linked-list node
  [key, prev, next]; self.end is the sentinel node.

  NOTE(review): collections.MutableSet is the Python 2 location of this ABC
  (moved to collections.abc in Python 3, removed from collections in 3.10);
  this file targets Python 2.
  """
  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]         # sentinel node for doubly linked list
    self.map = {}                   # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable
  def __len__(self):
    return len(self.map)
  def __contains__(self, key):
    return key in self.map
  def add(self, key):
    # Link the new node in just before the sentinel, i.e. at the end.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]
  def discard(self, key):
    # Unlink the node from the list; silently a no-op when absent.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item
  def __iter__(self):
    # Walk forward from the sentinel.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]
  def __reversed__(self):
    # Walk backward from the sentinel.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]
  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key
  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))
  def __eq__(self, other):
    # Order matters only when comparing against another OrderedSet.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)
  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """Raised when an unexpected cycle is detected in a graph."""

  def __init__(self, nodes):
    # The nodes that were being visited when the cycle was found.
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]+)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
  """
  get_edges = memoize(get_edges)
  visited = set()
  visiting = set()
  ordered_nodes = []
  # Depth-first visit; nodes are prepended after their edges, so each node
  # ends up before everything it depends on was inserted after.
  def Visit(node):
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    ordered_nodes.insert(0, node)
  # Sorted iteration makes the output deterministic.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  """Return a truthy value when the environment requests a cross-compile.

  TODO: figure out how to not build extra host objects in the
  non-cross-compile case when this is enabled, and enable unconditionally.
  """
  environ = os.environ
  result = None
  for name in ('GYP_CROSSCOMPILE',
               'AR_host', 'CC_host', 'CXX_host',
               'AR_target', 'CC_target', 'CXX_target'):
    result = environ.get(name)
    if result:
      return result
  # Mirrors the original or-chain: falls through to the last lookup's value.
  return result
| gpl-3.0 |
capitalone/cloud-custodian | tools/c7n_mailer/c7n_mailer/replay.py | 1 | 5458 | """
Allow local testing of mailer and templates by replaying an SQS message.
MAILER_FILE input is a file containing the exact base64-encoded, gzipped
data that's enqueued to SQS via :py:meth:`c7n.actions.Notify.send_sqs`.
Alternatively, with -p|--plain specified, the file will be assumed to be
JSON data that can be loaded directly.
"""
import argparse
import base64
import json
import logging
import os
import zlib
import yaml
import boto3
import jsonschema
from c7n_mailer.cli import CONFIG_SCHEMA
from c7n_mailer.email_delivery import EmailDelivery
from c7n_mailer.utils import setup_defaults
from c7n_mailer.utils_email import get_mimetext_message
logger = logging.getLogger(__name__)
class MailerTester:
    """Loads a mailer SQS message from disk and replays it through
    c7n_mailer's email delivery, optionally printing instead of sending."""

    def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):
        # msg_file: either the raw base64-encoded, zlib-compressed SQS
        # payload, or (with msg_plain=True) plain JSON text.
        if not os.path.exists(msg_file):
            raise RuntimeError("File does not exist: %s" % msg_file)
        logger.debug('Reading message from: %s', msg_file)
        with open(msg_file, 'r') as fh:
            raw = fh.read()
        logger.debug('Read %d byte message', len(raw))
        if msg_plain:
            raw = raw.strip()
        else:
            logger.debug('base64-decoding and zlib decompressing message')
            raw = zlib.decompress(base64.b64decode(raw))
            # Optionally persist the decoded JSON for inspection.
            if json_dump_file is not None:
                with open(json_dump_file, 'wb') as fh:  # pragma: no cover
                    fh.write(raw)
        self.data = json.loads(raw)
        logger.debug('Loaded message JSON')
        self.config = config
        self.session = boto3.Session()

    def run(self, dry_run=False, print_only=False):
        # print_only: render one template to stdout with a dummy recipient;
        # dry_run: resolve real recipients and print, but do not send.
        emd = EmailDelivery(self.config, self.session, logger)
        addrs_to_msgs = emd.get_to_addrs_email_messages_map(self.data)
        logger.info('Would send email to: %s', addrs_to_msgs.keys())
        if print_only:
            mime = get_mimetext_message(
                self.config,
                logger,
                self.data,
                self.data['resources'],
                ['foo@example.com']
            )
            logger.info('Send mail with subject: "%s"', mime['Subject'])
            print(mime.get_payload(None, True).decode('utf-8'))
            return
        if dry_run:
            for to_addrs, mimetext_msg in addrs_to_msgs.items():
                print('-> SEND MESSAGE TO: %s' % '; '.join(to_addrs))
                print(mimetext_msg.get_payload(None, True).decode('utf-8'))
            return
        # else actually send the message...
        for to_addrs, mimetext_msg in addrs_to_msgs.items():
            logger.info('Actually sending mail to: %s', to_addrs)
            emd.send_c7n_email(self.data, list(to_addrs), mimetext_msg)
def setup_parser():
    """Build the argument parser for the mailer replay CLI."""
    parser = argparse.ArgumentParser('Test c7n-mailer templates and mail')
    parser.add_argument('-c', '--config', required=True)
    parser.add_argument(
        '-d', '--dry-run', dest='dry_run', action='store_true', default=False,
        help='Log messages that would be sent, but do not send')
    parser.add_argument(
        '-T', '--template-print', dest='print_only', action='store_true',
        default=False, help='Just print rendered templates')
    parser.add_argument(
        '-t', '--templates', default=None, type=str,
        help='message templates folder location')
    parser.add_argument(
        '-p', '--plain', dest='plain', action='store_true', default=False,
        help='Expect MESSAGE_FILE to be a plain string, '
             'rather than the base64-encoded, gzipped SQS '
             'message format')
    parser.add_argument(
        '-j', '--json-dump-file', dest='json_dump_file', type=str,
        action='store', default=None,
        help='If dump JSON of MESSAGE_FILE to this path; '
             'useful to base64-decode and gunzip a message')
    parser.add_argument(
        'MESSAGE_FILE', type=str,
        help='Path to SQS message dump/content file')
    return parser
def session_factory(config):
    """Create a boto3 Session from the mailer config's region and optional
    profile."""
    return boto3.Session(
        region_name=config['region'],
        profile_name=config.get('profile'))
def main():
    """CLI entry point: parse args, load and validate the mailer config,
    then replay the message file."""
    parser = setup_parser()
    options = parser.parse_args()
    module_dir = os.path.dirname(os.path.abspath(__file__))
    # Template lookup falls back to the packaged msg-templates directories
    # and the current working directory.
    default_templates = [
        os.path.abspath(os.path.join(module_dir, 'msg-templates')),
        os.path.abspath(os.path.join(module_dir, '..', 'msg-templates')),
        os.path.abspath('.')
    ]
    templates = options.templates
    if templates:
        default_templates.append(
            os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))
        )
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    logging.getLogger('botocore').setLevel(logging.WARNING)
    with open(options.config) as fh:
        config = yaml.load(fh.read(), Loader=yaml.SafeLoader)
    # Validate against the CLI schema before filling in defaults.
    jsonschema.validate(config, CONFIG_SCHEMA)
    setup_defaults(config)
    config['templates_folders'] = default_templates
    tester = MailerTester(
        options.MESSAGE_FILE, config, msg_plain=options.plain,
        json_dump_file=options.json_dump_file
    )
    tester.run(options.dry_run, options.print_only)
| apache-2.0 |
40223143/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/tokenize.py | 728 | 24424 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """One token: numeric type, text, start/end (row, col) and source line."""

    def __repr__(self):
        # Show the type as e.g. "1 (NAME)" rather than the bare number.
        described = '%d (%s)' % (self.type, tok_name[self.type])
        fields = self._replace(type=described)
        return 'TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % fields

    @property
    def exact_type(self):
        """The specific operator token type for known OP tokens, else type."""
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
def group(*choices):
    """Join regex alternatives into a single '(a|b|...)' group."""
    return '(%s)' % '|'.join(choices)

def any(*choices):
    """A group of alternatives repeated zero or more times.

    NOTE: deliberately shadows the builtin any() within this module.
    """
    return group(*choices) + '*'

def maybe(*choices):
    """A group of alternatives made optional."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
# The fragments below are composed (via group/any/maybe) into the master
# patterns -- Token and PseudoToken -- that _tokenize() scans with.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Optional string prefix: b/B optionally followed by r/R, r/R optionally
# followed by b/B, or a bare u/U.
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
# PseudoToken: optional leading whitespace followed by any single token
# (or the opening fragment of one) -- the pattern _tokenize() matches.
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Map an opening quote (with optional prefix) to the regex matching the
# remainder of the string.  Single-character prefix keys map to None and
# are resolved by peeking at the following character in _tokenize().
# NOTE(review): the R'''/R""" entries are listed twice; harmless but
# presumably one pair was meant to be something else.
endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}
# Every prefix spelling that can open a triple-quoted string, mapped to
# itself so membership tests and lookups share one table.
triple_quoted = {
    prefix: prefix
    for prefix in ("'''", '"""',
                   "r'''", 'r"""', "R'''", 'R"""',
                   "b'''", 'b"""', "B'''", 'B"""',
                   "br'''", 'br"""', "Br'''", 'Br"""',
                   "bR'''", 'bR"""', "BR'''", 'BR"""',
                   "rb'''", 'rb"""', "rB'''", 'rB"""',
                   "Rb'''", 'Rb"""', "RB'''", 'RB"""',
                   "u'''", 'u"""', "U'''", 'U"""',
                   )
}
# Every prefix spelling that can open a single-quoted string, mapped to
# itself (same self-keyed table trick as triple_quoted above).
single_quoted = {
    prefix: prefix
    for prefix in ("'", '"',
                   "r'", 'r"', "R'", 'R"',
                   "b'", 'b"', "B'", 'B"',
                   "br'", 'br"', "Br'", 'Br"',
                   "bR'", 'bR"', "BR'", 'BR"',
                   "rb'", 'rb"', "rB'", 'rB"',
                   "Rb'", 'Rb"', "RB'", 'RB"',
                   "u'", 'u"', "U'", 'U"',
                   )
}
# Number of columns a hard tab advances when measuring indentation.
tabsize = 8
# Raised when input ends in the middle of a multi-line string/statement.
class TokenError(Exception): pass
# Escape hatch a caller may raise to abort tokenization early (historical).
class StopTokenizing(Exception): pass
class Untokenizer:
    """Rebuild source text from an iterable of token tuples.

    Full 5-tuples are placed at their exact (row, col) positions; as soon
    as a bare 2-tuple (type, string) is seen, the remaining tokens are
    rendered heuristically by compat().
    """

    def __init__(self):
        self.tokens = []        # output fragments, joined at the end
        self.prev_row = 1       # end position of the previous token
        self.prev_col = 0
        self.encoding = None    # captured from an ENCODING token, if any

    def add_whitespace(self, start):
        """Emit filler so the next token begins at *start* (row, col).

        Bug fix: the old code asserted ``row <= self.prev_row``, which is
        inverted (tokens only move forward) and blew up -- or was silently
        stripped under ``python -O`` -- whenever a token started on a later
        row, e.g. after a backslash line continuation.  Raise ValueError
        for genuinely out-of-order positions and bridge row gaps with
        explicit continuations instead.
        """
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Join *iterable*'s tokens back into source text (a str).

        Remembers the ENCODING token in self.encoding for the module-level
        untokenize() wrapper to apply.
        """
        for t in iterable:
            if len(t) == 2:
                # Positions are gone from here on; fall back to compat().
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Render *token* and the rest of *iterable* without positions.

        Inserts separating spaces after names/numbers, between adjacent
        strings, and replays INDENT/DEDENT bookkeeping so the output
        still tokenizes to the same (type, string) stream.
        """
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Returns bytes encoded with the ENCODING token when one is present
    (tokenize() always emits it first), otherwise a str.

    Each element of *iterable* must be a token sequence with at least a
    token number and a token value; with only those two fields the output
    layout is approximate.  With full 5-tuples the round trip is exact:

        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    untokenizer = Untokenizer()
    text = untokenizer.untokenize(iterable)
    if untokenizer.encoding is None:
        return text
    return text.encode(untokenizer.encoding)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Best effort: a file-like readline exposes its name for error messages.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat an exhausted readline iterator the same as EOF.
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        # Return the normalized cookie encoding on *line*, or None.
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            # Validate the charset name; the codec object itself is unused.
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)
        if bom_found:
            # A BOM only agrees with a utf-8 cookie; anything else is an error.
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on line 1 or 2, so peek at one more line.
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a source file read-only, decoded with the encoding reported
    by detect_encoding()."""
    raw = builtins.open(filename, 'rb')
    file_encoding, _consumed = detect_encoding(raw.readline)
    raw.seek(0)
    wrapper = TextIOWrapper(raw, file_encoding, line_buffering=True)
    wrapper.mode = 'r'
    return wrapper
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which must
    be a callable object providing the same interface as the readline()
    method of built-in file objects: each call returns one line of input
    as bytes.  Alternately, readline can be a callable that terminates
    with StopIteration:

        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type;
    the token string; a 2-tuple (srow, scol) of ints giving the row and
    column where the token begins; a 2-tuple (erow, ecol) giving where it
    ends; and the (logical) line on which the token was found,
    continuation lines included.

    The first token sequence is always an ENCODING token telling you
    which encoding was used to decode the bytes stream.
    """
    # Imported lazily so tokenize can be imported before itertools is
    # built during interpreter bootstrap.
    from itertools import chain, repeat
    encoding, consumed_lines = detect_encoding(readline)
    remaining = iter(readline, b"")
    padding = repeat(b"")
    stream = chain(consumed_lines, remaining, padding)
    return _tokenize(stream.__next__, encoding)
def _tokenize(readline, encoding):
    """Core tokenizer generator behind tokenize() and generate_tokens().

    *readline* returns one source line per call: bytes when *encoding*
    is given (each line is decoded), str when it is None.  Yields
    TokenInfo 5-tuples; an ENCODING token comes first iff *encoding* is
    not None.  Raises TokenError at EOF inside a multi-line string or
    statement, and IndentationError on a dedent that matches no outer
    indentation level.
    """
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    # contstr/contline accumulate a string token spanning several lines;
    # needcont is set for single-quoted strings continued with a backslash.
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''
        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Backslash-continued single-quoted string whose line no
                # longer ends in a backslash: it is unterminated.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    # Form feed resets the column count.
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                               (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]
                if (initial in numchars or     # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a line break is a NL, not a NEWLINE.
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)   # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        # The prefix may be 0-2 chars; pick the first entry
                        # that names an actual quote character.
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():   # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize *readline*, which must return str (not bytes) lines.

    No encoding detection is performed and no ENCODING token is emitted
    (see _tokenize with encoding=None).
    """
    return _tokenize(readline, None)
def main():
    """Command-line entry point for ``python -m tokenize``.

    Tokenizes the given file (or stdin) and prints one formatted line
    per token; ``-e`` prints exact operator token names.
    """
    import argparse
    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Print a compiler-style "file:line:col: error: msg" and exit(1).
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is text, so use the str-based tokenizer directly.
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        # Unexpected failure: report, then re-raise for a full traceback.
        perror("unexpected error: %s" % err)
        raise
# Support direct invocation as "python -m tokenize".
if __name__ == "__main__":
    main()
| gpl-3.0 |
dbolgheroni/rswtch | rswtch-legacy.py | 1 | 6564 | #!/usr/bin/env python2.7
#
# Copyright (c) 2016, Daniel Bolgheroni.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import cmd
import signal
import shlex
from time import sleep
from pyfirmata import Arduino, serial
from conf import Config
class Sh(cmd.Cmd):
    """Interactive rswtch command shell.

    Every channel command parses a channel number from its argument line
    and acts on the matching Channel object in the module-level
    ``channels`` mapping.
    """
    prompt = 'rswtch> '
    intro = 'type \'help\' to see available commands'

    def default(self, line):
        """Fallback for unrecognized commands."""
        print(line + ": not found")

    def do_EOF(self, line):
        """Exit the shell on Ctrl-D."""
        exit(0)

    # overwrite help, since commands are simple, do not need independent
    # help for each command
    def do_help(self, line):
        """Print the one-screen command summary."""
        print("{0:<16} {1}".format("COMMAND", "DESCRIPTION"))
        print("{0:<16} {1}".format("annotate n \"c\"", "annotate c in channel n (use quotes)"))
        print("{0:<16} {1}".format("down n", "turn off the n channel"))
        print("{0:<16} {1}".format("help", "this help"))
        print("{0:<16} {1}".format("reset n", "turn the n channel off and on again after 2 seconds"))
        print("{0:<16} {1}".format("status", "display the status of all channels, including annotations"))
        print("{0:<16} {1}".format("toggle n", "turn the n channel off if its on, and vice-versa"))
        print("{0:<16} {1}".format("up n", "turn on the n channel"))

    def _apply(self, line, action):
        """Invoke Channel method *action* on the channel named in *line*.

        Shared by do_up/do_down/do_toggle/do_reset, which previously
        duplicated this parse-and-dispatch body.  Prints "no channel"
        when the number is missing or unknown, as before.
        """
        parser = shlex.shlex(line)
        c = parser.get_token()
        try:
            getattr(channels[c], action)()
        except KeyError:
            print("no channel")

    ### commands
    # up
    def do_up(self, line):
        """up n -- turn on the n channel."""
        self._apply(line, 'up')

    # down
    def do_down(self, line):
        """down n -- turn off the n channel."""
        self._apply(line, 'down')

    # toggle
    def do_toggle(self, line):
        """toggle n -- invert the state of the n channel."""
        self._apply(line, 'toggle')

    # reset
    def do_reset(self, line):
        """reset n -- power-cycle the n channel (off, 2 s pause, on)."""
        self._apply(line, 'reset')

    # status
    def do_status(self, line):
        """status -- print the table of all channels."""
        status()

    def do_annotate(self, line):
        """annotate n "c" -- attach annotation c to channel n."""
        # posix=True so quoted annotations come back without the quotes.
        parser = shlex.shlex(line, posix=True)
        c = parser.get_token()
        try:
            channels[c].annotation = parser.get_token()
        except KeyError:
            print("no channel")

    # quit
    def do_quit(self, line):
        """quit -- leave the shell."""
        exit(0)

    # handle ^C
    @staticmethod
    def handle_sigint(signum, frame):
        exit(0)
class Channel():
    """One relay channel.

    The relay module uses inverted logic: writing 1 to the pin switches
    the channel off and writing 0 switches it on.
    """

    def __init__(self, pin, boardname):
        self._pin = pin
        self.boardname = boardname
        self.annotation = None
        # Channels start in the "up" state.
        self._pin.write(0)

    def up(self):
        """Switch the channel on."""
        self._pin.write(0)

    def down(self):
        """Switch the channel off."""
        self._pin.write(1)

    def toggle(self):
        """Flip the channel to the opposite state."""
        self._pin.write(1 if self._pin.read() == 0 else 0)

    def reset(self):
        """Power-cycle the channel: off, two-second pause, on again."""
        self.down()
        sleep(2)
        self.up()

    @property
    def status(self):
        """'up' or 'down', derived from the current pin value."""
        return 'up' if self._pin.read() == 0 else 'down'
def status():
    """Print a table with the state of all four channels.

    Bug fix: annotations (and possibly board names) default to None, and
    str.format() raises TypeError when a precision spec like ``.40`` is
    applied to None (object.__format__ rejects non-empty format specs).
    The old code therefore crashed until every channel was annotated;
    None values are now rendered as an empty string.
    """
    row = "{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
    print(row.format("CH", "STATUS", "BOARD", "ANNOTATION"))
    for number, channel in (("1", ch1), ("2", ch2), ("3", ch3), ("4", ch4)):
        print(row.format(number, channel.status, channel.boardname or "",
                         channel.annotation or ""))
if __name__ == '__main__':
    opts = argparse.ArgumentParser()
    opts.add_argument("-v", action="store_true",
                      help="shows board Firmata firmware version")
    opts.add_argument("-f",
                      help="specify config file")
    opts.add_argument("dev", help="serial device")
    args = opts.parse_args()
    # init Firmata module
    try:
        board = Arduino(args.dev)
    except serial.serialutil.SerialException:
        print("could not open port {0}".format(args.dev))
        exit(1)
    # try to get board firmata version
    # this fails most of the times
    if args.v:
        v = board.get_firmata_version()
        try:
            print("{0}.{1}".format(v[0], v[1]))
            exit(0)
        except (NameError, TypeError):
            # v is None (or malformed) when the board does not answer.
            print("could not get board firmata version")
            exit(1)
    # handle configuration file
    if args.f:
        config = Config(args.f)
    else:
        config = Config()
    # turn off board led
    led = board.get_pin('d:13:o')
    led.write(0)
    # configuring pins ('d:N:o' = digital pin N, output mode)
    ch1 = Channel(board.get_pin('d:9:o'), config.get_boardname(1))
    ch2 = Channel(board.get_pin('d:8:o'), config.get_boardname(2))
    ch3 = Channel(board.get_pin('d:7:o'), config.get_boardname(3))
    ch4 = Channel(board.get_pin('d:6:o'), config.get_boardname(4))
    # Lookup table the Sh shell commands use to resolve channel numbers.
    channels = {'1': ch1, '2': ch2, '3': ch3, '4': ch4}
    # start shell
    signal.signal(signal.SIGINT, Sh.handle_sigint)
    Sh().cmdloop()
| bsd-2-clause |
GaryKriebel/osf.io | framework/email/tasks.py | 32 | 1454 | import smtplib
import logging
from email.mime.text import MIMEText
from framework.tasks import app
from website import settings
logger = logging.getLogger(__name__)
@app.task
def send_email(from_addr, to_addr, subject, message, mimetype='html', ttls=True, login=True,
               username=None, password=None, mail_server=None):
    """Send an email to the specified destination.

    Credentials and the mail server fall back to the MAIL_* values in
    the settings module; nothing is sent unless settings.USE_EMAIL is on.

    :param from_addr: A string, the sender email
    :param to_addr: A string, the recipient
    :param subject: subject of email
    :param message: body of message
    :return: True if successful
    """
    username = username or settings.MAIL_USERNAME
    password = password or settings.MAIL_PASSWORD
    mail_server = mail_server or settings.MAIL_SERVER

    if not settings.USE_EMAIL:
        return
    if login and (username is None or password is None):
        logger.error('Mail username and password not set; skipping send.')
        return

    msg = MIMEText(message, mimetype, _charset='utf-8')
    msg['Subject'] = subject
    msg['From'] = from_addr
    msg['To'] = to_addr

    connection = smtplib.SMTP(mail_server)
    connection.ehlo()
    if ttls:
        # Upgrade to TLS, then re-identify on the encrypted channel.
        connection.starttls()
        connection.ehlo()
    if login:
        connection.login(username, password)
    connection.sendmail(from_addr=from_addr, to_addrs=[to_addr],
                        msg=msg.as_string())
    connection.quit()
    return True
| apache-2.0 |
is06/navitia | source/jormungandr/jormungandr/renderers.py | 11 | 5578 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import json
import dict2xml
from jormungandr.protobuf_to_dict import protobuf_to_dict
from werkzeug.wrappers import Response
from .error import generate_error
def render(dico, formats, callback, status=200):
    """Serialize *dico* into a werkzeug Response in the requested format.

    :param dico: dict to serialize
    :param formats: accepted mimetypes / format names (substring matched)
    :param callback: JSONP callback name (JSON output only)
    :param status: HTTP status code for the response
    :return: Response with an Access-Control-Allow-Origin: * header

    Bug fixes: the HTML branch used the malformed mimetype
    'text/html;charser=utf8' (clients ignore the misspelled parameter),
    and the two error paths called render_from_protobuf() without the
    required *callback* argument, raising TypeError.
    """
    if 'application/json' in formats or 'json' in formats:
        json_str = json.dumps(dico, ensure_ascii=False)
        if callback == '' or callback is None:
            result = Response(json_str,
                              mimetype='application/json;charset=utf-8',
                              status=status)
        else:
            # JSONP: wrap the payload in the caller-supplied callback.
            result = Response(callback + '(' + json_str + ')',
                              mimetype='application/json;charset=utf-8',
                              status=status)
    elif 'text/html' in formats or 'text/plain' in formats:
        # Human-readable view: pretty-printed JSON with ids/hrefs linkified.
        json_str = json.dumps(add_links(dico), ensure_ascii=False, indent=4)
        result = Response('<html><pre>' + json_str + '</pre></html>',
                          mimetype='text/html;charset=utf-8', status=status)
    elif 'txt' in formats:
        result = Response(json.dumps(dico, ensure_ascii=False, indent=4),
                          mimetype='text/plain;charset=utf-8', status=status)
    elif 'xml' in formats:
        response = '<?xml version="1.0" encoding="UTF-8"?>\n'
        response += dict2xml.dict2xml(dico, wrap="Response")
        result = Response(response,
                          mimetype='application/xml;charset=utf-8',
                          status=status)
    elif 'pb' in formats:
        r = generate_error('Protocol buffer not supported for this request',
                           status=404)
        result = render_from_protobuf(r, 'json', callback)
    else:
        error = "Unknown file format format('" + str(formats) + "')"
        error += ". Please choose .json, .txt, .xml or .pb"
        r = generate_error(error, status=404)
        result = render_from_protobuf(r, 'json', callback)
    result.headers.add('Access-Control-Allow-Origin', '*')
    return result
def search_links(dico):
    """Collect templated link hrefs from *dico*.

    Returns {rel: href} for each templated link; for the special
    'related' rel the href is associated with every top-level key of
    *dico* except 'links' and 'pagination'.

    Fix: the old body used the Python2-only dict.iteritems() (with an
    unused value); iterating keys directly works on Python 2 and 3.
    """
    result = {}
    for link in dico.get('links', ()):
        if not link.get('templated'):
            continue
        if link['rel'] == "related":
            for key in dico:
                if key != "links" and key != "pagination":
                    result[key] = link['href']
        else:
            result[link['rel']] = link['href']
    return result
def add_a(obj, links, last_type):
    """Wrap *obj* in an HTML anchor when a template exists for *last_type*.

    The '{<last_type>.id}' placeholder in the template is replaced by
    *obj*; when no template matches, *obj* is returned unchanged.
    """
    if last_type not in links:
        return obj
    href = links[last_type].replace("{" + last_type + ".id}", obj)
    return "<a href='" + href + "'>" + obj + "</a>"
def add_links(obj):
    """Return *obj* with its ids and hrefs rewritten as HTML anchors
    (used by the text/html rendering of render())."""
    templated = search_links(obj)
    return add_links_recc(obj, templated)
def add_links_recc(obj, links, last_type=None):
    """Recursively rewrite 'id' and 'href' values in *obj* as HTML anchors.

    *links* maps an object type to its templated href (see search_links);
    *last_type* is the key under which *obj* was found and selects the
    template used for 'id' values.  The structure is modified in place
    and returned.

    Fixes: Python2-only dict.iteritems() replaced with .items() (valid on
    both 2 and 3; only values are reassigned, so mutating while iterating
    is safe), and type({})/type([]) checks replaced with isinstance
    against dict/list.
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            object_type = last_type
            if key == "id":
                obj[key] = add_a(value, links, object_type)
            if key == "href" and last_type != "links":
                obj[key] = "<a href='" + obj[key] + "'>" + obj[key] + '</a>'
            if key == "links":
                # Non-templated links become plain clickable anchors; the
                # recursion below skips them (last_type == "links").
                for link in obj["links"]:
                    if 'templated' in link and not link['templated']:
                        link['href'] = ("<a href='" + link['href'] + "'>"
                                        + link['href'] + '</a>')
            add_links_recc(obj[key], links, key)
    elif isinstance(obj, list):
        for value in obj:
            add_links_recc(value, links, last_type)
    return obj
def render_from_protobuf(pb_resp, format, callback, status=200):
    """Render a protobuf response either as raw protobuf or via render().

    :param pb_resp: protobuf response message; a non-zero status_code
        field overrides *status* and is cleared before serialization
    :param format: requested output format ('pb' or anything render() takes)
    :param callback: JSONP callback name, forwarded to render()
    :param status: default HTTP status code

    Note: render() historically called this with only two positional
    arguments; callers must now always pass *callback* (render() was
    fixed to forward its own callback).
    """
    if pb_resp.status_code:
        status = pb_resp.status_code
        pb_resp.ClearField("status_code")
    if format == 'pb':
        return Response(pb_resp.SerializeToString(),
                        mimetype='application/octet-stream', status=status)
    return render(protobuf_to_dict(pb_resp, use_enum_labels=True), format,
                  callback, status)
zzzzzsh/pyspider | pyspider/webui/webdav.py | 56 | 6092 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2015-6-3 11:29
import os
import re
import time
import base64
from six import BytesIO
from wsgidav.wsgidav_app import DEFAULT_CONFIG, WsgiDAVApp
from wsgidav.dav_provider import DAVProvider, DAVCollection, DAVNonCollection
from wsgidav.dav_error import DAVError, HTTP_NOT_FOUND, HTTP_FORBIDDEN
from pyspider.libs.utils import utf8, text
from .app import app
class ContentIO(BytesIO):
    """BytesIO that snapshots its buffer into ``self.content`` on close,
    so the written bytes remain readable after the stream is released."""

    def close(self):
        self.content = self.getvalue()
        super(ContentIO, self).close()
class ScriptResource(DAVNonCollection):
    """A single pyspider project script exposed as a WebDAV file.

    Reading returns the project's script text; writing updates (or, for
    an unknown but valid project name, creates) the project in projectdb.
    """
    def __init__(self, path, environ, app, project=None):
        super(ScriptResource, self).__init__(path, environ)
        self.app = app
        # True once the project() property decides this is a new project.
        self.new_project = False
        self._project = project
        self.project_name = self.name
        self.writebuffer = None
        # Project names are stored without the .py suffix shown over DAV.
        if self.project_name.endswith('.py'):
            self.project_name = self.project_name[:-len('.py')]
    @property
    def project(self):
        """Lazily fetch the project record; fabricate a TODO stub for a
        valid-but-unknown name, or raise 403 for an invalid one."""
        if self._project:
            return self._project
        projectdb = self.app.config['projectdb']
        if projectdb:
            self._project = projectdb.get(self.project_name)
        if not self._project:
            if projectdb.verify_project_name(self.project_name) and self.name.endswith('.py'):
                self.new_project = True
                self._project = {
                    'name': self.project_name,
                    'script': '',
                    'status': 'TODO',
                    'rate': self.app.config.get('max_rate', 1),
                    'burst': self.app.config.get('max_burst', 3),
                    'updatetime': time.time(),
                }
            else:
                raise DAVError(HTTP_FORBIDDEN)
        return self._project
    @property
    def readonly(self):
        """Whether writes are refused.

        Projects whose group contains 'lock' are writable only with HTTP
        Basic credentials matching webui_username/webui_password (and
        only when both are configured); everything else is writable.
        """
        projectdb = self.app.config['projectdb']
        if not projectdb:
            return True
        if 'lock' in projectdb.split_group(self.project.get('group')) \
                and self.app.config.get('webui_username') \
                and self.app.config.get('webui_password'):
            authheader = self.environ.get("HTTP_AUTHORIZATION")
            if not authheader:
                return True
            authheader = authheader[len("Basic "):]
            try:
                username, password = text(base64.b64decode(authheader)).split(':', 1)
            except Exception as e:
                self.app.logger.error('wrong api key: %r, %r', authheader, e)
                return True
            if username == self.app.config['webui_username'] \
                    and password == self.app.config['webui_password']:
                return False
            else:
                return True
        return False
    def getContentLength(self):
        # Length in bytes of the utf-8 encoded script.
        return len(utf8(self.project['script']))
    def getContentType(self):
        return 'text/plain'
    def getLastModified(self):
        # Project 'updatetime' is a time.time() timestamp (see project()).
        return self.project['updatetime']
    def getContent(self):
        return BytesIO(utf8(self.project['script']))
    def beginWrite(self, contentType=None):
        """Return the stream DAV uploads write into; read-only resources
        fall back to the base class (which is expected to refuse)."""
        if self.readonly:
            self.app.logger.error('webdav.beginWrite readonly')
            return super(ScriptResource, self).beginWrite(contentType)
        self.writebuffer = ContentIO()
        return self.writebuffer
    def endWrite(self, withErrors):
        """Persist the uploaded script once the DAV write completes."""
        if withErrors:
            self.app.logger.error('webdav.endWrite error: %r', withErrors)
            return super(ScriptResource, self).endWrite(withErrors)
        if not self.writebuffer:
            return
        projectdb = self.app.config['projectdb']
        if not projectdb:
            return
        info = {
            'script': text(getattr(self.writebuffer, 'content', ''))
        }
        # Editing a live project sends it back through checking.
        if self.project.get('status') in ('DEBUG', 'RUNNING'):
            info['status'] = 'CHECKING'
        if self.new_project:
            self.project.update(info)
            self.new_project = False
            return projectdb.insert(self.project_name, self.project)
        else:
            return projectdb.update(self.project_name, info)
class RootCollection(DAVCollection):
    """The DAV root: a flat directory listing every project as '<name>.py'."""
    def __init__(self, path, environ, app):
        super(RootCollection, self).__init__(path, environ)
        self.app = app
        self.projectdb = self.app.config['projectdb']
    def getMemberList(self):
        """Return a ScriptResource for every project in projectdb."""
        members = []
        for project in self.projectdb.get_all():
            project_name = utf8(project['name'])
            # Present scripts with a .py suffix in directory listings.
            if not project_name.endswith('.py'):
                project_name += '.py'
            members.append(ScriptResource(
                os.path.join(self.path, project_name),
                self.environ,
                self.app,
                project
            ))
        return members
    def getMemberNames(self):
        """Return just the member file names (cheaper: only 'name' is
        fetched from projectdb)."""
        members = []
        for project in self.projectdb.get_all(fields=['name', ]):
            project_name = utf8(project['name'])
            if not project_name.endswith('.py'):
                project_name += '.py'
            members.append(project_name)
        return members
class ScriptProvider(DAVProvider):
    """WsgiDAV provider exposing pyspider projects as a flat directory
    of .py scripts."""

    def __init__(self, app):
        super(ScriptProvider, self).__init__()
        self.app = app

    def __repr__(self):
        return "pyspiderScriptProvider"

    def getResourceInst(self, path, environ):
        """Resolve *path* to the root collection or a script resource."""
        normalized = os.path.normpath(path)
        if normalized in ('/', '.', ''):
            return RootCollection(normalized, environ, self.app)
        return ScriptResource(normalized, environ, self.app)
# Build the WsgiDAV configuration from its defaults, then override what
# pyspider needs: mount the script provider at /dav, no user auth mapping,
# and verbose output only when the Flask app runs in debug mode.
config = DEFAULT_CONFIG.copy()
config.update({
    'mount_path': '/dav',
    'provider_mapping': {
        '/': ScriptProvider(app)
    },
    'user_mapping': {},
    'verbose': 1 if app.debug else 0,
    'dir_browser': {'davmount': False,
                    'enable': True,
                    'msmount': False,
                    'response_trailer': ''},
})
# WSGI application serving the DAV tree; mounted alongside the web UI.
dav_app = WsgiDAVApp(config)
| apache-2.0 |
kevgliss/lemur | lemur/tests/vectors.py | 1 | 14798 | from lemur.common.utils import parse_certificate
# Pre-built request headers for the test client.  Each carries a long-lived
# JWT identifying a fixture principal.
# NOTE(review): the scheme says 'Basic' but the credential is a JWT -- this
# mirrors what the application under test accepts; confirm against the auth
# middleware before changing.

# Non-admin user (subject id 1).
VALID_USER_HEADER_TOKEN = {
    'Authorization': 'Basic ' + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE1MjE2NTIwMjIsImV4cCI6MjM4NTY1MjAyMiwic3ViIjoxfQ.uK4PZjVAs0gt6_9h2EkYkKd64nFXdOq-rHsJZzeQicc',
    'Content-Type': 'application/json'
}

# Admin user (subject id 2) session-style token.
VALID_ADMIN_HEADER_TOKEN = {
    'Authorization': 'Basic ' + 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1MjE2NTE2NjMsInN1YiI6MiwiYWlkIjoxfQ.wyf5PkQNcggLrMFqxDfzjY-GWPw_XsuWvU2GmQaC5sg',
    'Content-Type': 'application/json'
}

# Admin API token (no expiry claim).
VALID_ADMIN_API_TOKEN = {
    'Authorization': 'Basic ' + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjIsImFpZCI6MSwiaWF0IjoxNDM1MjMzMzY5fQ.umW0I_oh4MVZ2qrClzj9SfYnQl6cd0HGzh9EwkDW60I',
    'Content-Type': 'application/json'
}
INTERNAL_VALID_LONG_STR = """
-----BEGIN CERTIFICATE-----
MIID1zCCAr+gAwIBAgIBATANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UEBhMCVVMx
CzAJBgNVBAgMAkNBMRAwDgYDVQQHDAdBIHBsYWNlMRcwFQYDVQQDDA5sb25nLmxp
dmVkLmNvbTEQMA4GA1UECgwHRXhhbXBsZTETMBEGA1UECwwKT3BlcmF0aW9uczEe
MBwGCSqGSIb3DQEJARYPamltQGV4YW1wbGUuY29tMB4XDTE1MDYyNjIwMzA1MloX
DTQwMDEwMTIwMzA1MlowgYwxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEQMA4G
A1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5saXZlZC5jb20xEDAOBgNVBAoM
B0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMxHjAcBgkqhkiG9w0BCQEWD2pp
bUBleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKeg
sqb0HI10i2eRSx3pLeA7JoGdUpud7hy3bGws/1HgOSpRMin9Y65DEpVq2Ia9oir7
XOJLpSTEIulnBkgDHNOsdKVYHDR6k0gUisnIKSl2C3IgKHpCouwiOvvVPwd3PExg
17+d7KLBIu8LpG28wkXKFU8vSz5i7H4i/XCEChnKJ4oGJuGAJJM4Zn022U156pco
97aEAc9ZXR/1dm2njr4XxCXmrnKCYTElfRhLkmxtv+mCi6eV//5d12z7mY3dTBkQ
EG2xpb5DQ+ITQ8BzsKcPX80rz8rTzgYFwaV3gUg38+bgka/JGJq8HgBuNnHv5CeT
1T/EoZTRYW2oPfOgQK8CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
Af8EBAMCAQYwHQYDVR0OBBYEFIuDY73dQIhj2nnd4DG2SvseHVVaMA0GCSqGSIb3
DQEBCwUAA4IBAQBk/WwfoWYdS0M8rz5tJda/cMdYFSugUbTn6JJdmHuw6RmiKzKG
8NzfSqBR6m8MWdSTuAZ/chsUZH9YEIjS9tAH9/FfUFBrsUE7TXaUgpNBm4DBLLfl
fj5xDmEyj17JPN/C36amQ9eU5BNesdCx9EkdWLyVJaM50HFRo71W0/FrpKZyKK68
XPhd1z9w/xgfCfYhe7PjEmrmNPN5Tgk5TyXW+UUhOepDctAv2DBetptcx+gHrtW+
Ygk1wptlt/tg7uUmstmXZA4vTPx83f4P3KSS3XHIYFIyGFWUDs23C20K6mmW1iXa
h0S8LN4iv/+vNFPNiM1z9X/SZgfbwZXrLsSi
-----END CERTIFICATE-----
"""
INTERNAL_VALID_LONG_CERT = parse_certificate(INTERNAL_VALID_LONG_STR)
INTERNAL_INVALID_STR = """
-----BEGIN CERTIFICATE-----
MIIEFTCCAv2gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMDM2NDha
Fw0xNTA2MjcyMDM2NDhaMGkxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEUMBIGA1UEAxMLZXhwaXJlZC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCcSMzRxB6+UONPqYMy1Ojw3Wi8DIpt9USnSR60I8LiEuRK2ayr
0RMjLJ6sBEgy/hISEqpLgTsciDpxwaTC/WNrkT9vaMcwfiG3V0Red8zbKHQzC+Ty
cLRg9wbC3v613kaIZCQCoE7Aouru9WbVPmuRoasfztrgksWmH9infQbL4TDcmcxo
qGaMn4ajQTVAD63CKnut+CULZIMBREBVlSTLiOO7qZdTrd+vjtLWvdXVPcWLSBrd
Vpu3YnhqqTte+DMzQHwY7A2s3fu4Cg4H4npzcR+0H1H/B5z64kxqZq9FWGIcZcz7
0xXeHN9UUKPDSTgsjtIzKTaIOe9eML3jGSU7AgMBAAGjgaIwgZ8wDAYDVR0TAQH/
BAIwADAOBgNVHQ8BAf8EBAMCBaAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwEwHQYD
VR0OBBYEFKwBYaxCLxK0csmV319rbRdqDllWMEgGA1UdHwRBMD8wPaA7oDmGN2h0
dHA6Ly90ZXN0LmNsb3VkY2EuY3JsLm5ldGZsaXguY29tL2xvbmdsaXZlZENBL2Ny
bC5wZW0wDQYJKoZIhvcNAQELBQADggEBADFngqsMsGnNBWknphLDvnoWu5MTrpsD
AgN0bktv5ACKRWhi/qtCmkEf6TieecRMwpQNMpE50dko3LGGdWlZRCI8wdH/zrw2
8MnOeCBxuS1nB4muUGjbf4LIbtuwoHSESrkfmuKjGGK9JTszLL6Hb9YnoFefeg8L
T7W3s8mm5bVHhQM7J9tV6dz/sVDmpOSuzL8oZkqeKP+lWU6ytaohFFpbdzaxWipU
3+GobVe4vRqoF1kwuhQ8YbMbXWDK6zlrT9pjFABcQ/b5nveiW93JDQUbjmVccx/u
kP+oGWtHvhteUAe8Gloo5NchZJ0/BqlYRCD5aAHcmbXRsDid9mO4ADU=
-----END CERTIFICATE-----
"""
INTERNAL_INVALID_CERT = parse_certificate(INTERNAL_INVALID_STR)
INTERNAL_VALID_SAN_STR = """
-----BEGIN CERTIFICATE-----
MIIESjCCAzKgAwIBAgICA+kwDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMDU5MDZa
Fw0yMDAxMDEyMDU5MDZaMG0xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEYMBYGA1UEAxMPc2FuLmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA2Nq5zFh2WiqtNIPssdSwQ9/00j370VcKPlOATLqK24Q+
dr2hWP1WlZJ0NOoPefhoIysccs2tRivosTpViRAzNJXigBHhxe8ger0QhVW6AXIp
ov327N689TgY4GzRrwqavjz8cqussIcnEUr4NLLsU5AvXE7e3WxYkkskzO497UOI
uCBtWdCXZ4cAGhtVkkA5uQHfPsLmgRVoUmdMDt5ZmA8HhLX4X6vkT3oGIhdGCw6T
W+Cu7PfYlSaggSBbBniU0YKTFLfGLkYFZN/b6bxzvt6CTJLoVFAYXyLJwUvd3EAm
u23HgUflIyZNG3xVPml/lah0OIX7RtSigXUSLm7lYwIDAQABo4HTMIHQMAwGA1Ud
EwEB/wQCMAAwDgYDVR0PAQH/BAQDAgWgMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMB
MC8GA1UdEQQoMCaCEWV4YW1wbGUyLmxvbmcuY29tghFleGFtcGxlMy5sb25nLmNv
bTAdBgNVHQ4EFgQUiiIyclcBIfJ5PE3OCcTXwzJAM+0wSAYDVR0fBEEwPzA9oDug
OYY3aHR0cDovL3Rlc3QuY2xvdWRjYS5jcmwubmV0ZmxpeC5jb20vbG9uZ2xpdmVk
Q0EvY3JsLnBlbTANBgkqhkiG9w0BAQsFAAOCAQEAgcTioq70B/aPWovNTy+84wLw
VX1q6bCdH3FJwAv2rc28CHp5mCGdR6JqfT/H/CbfRwT1Yh/5i7T5kEVyz+Dp3+p+
AJ2xauHrTvWn0QHQYbUWICwkuZ7VTI9nd0Fry1FQI1EeKiCmyrzNljiN2l+GZw6i
NJUpVNtwRyWRzB+yIx2E9wyydqDFH+sROuQok7EgzlQileitPrF4RrkfIhQp2/ki
YBrY/duF15YpoMKAlFhDBh6R9/nb5kI2n3pY6I5h6LEYfLStazXbIu61M8zu9TM/
+t5Oz6rmcjohL22+sEmmRz86dQZlrBBUxX0kCQj6OAFB4awtRd4fKtkCkZhvhQ==
-----END CERTIFICATE-----
"""
INTERNAL_VALID_SAN_CERT = parse_certificate(INTERNAL_VALID_SAN_STR)
INTERNAL_VALID_WILDCARD_STR = """
-----BEGIN CERTIFICATE-----
MIIEHDCCAwSgAwIBAgICA+owDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMTEzMTBa
Fw0yMDAxMDEyMTEzMTBaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEbMBkGA1UEAxQSKi50ZXN0LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA0T7OEY9FxMIdhe1CwLc+TbDeSfDN6KRHlp0I9MwK
3Pre7A1+1vmRzLiS5qAdOh3Oexelmgdkn/fZUFI+IqEVJwmeUiq13Kib3BFnVtbB
N1RdT7rZF24Bqwygf1DHAekEBYdvu4dGD/gYKsLYsSMD7g6glUuhTbgR871updcV
USYJ801y640CcHjai8UCLxpqtkP/Alob+/KDczUHbhdxYgmH34aQgxC8zg+uzuq6
bIqUAc6SctI+6ArXOqri7wSMgZUnogpF4R5QbCnlDfSzNcNxJFtGp8cy7CNWebMd
IWgBYwee8i8S6Q90B2QUFD9EGG2pEZldpudTxWUpq0tWmwIDAQABo4GiMIGfMAwG
A1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgWgMBYGA1UdJQEB/wQMMAoGCCsGAQUF
BwMBMB0GA1UdDgQWBBTH2KIECrqPHMbsVysGv7ggkYYZGDBIBgNVHR8EQTA/MD2g
O6A5hjdodHRwOi8vdGVzdC5jbG91ZGNhLmNybC5uZXRmbGl4LmNvbS9sb25nbGl2
ZWRDQS9jcmwucGVtMA0GCSqGSIb3DQEBCwUAA4IBAQBjjfur2B6BcdIQIouwhXGk
IFE5gUYMK5S8Crf/lpMxwHdWK8QM1BpJu9gIo6VoM8uFVa8qlY8LN0SyNyWw+qU5
Jc8X/qCeeJwXEyXY3dIYRT/1aj7FCc7EFn1j6pcHPD6/0M2z0Zmj+1rWNBJdcYor
pCy27OgRoJKZ6YhEYekzwIPeFPL6irIN9xKPnfH0b2cnYa/g56DyGmyKH2Kkhz0A
UGniiUh4bAUuppbtSIvUTsRsJuPYOqHC3h8791JZ/3Sr5uB7QbCdz9K14c9zi6Z1
S0Xb3ZauZJQI7OdHeUPDRVq+8hcG77sopN9pEYrIH08oxvLX2US3GqrowjOxthRa
-----END CERTIFICATE-----
"""
INTERNAL_VALID_WILDCARD_CERT = parse_certificate(INTERNAL_VALID_WILDCARD_STR)
EXTERNAL_VALID_STR = """
-----BEGIN CERTIFICATE-----
MIIFHzCCBAegAwIBAgIQGFWCciDWzbOej/TbAJN0WzANBgkqhkiG9w0BAQsFADCB
pDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w
HQYDVQQLExZGT1IgVEVTVCBQVVJQT1NFUyBPTkxZMR8wHQYDVQQLExZTeW1hbnRl
YyBUcnVzdCBOZXR3b3JrMTQwMgYDVQQDEytTeW1hbnRlYyBDbGFzcyAzIFNlY3Vy
ZSBTZXJ2ZXIgVEVTVCBDQSAtIEc0MB4XDTE1MDYyNDAwMDAwMFoXDTE1MDYyNTIz
NTk1OVowgYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDQUxJRk9STklBMRIwEAYD
VQQHDAlMb3MgR2F0b3MxFjAUBgNVBAoMDU5ldGZsaXgsIEluYy4xEzARBgNVBAsM
Ck9wZXJhdGlvbnMxHjAcBgNVBAMMFXR0dHQyLm5ldGZsaXh0ZXN0Lm5ldDCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALwMY/yod9YGLKLCzbbsSUBWm4ZC
DfcgbUNL3JLtZaFCaOeUPLa4YNqty+9ACXBLYPNMm+dgsRHix8N2uwtZrGazHILK
qey96eSTosPsvKFt0KLNpUl8GC/YxA69L128SJgFaaq5Dr2Mp3NP0rt0RIz5luPj
Oae0hkGOS8uS0dySlAmfOw2OsJY3gCw5UHcmpcCHpO2f7uU+tWKmgfz4U/PpQ0kz
WVJno+JhcaXIximtiLreCNF1LpraAjrcZJ+ySJwYaLaYMiJoFkdXUtKJcyqmkbA3
Splt7N4Hb8c+5aXv225uQYCh0HXQeMyBotlaIrAddP5obrtjxhXBxB4ysEcCAwEA
AaOCAWowggFmMCAGA1UdEQQZMBeCFXR0dHQyLm5ldGZsaXh0ZXN0Lm5ldDAJBgNV
HRMEAjAAMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB
BQUHAwIwYQYDVR0gBFowWDBWBgZngQwBAgIwTDAjBggrBgEFBQcCARYXaHR0cHM6
Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYBBQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNi
LmNvbS9ycGEwHwYDVR0jBBgwFoAUNI9UtT8KH1K6nLJl7bqLCGcZ4AQwKwYDVR0f
BCQwIjAgoB6gHIYaaHR0cDovL3NzLnN5bWNiLmNvbS9zcy5jcmwwVwYIKwYBBQUH
AQEESzBJMB8GCCsGAQUFBzABhhNodHRwOi8vc3Muc3ltY2QuY29tMCYGCCsGAQUF
BzAChhpodHRwOi8vc3Muc3ltY2IuY29tL3NzLmNydDANBgkqhkiG9w0BAQsFAAOC
AQEAQuIfyBltvCZ9orqNdS6PUo2PaeUgJzkmdDwbDVd7rTwbZIwGZXZjeKseqMSb
L+r/jN6DWrScVylleiz0N/D0lSUhC609dQKuicGpy3yQaXwhfYZ6duxrW3Ii/+Vz
pFv7DnG3JPZjIXCmVhQVIv/8oaV0bfUF/1mrWRFwZiBILxa7iaycRhjusJEVRtzN
Ot/qkLluHO0wbEHnASV4P9Y5NuR/bliuFS/DeRczofNS78jJuZrGvl2AqS/19Hvm
Bs63gULVCqWygt5KEbv990m/XGuRMaXuHzHCHB4v5LRM30FiFmqCzyD8d+btzW9B
1hZ5s3rj+a6UwvpinKJoPfgkgg==
-----END CERTIFICATE-----
"""
EXTERNAL_CERT = parse_certificate(EXTERNAL_VALID_STR)
PRIVATE_KEY_STR = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAnEjM0cQevlDjT6mDMtTo8N1ovAyKbfVEp0ketCPC4hLkStms
q9ETIyyerARIMv4SEhKqS4E7HIg6ccGkwv1ja5E/b2jHMH4ht1dEXnfM2yh0Mwvk
8nC0YPcGwt7+td5GiGQkAqBOwKLq7vVm1T5rkaGrH87a4JLFph/Yp30Gy+Ew3JnM
aKhmjJ+Go0E1QA+twip7rfglC2SDAURAVZUky4jju6mXU63fr47S1r3V1T3Fi0ga
3Vabt2J4aqk7XvgzM0B8GOwNrN37uAoOB+J6c3EftB9R/wec+uJMamavRVhiHGXM
+9MV3hzfVFCjw0k4LI7SMyk2iDnvXjC94xklOwIDAQABAoIBAGeykly5MeD70OgB
xPEMfoebkav88jklnekVxk6mz9+rw1i6+CyFLJqRN7NRoApdtOXTBrXUyMEUzxq9
7zIGaVptZNbqggh2GK8LM20vNnlQbVGVmdMX30fbgNv6lK1eEBTdxVsMvVRqhVIK
+LGTmlJmICKZ4XdTS9v/k4UGm2TZPCt2pvrNzIpT7TIm2QybCbZoOPY8SHx0U8c5
lmtdqmIsy2JPNSOsOCiJgzQIvkR/fMGWFgNE4fEHsHAfubgpK97TGzwLiFRmlTb+
QUDaz0YbwhF+5bQjHtaGUGATcg5bvV1UWBUvp+g4gRIfwzG+3PAGacYE/djouAdG
PHbxuCkCgYEAz/LsgMgsaV3arlounviSwc8wG9WcI5gbYw5qwX0P57ZoxS7EBAGu
yYtudurJrU9SfsSV44GL11UzBcAGOeS0btddrcMiNBhc7fY7P/1xaufQ3GjG06/v
kH4gOjzsGSTJliZ709g4J6hnMCxz0O0PS31Qg5cBD8UG8xO7/AV0is0CgYEAwGWy
A6YPinpZuenaxrivM5AcVDWmj7aeC29M63l/GY+O5LQH2PKVESH0vL5PvG3LkrCR
SUbaMKdKR0wnZsJ89z21eZ54ydUgj41bZJczl8drxcY0GSajj6XZXGTUjtoVrWsB
A0kJbjsrpd+8J316Y9iCgpopmbVd965pUHe4ACcCgYAamJlDB1cWytgzQHmB/4zV
mOgwRyvHKacnDir9QD+OhTf1MDwFvylZwamJMBJHRkPozr/U7zaxfcYe0CZ7tRKW
spjapoBzZUJNdRay4nllEO0Xo5b6cCAVvOvmRvBzbs8Rky53M8pK2DEKakUNzaQN
JaPskJ2kJLD02etLGm+DaQKBgQCTI/NNmQ2foUzHw1J+0jWjoJ4ZxOI6XLZoFlnk
aInMuZ7Vx92MjJF2hdqPEpkWiX28FO839EjgFsDW4CXuD+XUjEwi1BCagzWgs8Hm
n0Bk3q3MlnW3mnZSYMtoPvDUw3L6qrAenBfrRrNt6zsRlIQqoiXFzjLsi+luh+Oh
F74P1wKBgQCPQGKLUcfAvjIcZp4ECH0K8sBEmoEf8pceuALZ3H5vneYDzqMDIceo
t5Gpocpt77LJnNiszXSerj/KjX2MflY5xUXeekWowLVTBOK5+CZ8+XBIgBt1hIG3
XKxcRgm/Va4QMEAnec0qXfdTVJaJiAW0bdKwKRRrrbwcTdNRGibdng==
-----END RSA PRIVATE KEY-----
"""
INTERNAL_CERTIFICATE_A_STR = """
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIBATANBgkqhkiG9w0BAQsFADB5MQswCQYDVQQGEwJVUzET
MBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJTG9zIEdhdG9zMRYwFAYDVQQK
DA1OZXRmbGl4LCBJbmMuMRMwEQYDVQQLDApPcGVyYXRpb25zMRQwEgYDVQQDDAtB
Y29tbW9uTmFtZTAeFw0xNjA2MjkyMjE0NDdaFw0zNjA2MjkyMjE0NDdaMHkxCzAJ
BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlMb3MgR2F0
b3MxFjAUBgNVBAoMDU5ldGZsaXgsIEluYy4xEzARBgNVBAsMCk9wZXJhdGlvbnMx
FDASBgNVBAMMC0Fjb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAtkyvL6EqSgYSJX11635Hb8FBG/8Wey6C2KtG7M+GXvGCsSmfNqQMeZdf
W9Avxelkstp5/K+ilVJJ2TJRelu1yVUUkQcrP7imgf7CxKQAnPz2oXQImLFbm7OS
1zKA+qwtLGrId3vVQaotUtdI+wxx0YE66pyfOhQJsVOeuYwG8CCxnAj/lXeNLA1t
n39A8FLfj9nxjvZWWm2z8qXO2IYOWEMOOel1zixhypeJoTD2cJHDKNlUnXN4q5ej
psD4ehLFXIPXsKJv5XOtNYB9UHB3moXlEOuKAquRzBOfTP+rUYyfbHmzCN4eXekp
R6vze49hlg8QdCNjVY6jHRrOuVKGuwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAt
rE2Ee6a0zRlJHiuP5Zr61s6ZnwIsPN5sjo3pFJ/goHeNWbq+02FUJLXROtxSMlo8
jLYpnQbm3Qoyd0KjGn9myP1vqBL6Yzf9dRI2li9XYmavxU7OK/KJtBo/Wnw3DVT5
jxYrn4YKJU9+T0hr57bWUQ7HjMNojwBcgglzPN9KOtfTfbPEUIeoRpCjeyjwBUSN
nrTDiYPV+XI4LAyDmuR7esSvm2+0h6C0dmUbVspkxBaKFEYUKIYaZbEFEBsyZGri
qDIyu9HSvu2MJ2lVxfMNsW+IYG74DOqJQsIFP+7hrfdPoMGm4GvAiHR1IuSmq+sf
L0Ew8hy0GG3nZ6uXLW7q
-----END CERTIFICATE-----
"""
INTERNAL_PRIVATE_KEY_A_STR = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAtkyvL6EqSgYSJX11635Hb8FBG/8Wey6C2KtG7M+GXvGCsSmf
NqQMeZdfW9Avxelkstp5/K+ilVJJ2TJRelu1yVUUkQcrP7imgf7CxKQAnPz2oXQI
mLFbm7OS1zKA+qwtLGrId3vVQaotUtdI+wxx0YE66pyfOhQJsVOeuYwG8CCxnAj/
lXeNLA1tn39A8FLfj9nxjvZWWm2z8qXO2IYOWEMOOel1zixhypeJoTD2cJHDKNlU
nXN4q5ejpsD4ehLFXIPXsKJv5XOtNYB9UHB3moXlEOuKAquRzBOfTP+rUYyfbHmz
CN4eXekpR6vze49hlg8QdCNjVY6jHRrOuVKGuwIDAQABAoIBACYPnqfwGzc3S0Se
jCctx1Zy39grixMO4+y+3eEFdwWNoP7CNOagm6YrT5KIxeCpWQfqi3uRY/2PH7IE
SnSkfzDY3aFmAMaeE82iViHeJ+6e9hNBeaX/qaO5e1gIyFsN5aSXauFfbmf2Ut4v
6qHXuE/Ijnd7WdczZc6rKcGNlck+f/QtsZhYEYbgHT3Nrt0ztlvkdrcyRIxZTeS7
7gvVWrVv6rviTobi/ZkeM9pqe5bbLuWgb/ArvI52pJwaUcz9LPGo+miank6e4gAd
cTudoREtBKVgXROhTSz33mdjjUTCDGdtILTztDSgLpJXYT0w2h1zmfV7t4tztzzQ
xW5LVCECgYEA33YG/gaZbfH6szC/heilojelrIG+n7GjsqpfMqGFofYNBAswUC3w
qZdeXxqGZEXC8mx8CufDhC50vJv353WAHaFFJcwy2QeGvHfPAZ4ZQ68o9XLeva4t
M6+ZtOiaK8u/mzxq43Jj7FbXmxxlJXY3B0uWdWpKGsPRTmSaUw0lKPECgYEA0NhG
74C6zRgHY2Eq2Qq7+NtlvpzUtVtalhiDoCEpDMhjzLUTBNy6yMsSdP8SyCy9O7Ng
rrXJdgKHvpjnJyUvB3hhEAurPIPWJArEfEHAF+V8mHY8f58xZqgHRsYsH3tWHYx4
2lzmposTES5KKV4xsYbjjyzXX+WNdaOkC4JBCmsCgYEA3j2JKL0xfMordmlmIWzG
xnWnnNCQ4EwQrVGKSlWgDPsj6MCj9Sorbs9veRBtVm6XOvkvyLzFk8GMMkTAIf+X
QmCw362daIF2vBw/0bEGGW2sQ6hR5L3EkOH08ZpgMmx6DI7jE4Ah5txbpBVydvaC
Ngw0AGSMfOABW4DshurM6VECgYEAxeH3rJ2r4gL/lSGPaOGr5At2Z1rQjRqHRarq
pQJmk+8X6PI1mCjRbspDrcm2cSc7EmNPm5sxzXhuSKE2fLfVzN06EusLkCZW9AWj
0Ry3t6zBFvEJN9+N/nf9lQjW6+mAWjUsmbLm9SzXnzLeID5ZFZ365kGVvQ6Tr8Cj
AiikGgsCgYEAlYGNwBKWClm797YVyPhmqrFX4T9Hpxc7oC3vVwd96tAbLlSrW8r5
o6ynBW1bG+qfjx9GyThgudvRtB+0vTSShrT5GftLCyMtOiYSHkGEvMOGFBuowzoz
3i841gR9+cwA0S1hy7fC0PDmTo0xC91JocwesPQ023MmECPfu6Frzog=
-----END RSA PRIVATE KEY-----
"""
CSR_STR = """
-----BEGIN CERTIFICATE REQUEST-----
MIIC1zCCAb8CAQAwczEUMBIGA1UEAwwLQUNvbW1vbk5hbWUxFTATBgNVBAoMDG9y
Z2FuaXphdGlvbjEOMAwGA1UECwwFZ3VuaXQxCzAJBgNVBAYTAlVTMRMwEQYDVQQI
DApDYWxpZm9ybmlhMRIwEAYDVQQHDAlzb21ld2hlcmUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDNnY+Ap+V9+Eg/PAtd7bq27D7tDvbL10AysNUSazy7
gJyHfJyE3oiXm28zjFNzRQ35qhsCFpWg8M36FpdP9fIFG9sVXV/ye+YNBkZ2aTJi
RnbErZcy8qc+2MRd2JKE9g0pISp9hAEeEPLTwSoGqf5VqOaBehBqL5OKNUr7JAxV
TIH1oVU87w/6xg/WsUiyPo49WXxF/3DZNP1UOTYiffxIiARhTb9EtlXpt5iOlic3
w/vBX6qsH++XJIus2WE+ABlAVUQTCvc6bgpu4zjc8nlm3ClqkAKcxn2ubEder+Fh
hagMYGsbYG+/IWrKYN6S0BjE26tNMiOlmIebimjEdFpnAgMBAAGgHzAdBgkqhkiG
9w0BCQ4xEDAOMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAE5OKI/n
b1ZRJDL4SpjWggRjfwBdYmb96lGH0aGDoVUP9UUusLzpWLtutkgr9Hh29agSsLZF
j535NeXHf+Jc4UyR288WQVJthgAT1e5+jBNPxz4IcTnDW7ZMJLGm495XaKi6Krcg
+8Qn2+h04jBTbN2Z9+MXGak0B8ycrbDx/FYL4KgBJRvS805d43zC6L1aUfRbpZgN
QeQoBdLhFNB1kAYSWCyETwRQOeGEphBJYBPcXsQVBWbMtLpbhjRZ1uTVZEFIh8Oa
zm3Cn4Ul8DO26w9QS4fmZjmnPOZFXYMWoOR6osHzb62PWQ8FBMqXcdToBV2Q9Iw4
PiFAxlc0tVjlLqQ=
-----END CERTIFICATE REQUEST-----
"""
| apache-2.0 |
purpleidea/macaronic-net | django/utils/synch.py | 376 | 2549 | """
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by eugene@lazutkin.com)
"""
try:
import threading
except ImportError:
import dummy_threading as threading
class RWLock:
    """
    Classic implementation of reader-writer lock with preference to writers.

    Readers can access a resource simultaneously.
    Writers get an exclusive access.

    API is self-descriptive:
        reader_enters()
        reader_leaves()
        writer_enters()
        writer_leaves()
    """
    def __init__(self):
        # Guards every counter below.
        self.mutex = threading.RLock()
        # Both semaphores start at zero: a thread blocks on acquire() until
        # some state change release()s a permit for it.
        self.can_read = threading.Semaphore(0)
        self.can_write = threading.Semaphore(0)
        self.active_readers = 0
        self.active_writers = 0
        self.waiting_readers = 0
        self.waiting_writers = 0

    def reader_enters(self):
        """Block until shared (read) access is granted."""
        with self.mutex:
            if self.active_writers == 0 and self.waiting_writers == 0:
                # No writer active or queued: admit this reader right away.
                self.active_readers += 1
                self.can_read.release()
            else:
                # Writers have preference: queue up behind them.
                self.waiting_readers += 1
        self.can_read.acquire()

    def reader_leaves(self):
        """Drop shared access; hand over to one queued writer if we were last."""
        with self.mutex:
            self.active_readers -= 1
            if self.active_readers == 0 and self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()

    def writer_enters(self):
        """Block until exclusive (write) access is granted."""
        with self.mutex:
            if (self.active_writers == 0 and self.waiting_writers == 0
                    and self.active_readers == 0):
                self.active_writers += 1
                self.can_write.release()
            else:
                self.waiting_writers += 1
        self.can_write.acquire()

    def writer_leaves(self):
        """Drop exclusive access; prefer a queued writer, else wake all readers."""
        with self.mutex:
            self.active_writers -= 1
            if self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()
            elif self.waiting_readers != 0:
                # No writer queued: admit every waiting reader at once.
                count = self.waiting_readers
                self.waiting_readers = 0
                self.active_readers += count
                for _ in range(count):
                    self.can_read.release()
| agpl-3.0 |
reiaaoyama/contrail-controller | src/config/vnc_openstack/vnc_openstack/tests/test_basic.py | 7 | 8854 | import sys
import json
from testtools.matchers import Equals, Contains
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
class TestBasic(test_case.NeutronBackendTestCase):
    """Neutron-plugin list/read behaviour against the contrail backend.

    The big test below deliberately injects translation failures to check
    that one broken object does not break listing of its siblings.
    """
    def test_list_with_inconsistent_members(self):
        # NOTE: permanently skipped; the body below never runs until the
        # flaky first-run failure mentioned here is diagnosed.
        self.skipTest("Skipping this flakky test, till finding the"
                      " root cause for the first run failure")
        # 1. create collection
        # 2. list, verify full collection
        # 3. mess with one in vnc_to_neutron, verify collection-1
        # 4. restore, list, verify full collection
        proj_obj = self._vnc_lib.project_read(
            fq_name=['default-domain', 'default-project'])

        # Create three instances of every supported resource type.
        objects = {}
        for (obj_type, obj_class, create_method_name) in \
            [('virtual_network', vnc_api.VirtualNetwork,
              'virtual_network_create'),
             ('network_ipam', vnc_api.NetworkIpam,
              'network_ipam_create'),
             ('network_policy', vnc_api.NetworkPolicy,
              'network_policy_create'),
             ('logical_router', vnc_api.LogicalRouter,
              'logical_router_create'),
             ('security_group', vnc_api.SecurityGroup,
              'security_group_create'),
             ('route_table', vnc_api.RouteTable,
              'route_table_create'),
             ('service_instance', vnc_api.ServiceInstance,
              'service_instance_create')]:
            objects[obj_type] = [obj_class('%s-%s' %(self.id(), i))
                                 for i in range(3)]
            for obj in objects[obj_type]:
                create_method = getattr(self._vnc_lib, create_method_name)
                create_method(obj)

        # VMIs need a parent project and an attached network, so they are
        # created separately from the generic loop above.
        objects['virtual_machine_interface'] = \
            [vnc_api.VirtualMachineInterface('%s-%s' %(self.id(), i), proj_obj)
             for i in range(3)]
        for obj in objects['virtual_machine_interface']:
            obj.add_virtual_network(vnc_api.VirtualNetwork())
            self._vnc_lib.virtual_machine_interface_create(obj)

        # A network with three subnets (exercised at the end of the test).
        vn_obj = vnc_api.VirtualNetwork(self.id())
        sn0_id = str(uuid.uuid4())
        sn1_id = str(uuid.uuid4())
        sn2_id = str(uuid.uuid4())
        vn_obj.add_network_ipam(vnc_api.NetworkIpam(),
            vnc_api.VnSubnetsType(
                [vnc_api.IpamSubnetType(vnc_api.SubnetType('1.1.1.0', 28),
                    subnet_uuid=sn0_id),
                 vnc_api.IpamSubnetType(vnc_api.SubnetType('2.2.2.0', 28),
                    subnet_uuid=sn1_id),
                 vnc_api.IpamSubnetType(vnc_api.SubnetType('3.3.3.0', 28),
                    subnet_uuid=sn2_id)]))
        self._vnc_lib.virtual_network_create(vn_obj)

        # Floating IPs hang off a pool on the network created above.
        fip_pool_obj = vnc_api.FloatingIpPool(self.id(), vn_obj)
        self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
        objects['floating_ip'] = [vnc_api.FloatingIp('%s-%s' %(self.id(), i),
                                                     fip_pool_obj)
                                  for i in range(3)]
        for obj in objects['floating_ip']:
            obj.add_project(proj_obj)
            self._vnc_lib.floating_ip_create(obj)

        # (objects, neutron URL prefix, vnc->neutron translator name) per type.
        collection_types = [
            (objects['virtual_network'], 'network',
             '_network_vnc_to_neutron'),
            (objects['virtual_machine_interface'], 'port',
             '_port_vnc_to_neutron'),
            (objects['network_ipam'], 'ipam',
             '_ipam_vnc_to_neutron'),
            (objects['network_policy'], 'policy',
             '_policy_vnc_to_neutron'),
            (objects['logical_router'], 'router',
             '_router_vnc_to_neutron'),
            (objects['floating_ip'], 'floatingip',
             '_floatingip_vnc_to_neutron'),
            (objects['security_group'], 'security_group',
             '_security_group_vnc_to_neutron'),
            (objects['route_table'], 'route_table',
             '_route_table_vnc_to_neutron'),
            (objects['service_instance'], 'nat_instance',
             '_svc_instance_vnc_to_neutron'),
            ]

        def list_resource(url_pfx):
            # READALL through the neutron shim as a non-admin tenant.
            context = {'operation': 'READALL',
                       'user_id': '',
                       'tenant_id': proj_obj.uuid,
                       'roles': '',
                       'is_admin': 'False'}
            data = {'fields': None, 'filters': {}}
            body = {'context': context, 'data': data}
            resp = self._api_svr_app.post_json(
                '/neutron/%s' %(url_pfx), body)
            return json.loads(resp.text)

        # for collections that are objects in contrail model
        # NOTE: the loop variable deliberately shadows the outer `objects`
        # dict; from here on `objects` means "the three objects of the
        # current type".
        for (objects, res_url_pfx, res_xlate_name) in collection_types:
            res_dicts = list_resource(res_url_pfx)
            present_ids = [r['id'] for r in res_dicts]
            for obj in objects:
                self.assertIn(obj.uuid, present_ids)

            neutron_api_obj = FakeExtensionManager.get_extension_objects(
                'vnc_cfg_api.neutronApi')[0]
            neutron_db_obj = neutron_api_obj._npi._cfgdb

            def err_on_object_2(orig_method, res_obj, *args, **kwargs):
                # Fail translation for exactly the third object of the set.
                if res_obj.uuid == objects[2].uuid:
                    raise Exception('faking inconsistent element')
                return orig_method(res_obj, *args, **kwargs)

            # With the translator broken for one object, listing must still
            # return the other two.
            with test_common.patch(
                neutron_db_obj, res_xlate_name, err_on_object_2):
                res_dicts = list_resource(res_url_pfx)
                present_ids = [r['id'] for r in res_dicts]
                self.assertNotIn(objects[2].uuid, present_ids)

            # Patch removed: the full collection is visible again.
            res_dicts = list_resource(res_url_pfx)
            present_ids = [r['id'] for r in res_dicts]
            for obj in objects:
                self.assertIn(obj.uuid, present_ids)
        # end for collections that are objects in contrail model

        # subnets, sg-rules etc.
        res_dicts = list_resource('subnet')
        present_ids = [r['id'] for r in res_dicts]
        for sn_id in [sn0_id, sn1_id, sn2_id]:
            self.assertIn(sn_id, present_ids)

        def err_on_sn2(orig_method, subnet_vnc, *args, **kwargs):
            if subnet_vnc.subnet_uuid == sn2_id:
                raise Exception('faking inconsistent element')
            return orig_method(subnet_vnc, *args, **kwargs)

        # NOTE(review): `neutron_db_obj` leaks out of the loop above -- this
        # relies on the loop having run at least once; confirm intentional.
        with test_common.patch(
            neutron_db_obj, '_subnet_vnc_to_neutron', err_on_sn2):
            res_dicts = list_resource('subnet')
            present_ids = [r['id'] for r in res_dicts]
            self.assertNotIn(sn2_id, present_ids)
    # end test_list_with_inconsistent_members

    def test_extra_fields_on_network(self):
        # Default config: contrail extension fields are present on reads.
        test_obj = self._create_test_object()
        context = {'operation': 'READ',
                   'user_id': '',
                   'roles': ''}
        data = {'fields': None,
                'id': test_obj.uuid}
        body = {'context': context, 'data': data}
        resp = self._api_svr_app.post_json('/neutron/network', body)
        net_dict = json.loads(resp.text)
        self.assertIn('contrail:fq_name', net_dict)
    # end test_extra_fields_on_network
# end class TestBasic
class TestExtraFieldsPresenceByKnob(test_case.NeutronBackendTestCase):
    """With contrail_extensions_enabled=True the extra fields must show up."""

    def __init__(self, *args, **kwargs):
        super(TestExtraFieldsPresenceByKnob, self).__init__(*args, **kwargs)
        # Force the extensions knob on for every test in this class.
        self._config_knobs.append(
            ('NEUTRON', 'contrail_extensions_enabled', True))
    # end __init__

    def test_extra_fields_on_network(self):
        test_obj = self._create_test_object()
        request = {
            'context': {'operation': 'READ',
                        'user_id': '',
                        'roles': ''},
            'data': {'fields': None,
                     'id': test_obj.uuid},
        }
        resp = self._api_svr_app.post_json('/neutron/network', request)
        net_dict = json.loads(resp.text)
        self.assertIn('contrail:fq_name', net_dict)
    # end test_extra_fields_on_network
# end class TestExtraFieldsPresenceByKnob
class TestExtraFieldsAbsenceByKnob(test_case.NeutronBackendTestCase):
    """With contrail_extensions_enabled=False the extra fields must be absent."""

    def __init__(self, *args, **kwargs):
        super(TestExtraFieldsAbsenceByKnob, self).__init__(*args, **kwargs)
        # Force the extensions knob off for every test in this class.
        self._config_knobs.append(
            ('NEUTRON', 'contrail_extensions_enabled', False))
    # end __init__

    def test_no_extra_fields_on_network(self):
        test_obj = self._create_test_object()
        request = {
            'context': {'operation': 'READ',
                        'user_id': '',
                        'roles': ''},
            'data': {'fields': None,
                     'id': test_obj.uuid},
        }
        resp = self._api_svr_app.post_json('/neutron/network', request)
        net_dict = json.loads(resp.text)
        self.assertNotIn('contrail:fq_name', net_dict)
    # end test_extra_fields_on_network
# end class TestExtraFieldsAbsenceByKnob
| apache-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/IPython/testing/plugin/ipdoctest.py | 5 | 31223 | """Nose Plugin that supports IPython doctests.
Limitations:
- When generating examples for use as doctests, make sure that you have
pretty-printing OFF. This can be done either by setting the
``PlainTextFormatter.pprint`` option in your configuration file to False, or
by interactively disabling it with %Pprint. This is required so that IPython
output matches that of normal Python, which is used by doctest for internal
execution.
- Do not rely on specific prompt numbers for results (such as using
'_34==True', for example). For IPython tests run via an external process the
prompt numbers may be different, and IPython tests run as normal python code
won't even have these special _NN variables set at all.
"""
#-----------------------------------------------------------------------------
# Module imports
# From the standard library
import doctest
import inspect
import logging
import os
import re
import sys
import traceback
import unittest
from inspect import getmodule
# We are overriding the default doctest runner, so we need to import a few
# things from doctest directly
from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE,
_unittest_reportflags, DocTestRunner,
_extract_future_flags, pdb, _OutputRedirectingPdb,
_exception_traceback,
linecache)
# Third-party modules
import nose.core
from nose.plugins import doctests, Plugin
from nose.util import anyp, getpackage, test_address, resolve_name, tolist
# Our own imports
from IPython.utils.py3compat import builtin_mod, PY3, getcwd
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Module globals and other constants
#-----------------------------------------------------------------------------
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def is_extension_module(filename):
    """Return whether the given filename is an extension module.

    This simply checks that the extension is either .so or .pyd.
    """
    _, ext = os.path.splitext(filename)
    return ext.lower() in {'.so', '.pyd'}
class DocTestSkip(object):
    """Object wrapper for doctests to be skipped."""

    ds_skip = """Doctest to skip.
    >>> 1 #doctest: +SKIP
    """

    def __init__(self, obj):
        self.obj = obj

    def __getattribute__(self, key):
        # Serve the skip-marked docstring in place of the wrapped object's;
        # every other attribute is delegated to the wrapped object.
        if key == '__doc__':
            return DocTestSkip.ds_skip
        wrapped = object.__getattribute__(self, 'obj')
        return getattr(wrapped, key)
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class DocTestFinder(doctest.DocTestFinder):
    """Doctest finder that also descends into extension-module objects.

    Differences from the stdlib finder:
    - objects carrying a ``skip_doctest`` attribute are wrapped in
      ``DocTestSkip`` so their docstring becomes a single ``+SKIP`` example;
    - after the stdlib pass, module and class members are re-scanned by hand,
      because the stdlib logic misses doctests in extension modules
      (http://bugs.python.org/issue3158).
    """

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            return module.__dict__ is object.__globals__
        elif inspect.isbuiltin(object):
            return module.__name__ == object.__module__
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.ismethod(object):
            # This one may be a bug in cython that fails to correctly set the
            # __module__ attribute of methods, but since the same error is easy
            # to make by extension code writers, having this safety in place
            # isn't such a bad idea
            return module.__name__ == object.__self__.__class__.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True  # [XX] no way not be sure.
        elif inspect.ismethoddescriptor(object):
            # Unbound PyQt signals reach this point in Python 3.4b3, and we
            # want to avoid throwing an error. See also
            # http://bugs.python.org/issue3158
            return False
        else:
            raise ValueError("object must be a class or function, got %r"
                             % object)

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        # BUG FIX: a leftover debug print (`print('_find for:', ...)  # dbg`)
        # used to run here on every candidate object, spamming stdout during
        # test collection; it has been removed.
        if hasattr(obj, "skip_doctest"):
            obj = DocTestSkip(obj)

        doctest.DocTestFinder._find(self, tests, obj, name, module,
                                    source_lines, globs, seen)

        # Below we re-run pieces of the above method with manual modifications,
        # because the original code is buggy and fails to correctly identify
        # doctests in extension modules.

        # Local shorthands
        from inspect import isroutine, isclass

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname1 = '%s.%s' % (name, valname)
                if ((isroutine(val) or isclass(val))
                        and self._from_module(module, val)):
                    self._find(tests, val, valname1, module, source_lines,
                               globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     inspect.ismethod(val) or
                     isinstance(val, property)) and
                        self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
class IPDoctestOutputChecker(doctest.OutputChecker):
    """Second-chance checker with support for random tests.

    If the default comparison doesn't pass, this checker looks in the expected
    output string for flags that tell us to ignore the output.
    """

    random_re = re.compile(r'#\s*random\s+')

    def check_output(self, want, got, optionflags):
        """Check output, accepting special markers embedded in the output.

        If the output didn't pass the default validation but the special string
        '#random' is included, we accept it."""
        # Let the original tester verify first, in case people have valid tests
        # that happen to have a comment saying '#random' embedded in.
        if doctest.OutputChecker.check_output(self, want, got, optionflags):
            return True
        # Second chance: an explicit '#random' marker in the expected output
        # means any actual output is acceptable.
        return bool(self.random_re.search(want))
class DocTestCase(doctests.DocTestCase):
    """Proxy for DocTestCase: provides an address() method that
    returns the correct address for the doctest case. Otherwise
    acts as a proxy to the test case. To provide hints for address(),
    an obj may also be passed -- this will be used as the test object
    for purposes of determining the test address, if it is provided.
    """
    # Note: this method was taken from numpy's nosetester module.
    # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
    # its constructor that blocks non-default arguments from being passed
    # down into doctest.DocTestCase
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, obj=None, result_var='_'):
        # result_var is the name of the "last result" variable ('_' by
        # default); tearDown() later guards against it being missing.
        self._result_var = result_var
        doctests.DocTestCase.__init__(self, test,
                                      optionflags=optionflags,
                                      setUp=setUp, tearDown=tearDown,
                                      checker=checker)
        # Now we must actually copy the original constructor from the stdlib
        # doctest class, because we can't call it directly and a bug in nose
        # means it never gets passed the right arguments.
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        # Keep the original globals around so tearDown() can undo the
        # namespace swap that setUp() performs for IPython examples.
        self._dt_test_globs_ori = test.globs
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
        # XXX - store this runner once in the object!
        runner = IPDocTestRunner(optionflags=optionflags,
                                 checker=checker, verbose=False)
        self._dt_runner = runner
        # Each doctest should remember the directory it was loaded from, so
        # things like %run work without too many contortions
        self._ori_dir = os.path.dirname(test.filename)
    # Modified runTest from the default stdlib
    def runTest(self):
        # Run the stored doctest from the directory it was loaded from,
        # raising self.failureException if any example fails.
        test = self._dt_test
        runner = self._dt_runner
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        try:
            # Save our current directory and switch out to the one where the
            # test was originally created, in case another doctest did a
            # directory change. We'll restore this in the finally clause.
            curdir = getcwd()
            #print 'runTest in dir:', self._ori_dir # dbg
            os.chdir(self._ori_dir)
            runner.DIVIDER = "-"*70
            # Output is captured via new.write; runner.run returns the
            # (failures, tries) counts for this doctest.
            failures, tries = runner.run(test,out=new.write,
                                         clear_globs=False)
        finally:
            sys.stdout = old
            os.chdir(curdir)
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def setUp(self):
        """Modified test setup that syncs with ipython namespace"""
        #print "setUp test", self._dt_test.examples # dbg
        if isinstance(self._dt_test.examples[0], IPExample):
            # for IPython examples *only*, we swap the globals with the ipython
            # namespace, after updating it with the globals (which doctest
            # fills with the necessary info from the module being tested).
            self.user_ns_orig = {}
            self.user_ns_orig.update(_ip.user_ns)
            _ip.user_ns.update(self._dt_test.globs)
            # We must remove the _ key in the namespace, so that Python's
            # doctest code sets it naturally
            _ip.user_ns.pop('_', None)
            _ip.user_ns['__builtins__'] = builtin_mod
            self._dt_test.globs = _ip.user_ns
        super(DocTestCase, self).setUp()
    def tearDown(self):
        # Undo the test.globs reassignment we made, so that the parent class
        # teardown doesn't destroy the ipython namespace
        if isinstance(self._dt_test.examples[0], IPExample):
            self._dt_test.globs = self._dt_test_globs_ori
            _ip.user_ns.clear()
            _ip.user_ns.update(self.user_ns_orig)
        # XXX - fperez: I am not sure if this is truly a bug in nose 0.11, but
        # it does look like one to me: its tearDown method tries to run
        #
        # delattr(builtin_mod, self._result_var)
        #
        # without checking that the attribute really is there; it implicitly
        # assumes it should have been set via displayhook. But if the
        # displayhook was never called, this doesn't necessarily happen. I
        # haven't been able to find a little self-contained example outside of
        # ipython that would show the problem so I can report it to the nose
        # team, but it does happen a lot in our code.
        #
        # So here, we just protect as narrowly as possible by trapping an
        # attribute error whose message would be the name of self._result_var,
        # and letting any other error propagate.
        try:
            super(DocTestCase, self).tearDown()
        except AttributeError as exc:
            if exc.args[0] != self._result_var:
                raise
# Distinct subclass so that parser/runner code can tell IPython-style
# examples apart from plain Python ones via isinstance() checks.
class IPExample(doctest.Example):
    """Marker class: a doctest example written with IPython prompts."""
    pass
class IPExternalExample(doctest.Example):
    """Doctest examples to be run in an external process.

    Behaves exactly like :class:`doctest.Example`, except that the stored
    source always ends with one extra newline, which keeps pexpect from
    hanging while waiting for the external interpreter.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Let the stock constructor normalize all the fields first.
        doctest.Example.__init__(self, source, want, exc_msg, lineno,
                                 indent, options)
        # An EXTRA newline is needed to prevent pexpect hangs.
        self.source = self.source + '\n'
class IPDocTestParser(doctest.DocTestParser):
    """
    A class used to parse strings containing doctest examples.
    Note: This is a version modified to properly recognize IPython input and
    convert any IPython examples into valid Python ones.
    """
    # This regular expression is used to find doctest examples in a
    # string. It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    # Classic Python prompts or default IPython ones
    _PS1_PY = r'>>>'
    _PS2_PY = r'\.\.\.'
    # IPython prompts, e.g. 'In [12]:' and the aligned '   ...:' continuation.
    _PS1_IP = r'In\ \[\d+\]:'
    _PS2_IP = r'\ \ \ \.\.\.+:'
    # Verbose-mode template; the four %s slots are filled with the PS1/PS2
    # patterns above (twice each: once to match, once to exclude from `want`).
    _RE_TPL = r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
            (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
        \n? # a newline
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$) # Not a blank line
                     (?![ ]*%s) # Not a line starting with PS1
                     (?![ ]*%s) # Not a line starting with PS2
                     .*$\n? # But any other line
                  )*)
        '''
    _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
                                 re.MULTILINE | re.VERBOSE)
    _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
                                 re.MULTILINE | re.VERBOSE)
    # Mark a test as being fully random. In this case, we simply append the
    # random marker ('#random') to each individual example's output. This way
    # we don't need to modify any other code.
    _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
    # Mark tests to be executed in an external process - currently unsupported.
    _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL')
    def ip2py(self,source):
        """Convert input IPython source into valid Python."""
        # Relies on the module-global IPython instance `_ip`.
        block = _ip.input_transformer_manager.transform_cell(source)
        if len(block.splitlines()) == 1:
            return _ip.prefilter(block)
        else:
            return block
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based. The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        #print 'Parse string:\n',string # dbg
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        charno, lineno = 0, 0
        # We make 'all random' tests by adding the '# random' mark to every
        # block of output in the test.
        if self._RANDOM_TEST.search(string):
            random_marker = '\n# random'
        else:
            random_marker = ''
        # Whether to convert the input from ipython to python syntax
        ip2py = False
        # Find all doctest examples in the string. First, try them as Python
        # examples, then as IPython ones
        terms = list(self._EXAMPLE_RE_PY.finditer(string))
        if terms:
            # Normal Python example
            #print '-'*70 # dbg
            #print 'PyExample, Source:\n',string # dbg
            #print '-'*70 # dbg
            Example = doctest.Example
        else:
            # It's an ipython example. Note that IPExamples are run
            # in-process, so their syntax must be turned into valid python.
            # IPExternalExamples are run out-of-process (via pexpect) so they
            # don't need any filtering (a real ipython will be executing them).
            terms = list(self._EXAMPLE_RE_IP.finditer(string))
            if self._EXTERNAL_IP.search(string):
                #print '-'*70 # dbg
                #print 'IPExternalExample, Source:\n',string # dbg
                #print '-'*70 # dbg
                Example = IPExternalExample
            else:
                #print '-'*70 # dbg
                #print 'IPExample, Source:\n',string # dbg
                #print '-'*70 # dbg
                Example = IPExample
                ip2py = True
        for m in terms:
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno,ip2py)
            # Append the random-output marker (it defaults to empty in most
            # cases, it's only non-empty for 'all-random' tests):
            want += random_marker
            if Example is IPExternalExample:
                options[doctest.NORMALIZE_WHITESPACE] = True
                want += '\n'
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append(Example(source, want, exc_msg,
                                      lineno=lineno,
                                      indent=min_indent+len(m.group('indent')),
                                      options=options))
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def _parse_example(self, m, name, lineno,ip2py=False):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        Optional:
        `ip2py`: if true, filter the input via IPython to convert the syntax
        into valid python.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        # We're using variable-length input prompts
        ps1 = m.group('ps1')
        ps2 = m.group('ps2')
        ps1_len = len(ps1)
        self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
        if ps2:
            self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
        # Strip indentation, the prompt itself and the blank after it.
        source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
        if ip2py:
            # Convert source input from IPython into valid Python syntax
            source = self.ip2py(source)
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation. Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1] # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        # Remove ipython output prompt that might be present in the first line
        want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character. If any line is not followed by
        a space character, then raise ValueError.
        Note: IPython-modified version which takes the input prompt length as a
        parameter, so that prompts of variable length can be dealt with.
        """
        space_idx = indent+ps1_len
        min_len = space_idx+1
        for i, line in enumerate(lines):
            if len(line) >= min_len and line[space_idx] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:space_idx], line))
# Expose the SKIP option flag so examples can be marked '# doctest: +SKIP'.
SKIP = doctest.register_optionflag('SKIP')
class IPDocTestRunner(doctest.DocTestRunner, object):
    """Test runner that synchronizes the IPython namespace with test globals."""
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """Merge the live IPython user namespace into ``test.globs`` and
        then delegate to the stock :class:`doctest.DocTestRunner`."""
        # Variables created interactively (e.g. via %run) live in the
        # IPython user namespace; copy them in so examples can see them.
        test.globs.update(_ip.user_ns)
        return super(IPDocTestRunner, self).run(test, compileflags, out,
                                                clear_globs)
class DocFileCase(doctest.DocFileCase):
"""Overrides to provide filename
"""
def address(self):
return (self._dt_test.filename, None, None)
class ExtensionDoctest(doctests.Doctest):
    """Nose Plugin that supports doctests in extension modules.
    """
    name = 'extdoctest' # call nosetests with --with-extdoctest
    enabled = True
    def options(self, parser, env=os.environ):
        # Register this plugin's command-line options with nose's parser.
        Plugin.options(self, parser, env)
        parser.add_option('--doctest-tests', action='store_true',
                          dest='doctest_tests',
                          default=env.get('NOSE_DOCTEST_TESTS',True),
                          help="Also look for doctests in test modules. "
                          "Note that classes, methods and functions should "
                          "have either doctests or non-doctest tests, "
                          "not both. [NOSE_DOCTEST_TESTS]")
        parser.add_option('--doctest-extension', action="append",
                          dest="doctestExtension",
                          help="Also look for doctests in files with "
                          "this extension [NOSE_DOCTEST_EXTENSION]")
        # Set the default as a list, if given in env; otherwise
        # an additional value set on the command line will cause
        # an error.
        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
        if env_setting is not None:
            parser.set_defaults(doctestExtension=tolist(env_setting))
    def configure(self, options, config):
        # Install our own parser/finder/checker and make sure the stock
        # doctest plugin doesn't also collect the same tests.
        Plugin.configure(self, options, config)
        # Pull standard doctest plugin out of config; we will do doctesting
        config.plugins.plugins = [p for p in config.plugins.plugins
                                  if p.name != 'doctest']
        self.doctest_tests = options.doctest_tests
        self.extension = tolist(options.doctestExtension)
        self.parser = doctest.DocTestParser()
        self.finder = DocTestFinder()
        self.checker = IPDoctestOutputChecker()
        self.globs = None
        self.extraglobs = None
    def loadTestsFromExtensionModule(self,filename):
        # Import a compiled extension module by temporarily adding its
        # directory to sys.path, then collect any doctests it contains.
        bpath,mod = os.path.split(filename)
        modname = os.path.splitext(mod)[0]
        try:
            sys.path.append(bpath)
            module = __import__(modname)
            tests = list(self.loadTestsFromModule(module))
        finally:
            sys.path.pop()
        return tests
    # NOTE: the method below is almost a copy of the original one in nose, with
    # a few modifications to control output checking.
    def loadTestsFromModule(self, module):
        # Generator: yields one DocTestCase per non-empty doctest found in
        # the module (or nothing if the module is excluded or has no tests).
        #print '*** ipdoctest - lTM',module # dbg
        if not self.matches(module.__name__):
            log.debug("Doctest doesn't want module %s", module)
            return
        tests = self.finder.find(module,globs=self.globs,
                                 extraglobs=self.extraglobs)
        if not tests:
            return
        # always use whitespace and ellipsis options
        optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
        tests.sort()
        module_file = module.__file__
        if module_file[-4:] in ('.pyc', '.pyo'):
            # Report the .py source file rather than the compiled artifact.
            module_file = module_file[:-1]
        for test in tests:
            if not test.examples:
                continue
            if not test.filename:
                test.filename = module_file
            yield DocTestCase(test,
                              optionflags=optionflags,
                              checker=self.checker)
    def loadTestsFromFile(self, filename):
        # Dispatch: extension modules get imported and scanned; text files
        # with a registered doctest extension are parsed directly.
        #print "ipdoctest - from file", filename # dbg
        if is_extension_module(filename):
            for t in self.loadTestsFromExtensionModule(filename):
                yield t
        else:
            if self.extension and anyp(filename.endswith, self.extension):
                name = os.path.basename(filename)
                dh = open(filename)
                try:
                    doc = dh.read()
                finally:
                    dh.close()
                test = self.parser.get_doctest(
                    doc, globs={'__file__': filename}, name=name,
                    filename=filename, lineno=0)
                if test.examples:
                    #print 'FileCase:',test.examples # dbg
                    yield DocFileCase(test)
                else:
                    # NOTE(review): yielding False (instead of yielding
                    # nothing) looks intentional for nose's collector, but
                    # it is unusual -- confirm before changing.
                    yield False # no tests to load
class IPythonDoctest(ExtensionDoctest):
    """Nose Plugin that supports doctests in extension modules.
    """
    name = 'ipdoctest' # call nosetests with --with-ipdoctest
    enabled = True
    def makeTest(self, obj, parent):
        """Look for doctests in the given object, which will be a
        function, method or class.
        """
        #print 'Plugin analyzing:', obj, parent # dbg
        # always use whitespace and ellipsis options
        optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
        doctests = self.finder.find(obj, module=getmodule(parent))
        if doctests:
            # Generator: one DocTestCase per doctest with actual examples.
            for test in doctests:
                if len(test.examples) == 0:
                    continue
                yield DocTestCase(test, obj=obj,
                                  optionflags=optionflags,
                                  checker=self.checker)
    def options(self, parser, env=os.environ):
        # Register this plugin's command-line options with nose's parser.
        #print "Options for nose plugin:", self.name # dbg
        Plugin.options(self, parser, env)
        parser.add_option('--ipdoctest-tests', action='store_true',
                          dest='ipdoctest_tests',
                          default=env.get('NOSE_IPDOCTEST_TESTS',True),
                          help="Also look for doctests in test modules. "
                          "Note that classes, methods and functions should "
                          "have either doctests or non-doctest tests, "
                          "not both. [NOSE_IPDOCTEST_TESTS]")
        parser.add_option('--ipdoctest-extension', action="append",
                          dest="ipdoctest_extension",
                          help="Also look for doctests in files with "
                          "this extension [NOSE_IPDOCTEST_EXTENSION]")
        # Set the default as a list, if given in env; otherwise
        # an additional value set on the command line will cause
        # an error.
        env_setting = env.get('NOSE_IPDOCTEST_EXTENSION')
        if env_setting is not None:
            parser.set_defaults(ipdoctest_extension=tolist(env_setting))
    def configure(self, options, config):
        # Same as ExtensionDoctest.configure, but with the IPython-aware
        # parser so IPython-style prompts are recognized.
        #print "Configuring nose plugin:", self.name # dbg
        Plugin.configure(self, options, config)
        # Pull standard doctest plugin out of config; we will do doctesting
        config.plugins.plugins = [p for p in config.plugins.plugins
                                  if p.name != 'doctest']
        self.doctest_tests = options.ipdoctest_tests
        self.extension = tolist(options.ipdoctest_extension)
        self.parser = IPDocTestParser()
        self.finder = DocTestFinder(parser=self.parser)
        self.checker = IPDoctestOutputChecker()
        self.globs = None
        self.extraglobs = None
| mit |
dlorenc/runtimes-common | appengine/runtime_builders/data_integrity.py | 5 | 4257 | #!/usr/bin/python
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import logging
import os
import shutil
import sys
import tempfile
import builder_util
import yaml
def main():
    """Command-line entry point.

    Parses the flags and returns the result of _verify() (the failure
    count), which the __main__ guard hands to sys.exit().
    """
    logging.getLogger().setLevel(logging.INFO)
    cli = argparse.ArgumentParser()
    cli.add_argument('--directory', '-d',
                     required=True,
                     help='directory containing all builder config files')
    cli.add_argument('--presubmit', '-p', action='store_true',
                     default=False)
    parsed = cli.parse_args()
    return _verify(parsed.directory, parsed.presubmit)
def _verify(directory, presubmit):
    """Validate every builder config YAML file in ``directory``.

    Args:
        directory: path containing the ``*.yaml`` builder config files.
        presubmit: when True, skip the remote latest-version cross-check
            and only verify that each referenced "latest" file exists.

    Returns:
        int: the number of verification failures (0 means success). An
        unparseable or malformed config also counts as a failure so the
        process exit code never reports success on error.
    """
    failures = 0
    config_file = None  # defined before the loop so error handlers can use it
    try:
        for config_file in glob.glob(os.path.join(directory, '*.yaml')):
            with open(config_file, 'r') as f:
                # safe_load never constructs arbitrary Python objects from
                # the YAML stream (bare yaml.load is unsafe/deprecated).
                config = yaml.safe_load(f)
            project_name = config['project']
            latest_file = config.get('latest', '')
            if not latest_file:
                logging.warning('Project %s has no latest file: skipping',
                                project_name)
                continue
            if not presubmit:
                failures += _verify_latest_files_match(project_name,
                                                       latest_file)
            failures += _verify_latest_file_exists(latest_file)
        return failures
    except (yaml.YAMLError, ValueError) as ve:
        logging.error('Error when parsing YAML! Check file formatting. \n{0}'
                      .format(ve))
    except KeyError as ke:
        logging.error('Config file {0} is missing required field! \n{1}'
                      .format(config_file, ke))
    # Reaching here means a config could not be processed; previously the
    # function fell through and returned None, which made sys.exit() report
    # success. Count the error as an additional failure instead.
    return failures + 1
def _verify_latest_files_match(project_name, config_latest):
    """
    Verify that the file pointed to by <project_name>.version is the same
    as the file specified in the builder config.

    Args:
        project_name: name of the runtime project being checked.
        config_latest: GCS path of the latest builder per the local config.

    Returns:
        int: 0 if the two references agree, 1 on mismatch or download error.
    """
    remote_version = builder_util.RUNTIME_BUCKET_PREFIX + \
        project_name + '.version'
    # Create the temp dir *before* entering the try block: if mkdtemp itself
    # failed inside the try, the finally clause would hit a NameError on
    # tmpdir and mask the real exception.
    tmpdir = tempfile.mkdtemp()
    try:
        version_file = os.path.join(tmpdir, 'runtime.version')
        if not builder_util.get_file_from_gcs(remote_version, version_file):
            return 1
        with open(version_file, 'r') as f:
            version_contents = f.read().strip('\n').strip(' ')
        version_latest = builder_util.RUNTIME_BUCKET_PREFIX + \
            project_name + '-' + version_contents + '.yaml'
        if version_latest != config_latest:
            logging.error('Builders do not match!')
            logging.error('Latest builder in internal runtime config: '
                          '{0}'.format(config_latest))
            logging.error('Latest builder in runtime.version file: '
                          '{0}'.format(version_latest))
            return 1
        return 0
    finally:
        shutil.rmtree(tmpdir)
def _verify_latest_file_exists(latest_file_path):
    """
    Verify that the latest file pointed to by <project_name>.version
    exists in GCS and parses as valid YAML.

    Args:
        latest_file_path: GCS path of the latest builder config to check.

    Returns:
        int: 0 on success, 1 if the file is missing or is not valid YAML.
    """
    logging.info('Checking file {0}'.format(latest_file_path))
    # Create the temp dir *before* the try block so the finally clause can
    # never reference an unassigned tmpdir if mkdtemp itself fails.
    tmpdir = tempfile.mkdtemp()
    try:
        latest_file = os.path.join(tmpdir, 'latest.yaml')
        if not builder_util.get_file_from_gcs(latest_file_path, latest_file):
            logging.error('File {0} not found in GCS!'
                          .format(latest_file_path))
            return 1
        with open(latest_file, 'r') as f:
            # safe_load avoids constructing arbitrary Python objects; we
            # only care that the document parses.
            yaml.safe_load(f)
        return 0
    except yaml.YAMLError as ye:
        logging.error(ye)
        return 1
    finally:
        shutil.rmtree(tmpdir)
if __name__ == '__main__':
    # Exit with main()'s return value so CI can detect verification failures.
    sys.exit(main())
| apache-2.0 |
petteyg/intellij-community | python/lib/Lib/site-packages/django/template/loader.py | 78 | 8038 | # Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# A loader may return an already-compiled template instead of the actual
# template source. In that case the path returned should be None, since the
# path information is associated with the template during the compilation,
# which has already been done.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
from django.core.exceptions import ImproperlyConfigured
from django.template.base import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings
# Lazily-built tuple of loader callables; populated on first use by
# find_template() to avoid circular imports at module load time.
template_source_loaders = None
class BaseLoader(object):
    """Common base for template source loaders (see module comments above)."""
    # Subclasses set this to True when the loader can actually run in the
    # current Python installation.
    is_usable = False
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, template_name, template_dirs=None):
        # Loaders are invoked as plain callables by find_template().
        return self.load_template(template_name, template_dirs)
    def load_template(self, template_name, template_dirs=None):
        """Load and compile a template; return (template, origin_name)."""
        source, display_name = self.load_template_source(template_name,
                                                         template_dirs)
        origin = make_origin(display_name, self.load_template_source,
                             template_name, template_dirs)
        try:
            return get_template_from_string(source, origin, template_name), None
        except TemplateDoesNotExist:
            # If compiling the template we found raises TemplateDoesNotExist,
            # back off to returning the source and display name for the
            # template we were asked to load. This allows for correct
            # identification (later) of the actual template that does not
            # exist.
            return source, display_name
    def load_template_source(self, template_name, template_dirs=None):
        """Return a (source, display_name) tuple for the named template.

        Concrete loaders must implement this.
        """
        raise NotImplementedError
    def reset(self):
        """Reset any cached loader state; the base implementation is a no-op."""
        pass
class LoaderOrigin(Origin):
    """Origin that remembers which loader produced it, so the template
    source can be fetched again for debugging."""
    def __init__(self, display_name, loader, name, dirs):
        super(LoaderOrigin, self).__init__(display_name)
        self.loader = loader
        self.loadname = name
        self.dirs = dirs
    def reload(self):
        """Re-run the loader and return the fresh template source."""
        result = self.loader(self.loadname, self.dirs)
        return result[0]
def make_origin(display_name, loader, name, dirs):
    """Build a LoaderOrigin when TEMPLATE_DEBUG is on and a display name
    exists; otherwise return None (origin tracking disabled)."""
    if not (settings.TEMPLATE_DEBUG and display_name):
        return None
    return LoaderOrigin(display_name, loader, name, dirs)
def find_template_loader(loader):
    # Resolve a TEMPLATE_LOADERS entry into a usable loader callable.
    # Accepts either a dotted-path string or a (path, args) tuple/list;
    # returns the loader, or None if it is not usable on this installation.
    if isinstance(loader, (tuple, list)):
        loader, args = loader[0], loader[1:]
    else:
        args = []
    if isinstance(loader, basestring):
        module, attr = loader.rsplit('.', 1)
        try:
            mod = import_module(module)
        except ImportError, e:
            raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
        try:
            TemplateLoader = getattr(mod, attr)
        except AttributeError, e:
            raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
        if hasattr(TemplateLoader, 'load_template_source'):
            # Class-based loader: instantiate it with any extra arguments.
            func = TemplateLoader(*args)
        else:
            # Try loading module the old way - string is full path to callable
            if args:
                raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader)
            func = TemplateLoader
        if not func.is_usable:
            import warnings
            warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader)
            return None
        else:
            return func
    else:
        raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader')
def find_template(name, dirs=None):
    """Locate ``name`` with the configured loaders.

    Returns a (source_or_template, origin) pair; raises
    TemplateDoesNotExist when no loader can provide the template.
    """
    # Build template_source_loaders lazily, on first call, because doing it
    # at module import time may cause circular import errors. See Django
    # ticket #1292.
    global template_source_loaders
    if template_source_loaders is None:
        template_source_loaders = tuple(
            found for found in
            (find_template_loader(entry)
             for entry in settings.TEMPLATE_LOADERS)
            if found is not None)
    for loader in template_source_loaders:
        try:
            source, display_name = loader(name, dirs)
        except TemplateDoesNotExist:
            continue
        return (source, make_origin(display_name, loader, name, dirs))
    raise TemplateDoesNotExist(name)
def find_template_source(name, dirs=None):
    """Deprecated wrapper around find_template().

    Kept for backward compatibility; warns on use and rejects compiled
    template objects, which the old API never returned.
    """
    import warnings
    warnings.warn(
        "`django.template.loaders.find_template_source` is deprecated; use `django.template.loaders.find_template` instead.",
        DeprecationWarning
    )
    source, origin = find_template(name, dirs)
    if hasattr(source, 'render'):
        raise Exception("Found a compiled template that is incompatible with the deprecated `django.template.loaders.find_template_source` function.")
    return source, origin
def get_template(template_name):
    """
    Returns a compiled Template object for the given template name,
    handling template inheritance recursively.
    """
    template, origin = find_template(template_name)
    if hasattr(template, 'render'):
        # The loader already returned a compiled Template.
        return template
    # Otherwise we got raw source that still needs compiling.
    return get_template_from_string(template, origin, template_name)
def get_template_from_string(source, origin=None, name=None):
    """Compile raw template ``source`` into a Template object, handling
    template inheritance recursively."""
    return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
    """Render a template with ``dictionary`` as context and return a string.

    ``template_name`` may be a single name (loaded via get_template) or a
    list/tuple of candidates (the first loadable one wins, via
    select_template).
    """
    context_data = dictionary or {}
    if isinstance(template_name, (list, tuple)):
        template = select_template(template_name)
    else:
        template = get_template(template_name)
    if context_instance:
        # Push the extra data onto the caller-supplied context.
        context_instance.update(context_data)
    else:
        context_instance = Context(context_data)
    return template.render(context_instance)
def select_template(template_name_list):
    """Return the first template in ``template_name_list`` that loads."""
    for candidate in template_name_list:
        try:
            return get_template(candidate)
        except TemplateDoesNotExist:
            pass
    # None of the candidates could be loaded.
    raise TemplateDoesNotExist(', '.join(template_name_list))
# Register the loader tag library as a built-in so templates can use it
# without an explicit {% load %}.
add_to_builtins('django.template.loader_tags')
| apache-2.0 |
pseudocubic/neutronpy | neutronpy/models.py | 3 | 3939 | # -*- coding: utf-8 -*-
r"""Physical Models
"""
import numpy as np
def simple_harmonic_oscillator(p, t):
    r"""Standard equation for a simple harmonic oscillator.

    Parameters
    ----------
    p : list
        Parameters in order: ``p[0]`` constant background, ``p[1]``
        amplitude, ``p[2]`` period, ``p[3]`` phase.

    t : ndarray
        One dimensional input array

    Returns
    -------
    func : ndarray
        One dimensional array: ``p[0] + p[1] * cos(p[2] * t - p[3])``

    """
    background, amplitude, period, phase = p[0], p[1], p[2], p[3]
    return background + amplitude * np.cos(period * t - phase)
def damped_harmonic_oscillator(p, t):
    r"""Standard equation for a damped harmonic oscillator.

    Parameters
    ----------
    p : list
        Parameters in order: ``p[0]`` constant background, ``p[1]``
        amplitude, ``p[2]`` period, ``p[3]`` phase, ``p[4]`` damping
        strength.

    t : ndarray
        One dimensional input array

    Returns
    -------
    func : ndarray
        One dimensional array: the simple harmonic oscillation
        ``p[1] * cos(p[2] * t - p[3])`` multiplied by the exponential
        envelope ``exp(-p[4] * t / 2)``, on top of the background ``p[0]``.

    """
    envelope = np.exp(-p[4] * t / 2.)
    oscillation = p[1] * np.cos(p[2] * t - p[3])
    return p[0] + envelope * oscillation
def acoustic_phonon_dispersion(p, x):
    r"""Standard equation for the dispersion of an acoustic phonon.

    Parameters
    ----------
    p : list
        Parameters in order: ``p[0]`` dispersion amplitude, ``p[1]``
        dispersion period.

    x : ndarray
        One dimensional input array

    Returns
    -------
    func : ndarray
        One dimensional array: ``sqrt(4 * p[0]) * |sin(p[1] * x)|``

    """
    amplitude, period = p[0], p[1]
    return np.sqrt(4 * amplitude) * np.abs(np.sin(period * x))
def optical_phonon_disperion():
    r"""Standard equation for the dispersion of an optical phonon

    Not implemented yet: calling this is a no-op that returns None.
    NOTE(review): the name is spelled "disperion" (missing 's'), matching
    the magnetic dispersion functions below; renaming would break callers.
    """
    pass
def ferromagnetic_disperion(p, x):
    r"""Standard equation for the dispersion of a ferromagnetic spin excitation.

    Parameters
    ----------
    p : list
        Parameters in order: ``p[0]`` dispersion amplitude, ``p[1]``
        dispersion period.

    x : ndarray
        One dimensional input array

    Returns
    -------
    func : ndarray
        One dimensional array: ``4 * p[0] * (1 - cos(p[1] * x))``

    """
    amplitude, period = p[0], p[1]
    return 4 * amplitude * (1 - np.cos(period * x))
def antiferromagnetic_disperion(p, x):
    r"""Standard equation for the dispersion of an antiferromagnetic spin excitation.

    Parameters
    ----------
    p : list
        Parameters in order: ``p[0]`` dispersion amplitude, ``p[1]``
        dispersion period.

    x : ndarray
        One dimensional input array

    Returns
    -------
    func : ndarray
        One dimensional array: ``p[0] * |sin(p[1] * x)|``

    """
    amplitude, period = p[0], p[1]
    return amplitude * np.abs(np.sin(period * x))
| mit |
rupran/ansible | contrib/inventory/serf.py | 395 | 3032 | #!/usr/bin/env python
# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
import argparse
import collections
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
# Name of the top-level inventory group that holds every discovered node
# (used by print_list() below).
_key = 'serf'
def _serf_client():
    """Build a SerfClient configured from the SERF_RPC_ADDR /
    SERF_RPC_AUTH environment variables (via EnvironmentConfig)."""
    config = EnvironmentConfig()
    return SerfClient(host=config.host, port=config.port,
                      rpc_auth=config.auth_key)
def get_serf_members_data():
    """Return the raw member records reported by the Serf RPC endpoint."""
    response = _serf_client().members()
    return response.body['Members']
def get_nodes(data):
    """Extract the list of node names from raw Serf member dicts.

    :param data: list of member dicts as returned by get_serf_members_data().
    :return: list of node name strings.
    """
    return [member['Name'] for member in data]
def get_groups(data):
    """Map each tag value to the list of node names carrying that tag.

    Serf tags are key/value pairs; Ansible groups are formed from the tag
    *values* only, so the tag keys are irrelevant here (the original code
    iterated ``.items()`` and ignored the key).

    :param data: list of member dicts, each with 'Name' and 'Tags' entries.
    :return: defaultdict mapping tag value -> list of node names.
    """
    groups = collections.defaultdict(list)
    for member in data:
        # Only the tag values matter for grouping.
        for tag_value in member['Tags'].values():
            groups[tag_value].append(member['Name'])
    return groups
def get_meta(data):
    """Build the Ansible ``_meta`` structure: host variables keyed by node.

    Each node's Serf tags become its hostvars.
    """
    return {'hostvars': {member['Name']: member['Tags'] for member in data}}
def print_list():
    """Print the complete inventory as JSON: the 'serf' group with all
    nodes, one group per tag value, and the '_meta' hostvars block."""
    members = get_serf_members_data()
    inventory = {_key: get_nodes(members), '_meta': get_meta(members)}
    inventory.update(get_groups(members))
    print(json.dumps(inventory))
def print_host(host):
    """Print the tag variables of a single host as JSON.

    :param host: node name to look up; KeyError if unknown.
    """
    meta = get_meta(get_serf_members_data())
    print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
    """Parse command-line options; exactly one of --list/--host is required.

    :param args_list: argument strings, e.g. ``sys.argv[1:]``.
    :return: argparse.Namespace with ``list`` (bool) and ``host`` (str/None).
    """
    parser = argparse.ArgumentParser(
        description='ansible inventory script reading from serf cluster')
    mutex_group = parser.add_mutually_exclusive_group(required=True)
    mutex_group.add_argument('--list', action='store_true',
                             help='list all hosts from serf cluster')
    mutex_group.add_argument('--host', help='display variables for a host')
    return parser.parse_args(args_list)
def main(args_list):
    """Script entry point: emit either the whole inventory or one host.

    The two options are mutually exclusive (enforced by get_args), so at
    most one of the branches below runs.
    """
    options = get_args(args_list)
    if options.list:
        print_list()
    if options.host:
        print_host(options.host)


if __name__ == '__main__':
    main(sys.argv[1:])
| gpl-3.0 |
trnewman/VT-USRP-daughterboard-drivers | gnuradio-core/src/python/gnuradio/gr/qa_fractional_interpolator.py | 6 | 1212 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_fractional_resampler (gr_unittest.TestCase):
    """QA tests for the GNU Radio fractional interpolator blocks."""
    def setUp(self):
        # Fresh top-level flow graph for every test case.
        self.tb = gr.top_block()
    def tearDown(self):
        # Drop the flow graph reference so resources are released between tests.
        self.tb = None
    def test_000_make(self):
        # Smoke test: the float and complex interpolators can both be
        # constructed (phase offset 0.0, interpolation ratio 1.0).
        op = gr.fractional_interpolator_ff(0.0, 1.0)
        op2 = gr.fractional_interpolator_cc(0.0, 1.0)
if __name__ == '__main__':
    gr_unittest.main()
| gpl-3.0 |
moniqx4/bite-project | deps/gdata-python-client/src/gdata/tlslite/mathtls.py | 273 | 11647 | """Miscellaneous helper functions."""
from utils.compat import *
from utils.cryptomath import *
import hmac
import md5
import sha
# Standard SRP group parameters as (generator g, safe-prime modulus N)
# tuples, one entry per modulus size: 1024, 1536, 2048, 3072, 4096, 6144
# and 8192 bits (these match the well-known SRP groups; see RFC 5054
# appendix A).
# NOTE(review): the final 8192-bit entry appears line-wrapped mid-number in
# this copy of the file -- verify it against the upstream tlslite source.
goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\
                       (2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\
                       (2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\
                       (2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\
                       (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\
                       (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\
                       (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)]
def P_hash(hashModule, secret, seed, length):
    """TLS P_hash data-expansion function (RFC 2246, section 5).

    Iterates A(i) = HMAC(secret, A(i-1)) with A(0) = seed, and concatenates
    HMAC(secret, A(i) + seed) digests until ``length`` bytes are produced.

    Fixes over the previous version: the output buffer no longer shadows the
    ``bytes`` builtin, and the unreachable ``return`` after the infinite
    loop has been removed.

    :param hashModule: PEP 247 hash module (md5 or sha) used inside HMAC.
    :param secret: HMAC key as a byte array.
    :param seed: expansion seed as a byte array.
    :param length: number of output bytes to produce.
    :return: byte array of exactly ``length`` bytes.
    """
    out = createByteArrayZeros(length)
    secret = bytesToString(secret)
    seed = bytesToString(seed)
    A = seed
    index = 0
    while True:
        A = hmac.HMAC(secret, A, hashModule).digest()
        output = hmac.HMAC(secret, A + seed, hashModule).digest()
        for c in output:
            # Return as soon as the requested number of bytes is filled.
            if index >= length:
                return out
            out[index] = ord(c)
            index += 1
def PRF(secret, label, seed, length):
    """TLS 1.0/1.1 pseudo-random function (RFC 2246, section 5).

    XORs P_MD5 over the first half of the secret with P_SHA1 over the
    second half; for odd-length secrets the two halves overlap by one byte.
    """
    labeled_seed = concatArrays(stringToBytes(label), seed)
    first_half = secret[:int(math.ceil(len(secret) / 2.0))]
    second_half = secret[int(math.floor(len(secret) / 2.0)):]
    md5_bytes = P_hash(md5, first_half, labeled_seed, length)
    sha1_bytes = P_hash(sha, second_half, labeled_seed, length)
    # Combine the two expansions byte by byte.
    for i in range(length):
        md5_bytes[i] ^= sha1_bytes[i]
    return md5_bytes
def PRF_SSL(secret, seed, length):
    """SSLv3 key-derivation PRF.

    For rounds labelled 'A', 'BB', 'CCC', ... appends
    MD5(secret + SHA1(label + secret + seed)) digests until ``length``
    bytes have been produced (at most 26 rounds of 16 bytes each).
    """
    secretStr = bytesToString(secret)
    seedStr = bytesToString(seed)
    out = createByteArrayZeros(length)
    index = 0
    for round_num in range(26):
        label = chr(ord('A') + round_num) * (round_num + 1)
        digest = md5.md5(secretStr +
                         sha.sha(label + secretStr + seedStr).digest()).digest()
        for c in digest:
            if index >= length:
                return out
            out[index] = ord(c)
            index += 1
    # Caller asked for more bytes than 26 rounds can provide.
    return out
def makeX(salt, username, password):
    """Compute the SRP private key x = SHA1(salt | SHA1(username ":" password))
    as a number (RFC 2945 formula).

    Raises ValueError if username or salt is 256 characters or longer.
    """
    if len(username) >= 256:
        raise ValueError("username too long")
    if len(salt) >= 256:
        raise ValueError("salt too long")
    inner_digest = sha.sha(username + ":" + password).digest()
    return stringToNumber(sha.sha(salt + inner_digest).digest())
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits]
g,N = goodGroupParameters[bitsIndex]
salt = bytesToString(getRandomBytes(16))
x = makeX(salt, username, password)
verifier = powMod(g, x, N)
return N, g, salt, verifier
def PAD(n, x):
    """Left-pad the byte-string form of ``x`` with NUL bytes to the byte
    length of ``n`` (the SRP "PAD" operation from RFC 5054).

    If x's encoding is already as long as (or longer than) n's, it is
    returned unchanged -- str.rjust has exactly that semantics, replacing
    the previous manual prepend.
    """
    width = len(numberToString(n))
    return numberToString(x).rjust(width, "\0")
def makeU(N, A, B):
    """SRP scrambling parameter u = SHA1(PAD(A) | PAD(B)), as a number."""
    padded = PAD(N, A) + PAD(N, B)
    return stringToNumber(sha.sha(padded).digest())
def makeK(N, g):
    """SRP-6 multiplier k = SHA1(N | PAD(g)), as a number."""
    material = numberToString(N) + PAD(N, g)
    return stringToNumber(sha.sha(material).digest())
"""
MAC_SSL
Modified from Python HMAC by Trevor
"""
class MAC_SSL:
    """SSLv3 keyed MAC (the pre-HMAC construction used by SSL 3.0).

    Unlike HMAC, the key and the fixed-length pads are *concatenated*
    rather than XORed.  This supports the API for Cryptographic Hash
    Functions (PEP 247).
    """
    def __init__(self, key, msg = None, digestmod = None):
        """Create a new MAC_SSL object.

        key: key for the keyed hash object; passing None builds an empty
             shell (used internally by copy() for speed).
        msg: initial input for the hash, if provided.
        digestmod: a module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod is None:
            import md5
            digestmod = md5
        # Sentinel for the fast-copy path: copy() fills in the attributes
        # itself, so skip all initialization.  (The original uses ``==``
        # rather than ``is``; kept as-is.)
        if key == None: #TREVNEW - for faster copying
            return      #TREVNEW
        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size
        # SSLv3 pads: fixed length 40, appended to the key (not XORed as in
        # HMAC).  0x36 for the inner hash, 0x5C for the outer hash.
        ipad = "\x36" * 40
        opad = "\x5C" * 40
        self.inner.update(key)
        self.inner.update(ipad)
        self.outer.update(key)
        self.outer.update(opad)
        if msg is not None:
            self.update(msg)
    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)
    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Build an empty shell (key=None skips __init__'s setup), then
        # clone the two hash states directly.
        other = MAC_SSL(None) #TREVNEW - for faster copying
        other.digest_size = self.digest_size #TREVNEW
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other
    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        # outer(key | opad | inner_digest) -- computed on copies so the
        # running state is preserved.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()
    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([hex(ord(x))[2:].zfill(2)
                        for x in tuple(self.digest())])
| apache-2.0 |
duqiao/django | tests/validation/test_unique.py | 337 | 7108 | from __future__ import unicode_literals
import datetime
import unittest
from django.apps.registry import Apps
from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase
from .models import (
CustomPKModel, FlexibleDatePost, ModelToValidate, Post, UniqueErrorsModel,
UniqueFieldsModel, UniqueForDateModel, UniqueTogetherModel,
)
class GetUniqueCheckTests(unittest.TestCase):
    """Verify which checks Model._get_unique_checks() collects.

    The method returns a pair (unique_checks, date_checks): plain unique /
    unique_together constraints in the first list, unique_for_date/year/month
    constraints in the second.
    """
    def test_unique_fields_get_collected(self):
        # Each unique=True field, plus the implicit primary key, yields one
        # check; this model has no date-based checks.
        m = UniqueFieldsModel()
        self.assertEqual(
            ([(UniqueFieldsModel, ('id',)),
              (UniqueFieldsModel, ('unique_charfield',)),
              (UniqueFieldsModel, ('unique_integerfield',))],
             []),
            m._get_unique_checks()
        )
    def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
        # Meta.unique_together entries come back as tuples, alongside the pk.
        m = UniqueTogetherModel()
        self.assertEqual(
            ([(UniqueTogetherModel, ('ifield', 'cfield')),
              (UniqueTogetherModel, ('ifield', 'efield')),
              (UniqueTogetherModel, ('id',)), ],
             []),
            m._get_unique_checks()
        )
    def test_unique_together_normalization(self):
        """
        Test the Meta.unique_together normalization with different sorts of
        objects.
        """
        # Maps a description to (unique_together input, expected normalized
        # tuple-of-tuples).
        data = {
            '2-tuple': (('foo', 'bar'),
                        (('foo', 'bar'),)),
            'list': (['foo', 'bar'],
                     (('foo', 'bar'),)),
            'already normalized': ((('foo', 'bar'), ('bar', 'baz')),
                                   (('foo', 'bar'), ('bar', 'baz'))),
            'set': ({('foo', 'bar'), ('bar', 'baz')}, # Ref #21469
                    (('foo', 'bar'), ('bar', 'baz'))),
        }
        for test_name, (unique_together, normalized) in data.items():
            # Build a throwaway model per case; an isolated Apps() registry
            # keeps these classes out of the global app registry.
            class M(models.Model):
                foo = models.IntegerField()
                bar = models.IntegerField()
                baz = models.IntegerField()
                Meta = type(str('Meta'), (), {
                    'unique_together': unique_together,
                    'apps': Apps()
                })
            checks, _ = M()._get_unique_checks()
            for t in normalized:
                check = (M, t)
                self.assertIn(check, checks)
    def test_primary_key_is_considered_unique(self):
        # A custom primary key field replaces 'id' in the unique checks.
        m = CustomPKModel()
        self.assertEqual(([(CustomPKModel, ('my_pk_field',))], []), m._get_unique_checks())
    def test_unique_for_date_gets_picked_up(self):
        # Date checks are (model, lookup_type, field, date_field) tuples.
        m = UniqueForDateModel()
        self.assertEqual((
            [(UniqueForDateModel, ('id',))],
            [(UniqueForDateModel, 'date', 'count', 'start_date'),
             (UniqueForDateModel, 'year', 'count', 'end_date'),
             (UniqueForDateModel, 'month', 'order', 'end_date')]
            ), m._get_unique_checks()
        )
    def test_unique_for_date_exclusion(self):
        # Excluding a date field drops the checks that reference it.
        m = UniqueForDateModel()
        self.assertEqual((
            [(UniqueForDateModel, ('id',))],
            [(UniqueForDateModel, 'year', 'count', 'end_date'),
             (UniqueForDateModel, 'month', 'order', 'end_date')]
            ), m._get_unique_checks(exclude='start_date')
        )
class PerformUniqueChecksTest(TestCase):
    """Exercise full_clean()'s unique and unique_for_* validation paths."""
    def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(self):
        # Regression test for #12560: no pk given -> no uniqueness query.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name='Some Name')
            setattr(mtv, '_adding', True)
            mtv.full_clean()
    def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
        # Regression test for #12560: explicit pk on a new object must be
        # checked for uniqueness (exactly one query).
        with self.assertNumQueries(1):
            mtv = ModelToValidate(number=10, name='Some Name', id=123)
            setattr(mtv, '_adding', True)
            mtv.full_clean()
    def test_primary_key_unique_check_not_performed_when_not_adding(self):
        # Regression test for #12132: existing objects skip the pk check.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name='Some Name')
            mtv.full_clean()
    def test_unique_for_date(self):
        Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        # Same title on the same posted date -> unique_for_date violation.
        p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'title': ['Title must be unique for Posted date.']})
        # Should work without errors
        p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
        p.full_clean()
        # Should work without errors
        p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9, 4))
        p.full_clean()
        # Same slug within the same *year* -> unique_for_year violation.
        p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'slug': ['Slug must be unique for Posted year.']})
        # Same subtitle within the same *month* -> unique_for_month violation.
        p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'subtitle': ['Subtitle must be unique for Posted month.']})
        # The referenced date field itself is still required.
        p = Post(title="Django 1.0 is released")
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(cm.exception.message_dict, {'posted': ['This field cannot be null.']})
    def test_unique_for_date_with_nullable_date(self):
        # unique_for_* checks must be skipped when the referenced (nullable)
        # date field is None.
        FlexibleDatePost.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        p = FlexibleDatePost(title="Django 1.0 is released")
        try:
            p.full_clean()
        except ValidationError:
            self.fail("unique_for_date checks shouldn't trigger when the associated DateField is None.")
        p = FlexibleDatePost(slug="Django 1.0")
        try:
            p.full_clean()
        except ValidationError:
            self.fail("unique_for_year checks shouldn't trigger when the associated DateField is None.")
        p = FlexibleDatePost(subtitle="Finally")
        try:
            p.full_clean()
        except ValidationError:
            self.fail("unique_for_month checks shouldn't trigger when the associated DateField is None.")
    def test_unique_errors(self):
        # The model's custom unique-violation messages are surfaced in the
        # ValidationError's message_dict.
        UniqueErrorsModel.objects.create(name='Some Name', no=10)
        m = UniqueErrorsModel(name='Some Name', no=11)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(cm.exception.message_dict, {'name': ['Custom unique name message.']})
        m = UniqueErrorsModel(name='Some Other Name', no=10)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(cm.exception.message_dict, {'no': ['Custom unique number message.']})
| bsd-3-clause |
jackwilsdon/beets | beets/mediafile.py | 6 | 1052 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import mediafile
import warnings
# Back-compat shim: mirror the public contents of the standalone
# ``mediafile`` package into this module so legacy ``beets.mediafile``
# imports keep working.
warnings.warn("beets.mediafile is deprecated; use mediafile instead")

# Copy every name except ``__name__`` (which must keep identifying this
# module).  Using a dict-comprehension update avoids module-level loop
# variables: the old ``for key, value`` loop created globals named ``key``
# and ``value`` and then deleted them, which would clobber/remove
# same-named attributes if ``mediafile`` ever exported them.
globals().update(
    {name: value for name, value in mediafile.__dict__.items()
     if name != '__name__'}
)
del warnings, mediafile
| mit |
T-R0D/JustForFun | aoc2016/aoc2016/day18/solution.py | 1 | 2158 | # This file is part of aoc2016.
#
# aoc2016 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aoc2016 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aoc2016. If not, see <http://www.gnu.org/licenses/>.
SAFE_TILE = '.'
TRAP_TILE = '^'


def part_one(puzzle_input):
    """Solve part one: count safe tiles in a 40-row room.

    :param puzzle_input: string describing the first row of tiles.
    :return: total number of safe ('.') tiles over all 40 rows.
    """
    return count_safe_tiles_in_room(first_row_of_tiles=puzzle_input, n_rows=40)


def part_two(puzzle_input):
    """Solve part two: count safe tiles in a 400000-row room."""
    return count_safe_tiles_in_room(first_row_of_tiles=puzzle_input,
                                    n_rows=400000)


def count_safe_tiles_in_room(first_row_of_tiles, n_rows):
    """Grow ``n_rows`` rows from the given first row, counting safe tiles.

    Only the current row is kept in memory since each row depends solely on
    the previous one.
    """
    current_row = list(first_row_of_tiles)
    n_safe_tiles = count_safe_tiles(current_row)
    for _ in range(n_rows - 1):
        current_row = decode_next_row_of_tiles(current_row)
        n_safe_tiles += count_safe_tiles(current_row)
    return n_safe_tiles


def count_safe_tiles(row_of_tiles):
    """Return the number of safe ('.') tiles in a single row.

    (The previous version counted the same thing into a variable
    misleadingly named ``n_traps``.)
    """
    return sum(1 for tile in row_of_tiles if tile == SAFE_TILE)


def decode_next_row_of_tiles(input_row):
    """Derive the row below ``input_row``.

    Tiles beyond either end of the row count as safe; padding the row with
    one safe tile on each side lets a single rule cover the edges (and also
    handles rows of length 1, which the old edge-special-casing could not).
    """
    padded = [SAFE_TILE] + list(input_row) + [SAFE_TILE]
    return [determine_tile(padded[i - 1], padded[i], padded[i + 1])
            for i in range(1, len(padded) - 1)]


def determine_tile(left, center, right):
    """Return the tile produced below ``center``.

    The four trap rules (^.., ..^, ^^., .^^ reading left/center/right)
    collapse to: the new tile is a trap exactly when the left and right
    parents differ -- the center parent never matters.
    """
    return TRAP_TILE if left != right else SAFE_TILE
| gpl-2.0 |
joshmoore/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/impexp.py | 1 | 1058 | #!/usr/bin/env python
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseImpexp(BaseController):
    """Controller backing the webclient import/export ("impexp") page."""

    def __init__(self, conn, **kw):
        """Initialise the shared controller state and set the breadcrumb.

        :param conn: OMERO gateway connection passed to BaseController.
        :param kw: extra keyword arguments, accepted for interface
            compatibility (unused here).
        """
        super(BaseImpexp, self).__init__(conn)
        self.eContext['breadcrumb'] = ['Import']
| gpl-2.0 |
havard024/prego | crm/lib/python2.7/site-packages/django/views/decorators/clickjacking.py | 550 | 1759 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
    """Decorator that sets the X-Frame-Options header to 'DENY' on the
    wrapped view's response, unless the response already carries that
    header.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...

    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'DENY'
        return response
    return wrapped_view
def xframe_options_sameorigin(view_func):
    """Decorator that sets the X-Frame-Options header to 'SAMEORIGIN' on
    the wrapped view's response, unless the response already carries that
    header.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...

    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'SAMEORIGIN'
        return response
    return wrapped_view
def xframe_options_exempt(view_func):
    """Decorator that marks the wrapped view's response so that
    XFrameOptionsMiddleware does NOT set the X-Frame-Options header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...

    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        response.xframe_options_exempt = True
        return response
    return wrapped_view
| mit |
vaygr/ansible | test/units/modules/cloud/amazon/test_api_gateway.py | 45 | 2355 | #
# (c) 2016 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import sys
import pytest
from ansible.module_utils.ec2 import HAS_BOTO3
from units.modules.utils import set_module_args
if not HAS_BOTO3:
pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules")
import ansible.modules.cloud.amazon.aws_api_gateway as agw
exit_return_dict = {}
def fake_exit_json(self, **kwargs):
""" store the kwargs given to exit_json rather than putting them out to stdout"""
global exit_return_dict
exit_return_dict = kwargs
sys.exit(0)
def test_upload_api(monkeypatch):
class FakeConnection:
def put_rest_api(self, *args, **kwargs):
assert kwargs["body"] == "the-swagger-text-is-fake"
return {"msg": "success!"}
def return_fake_connection(*args, **kwargs):
return FakeConnection()
monkeypatch.setattr(agw, "boto3_conn", return_fake_connection)
monkeypatch.setattr(agw.AnsibleModule, "exit_json", fake_exit_json)
set_module_args({
"api_id": "fred",
"state": "present",
"swagger_text": "the-swagger-text-is-fake",
"region": 'mars-north-1',
})
with pytest.raises(SystemExit):
agw.main()
assert exit_return_dict["changed"]
def test_warn_if_region_not_specified():
set_module_args({
"name": "aws_api_gateway",
"state": "present",
"runtime": 'python2.7',
"role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
"handler": 'lambda_python.my_handler'})
with pytest.raises(SystemExit):
print(agw.main())
| gpl-3.0 |
mindbender-studio/setup | bin/windows/python36/Lib/webbrowser.py | 8 | 21817 | #! /usr/bin/env python3
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import shutil
import sys
import subprocess
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
    """Raised when no runnable browser can be located, or when open() is
    given an out-of-range 'new' argument."""
    pass
_browsers = {}          # lowercased name -> [controller class, instance]
_tryorder = []          # browser names in order of preference


def register(name, klass, instance=None, update_tryorder=1):
    """Register a browser connector and, optionally, a connection instance.

    update_tryorder > 0 appends the name to the preference order,
    update_tryorder < 0 prepends it (making it the most preferred), and
    update_tryorder == 0 leaves the preference order untouched.
    """
    _browsers[name.lower()] = [klass, instance]
    if update_tryorder > 0:
        _tryorder.append(name)
    elif update_tryorder < 0:
        _tryorder.insert(0, name)
def get(using=None):
    """Return a browser launcher instance appropriate for the environment.

    ``using`` may name a registered browser, an executable path, or a
    command-line template containing '%s' for the URL; if omitted, the
    registered preference order is tried.
    """
    candidates = [using] if using is not None else _tryorder
    for browser in candidates:
        if '%s' in browser:
            # A command-line template: split it into executable + arguments.
            cmdline = shlex.split(browser)
            if cmdline[-1] == '&':
                # Trailing '&' requests background execution.
                return BackgroundBrowser(cmdline[:-1])
            return GenericBrowser(cmdline)
        # A plain browser name or executable path.
        try:
            command = _browsers[browser.lower()]
        except KeyError:
            command = _synthesize(browser)
        if command[1] is not None:
            # A ready-made controller instance.
            return command[1]
        if command[0] is not None:
            # A controller class: instantiate it.
            return command[0]()
    raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
    """Display url using the first browser in the preference order that
    reports success; return True on success, False otherwise.

    (Deliberately shadows the builtin open(); use "import webbrowser"
    rather than a star import.)
    """
    for browser_name in _tryorder:
        launcher = get(browser_name)
        if launcher.open(url, new, autoraise):
            return True
    return False
def open_new(url):
    """Open url in a new window of the default browser, if possible."""
    return open(url, 1)
def open_new_tab(url):
    """Open url in a new tab of the default browser, if possible."""
    return open(url, 2)
def _synthesize(browser, update_tryorder=1):
    """Attempt to synthesize a controller based on existing controllers.

    Useful when the user names a specific installation of an
    already-registered browser (e.g. via the BROWSER environment
    variable): the registered controller is cloned and pointed at the
    requested executable.

    Returns [None, controller] on success, [None, None] if there is no
    runnable executable or no matching registered controller.
    """
    cmd = browser.split()[0]
    if not shutil.which(cmd):
        return [None, None]
    name = os.path.basename(cmd)
    command = _browsers.get(name.lower())
    if command is None:
        return [None, None]
    # Clone the registered controller so it runs the requested executable.
    controller = command[1]
    if controller and name.lower() == controller.basename:
        import copy
        controller = copy.copy(controller)
        controller.name = browser
        controller.basename = os.path.basename(browser)
        register(browser, None, controller, update_tryorder)
        return [None, controller]
    return [None, None]
# General parent classes
class BaseBrowser(object):
    """Parent class for all browsers. Do not use directly."""

    args = ['%s']

    def __init__(self, name=""):
        # Subclasses refine basename; by default both mirror the name.
        self.name = name
        self.basename = name

    def open(self, url, new=0, autoraise=True):
        """Display url; concrete subclasses must implement this."""
        raise NotImplementedError

    def open_new(self, url):
        """Open url in a new browser window."""
        return self.open(url, 1)

    def open_new_tab(self, url):
        """Open url in a new browser tab."""
        return self.open(url, 2)
class GenericBrowser(BaseBrowser):
    """Class for all browsers started with a command
    and without remote functionality."""

    def __init__(self, name):
        """``name`` is either a command string or a [command, arg, ...]
        sequence whose arguments may contain '%s' URL placeholders."""
        if isinstance(name, str):
            self.name = name
            self.args = ["%s"]
        else:
            # A sequence: executable followed by its argument templates.
            self.name = name[0]
            self.args = name[1:]
        self.basename = os.path.basename(self.name)

    def open(self, url, new=0, autoraise=True):
        """Run the command with url substituted; True iff it exits with 0."""
        command = [self.name]
        command.extend(arg.replace("%s", url) for arg in self.args)
        try:
            if sys.platform.startswith('win'):
                proc = subprocess.Popen(command)
            else:
                proc = subprocess.Popen(command, close_fds=True)
            return not proc.wait()
        except OSError:
            return False
class BackgroundBrowser(GenericBrowser):
    """Class for all browsers which are to be started in the
    background."""

    def open(self, url, new=0, autoraise=True):
        """Launch the command without waiting; True if the process is
        still alive immediately after the spawn."""
        command = [self.name]
        command.extend(arg.replace("%s", url) for arg in self.args)
        try:
            if sys.platform.startswith('win'):
                proc = subprocess.Popen(command)
            else:
                # Detach from our session so the browser outlives us.
                proc = subprocess.Popen(command, close_fds=True,
                                        start_new_session=True)
            return (proc.poll() is None)
        except OSError:
            return False
class UnixBrowser(BaseBrowser):
    """Parent class for all Unix browsers with remote functionality."""
    # Per-browser pair of option strings: [option-for-no-raise,
    # option-for-raise]; None means the browser has no such options.
    raise_opts = None
    # If true, direct (non-remote) invocation runs detached in a new session.
    background = False
    redirect_stdout = True
    # In remote_args, %s will be replaced with the requested URL. %action will
    # be replaced depending on the value of 'new' passed to open.
    # remote_action is used for new=0 (open). If remote_action_newwin is not
    # None, it is used for new=1 (open_new). If remote_action_newtab is not
    # None, it is used for new=2 (open_new_tab). After both substitutions are
    # made, any empty strings in the transformed remote_args list will be
    # removed.
    remote_args = ['%action', '%s']
    remote_action = None
    remote_action_newwin = None
    remote_action_newtab = None
    def _invoke(self, args, remote, autoraise):
        # Run self.name with args.  `remote` selects the remote-control
        # protocol: wait briefly for an exit status and report whether the
        # remote call succeeded; otherwise behavior follows self.background.
        raise_opt = []
        if remote and self.raise_opts:
            # use autoraise argument only for remote invocation
            autoraise = int(autoraise)
            opt = self.raise_opts[autoraise]
            if opt: raise_opt = [opt]
        cmdline = [self.name] + raise_opt + args
        if remote or self.background:
            inout = subprocess.DEVNULL
        else:
            # for TTY browsers, we need stdin/out
            inout = None
        p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
                             stdout=(self.redirect_stdout and inout or None),
                             stderr=inout, start_new_session=True)
        if remote:
            # wait at most five seconds. If the subprocess is not finished, the
            # remote invocation has (hopefully) started a new instance.
            try:
                rc = p.wait(5)
                # if remote call failed, open() will try direct invocation
                return not rc
            except subprocess.TimeoutExpired:
                return True
        elif self.background:
            # Background launch succeeded if the process is still running.
            if p.poll() is None:
                return True
            else:
                return False
        else:
            return not p.wait()
    def open(self, url, new=0, autoraise=True):
        # Map `new` onto the remote-protocol action string, falling back to
        # the new-window action when no new-tab action is defined.
        if new == 0:
            action = self.remote_action
        elif new == 1:
            action = self.remote_action_newwin
        elif new == 2:
            if self.remote_action_newtab is None:
                action = self.remote_action_newwin
            else:
                action = self.remote_action_newtab
        else:
            raise Error("Bad 'new' parameter to open(); " +
                        "expected 0, 1, or 2, got %s" % new)
        args = [arg.replace("%s", url).replace("%action", action)
                for arg in self.remote_args]
        args = [arg for arg in args if arg]
        success = self._invoke(args, True, autoraise)
        if not success:
            # remote invocation failed, try straight way
            args = [arg.replace("%s", url) for arg in self.args]
            return self._invoke(args, False, False)
        else:
            return True
class Mozilla(UnixBrowser):
    """Launcher class for Mozilla browsers."""
    # Firefox-style command line: plain URL plus an optional
    # -new-window / -new-tab action flag.
    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "-new-window"
    remote_action_newtab = "-new-tab"
    background = True
class Netscape(UnixBrowser):
    """Launcher class for Netscape browser."""
    # Classic -remote 'openURL(url[,action])' protocol; -raise/-noraise
    # control whether the window is brought to the front.
    raise_opts = ["-noraise", "-raise"]
    remote_args = ['-remote', 'openURL(%s%action)']
    remote_action = ""
    remote_action_newwin = ",new-window"
    remote_action_newtab = ",new-tab"
    background = True
class Galeon(UnixBrowser):
    """Launcher class for Galeon/Epiphany browsers."""
    raise_opts = ["-noraise", ""]
    # -n opens in an existing window, -w forces a new window; there is no
    # separate new-tab action, so new=2 falls back to -w.
    remote_args = ['%action', '%s']
    remote_action = "-n"
    remote_action_newwin = "-w"
    background = True
class Chrome(UnixBrowser):
    "Launcher class for Google Chrome browser."
    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "--new-window"
    # Empty new-tab action: Chrome opens a new tab by default.
    remote_action_newtab = ""
    background = True
# Chromium accepts the same command line as Chrome.
Chromium = Chrome
class Opera(UnixBrowser):
    "Launcher class for Opera browser."
    raise_opts = ["-noraise", ""]
    # Netscape-style remote protocol with Opera's own action suffixes.
    remote_args = ['-remote', 'openURL(%s%action)']
    remote_action = ""
    remote_action_newwin = ",new-window"
    remote_action_newtab = ",new-page"
    background = True
class Elinks(UnixBrowser):
    "Launcher class for Elinks browsers."
    remote_args = ['-remote', 'openURL(%s%action)']
    remote_action = ""
    remote_action_newwin = ",new-window"
    remote_action_newtab = ",new-tab"
    # Runs in the terminal, so it is a foreground (TTY) browser.
    background = False
    # elinks doesn't like its stdout to be redirected -
    # it uses redirected stdout as a signal to do -dump
    redirect_stdout = False
class Konqueror(BaseBrowser):
    """Controller for the KDE File Manager (kfm, or Konqueror).
    See the output of ``kfmclient --commands``
    for more information on the Konqueror remote-control interface.
    """
    def open(self, url, new=0, autoraise=True):
        # Tries three launchers in order of preference: the kfmclient
        # remote-control tool, the konqueror binary, then the legacy kfm.
        # XXX Currently I know no way to prevent KFM from opening a new win.
        if new == 2:
            action = "newTab"
        else:
            action = "openURL"
        devnull = subprocess.DEVNULL
        try:
            p = subprocess.Popen(["kfmclient", action, url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull)
        except OSError:
            # fall through to next variant
            pass
        else:
            p.wait()
            # kfmclient's return code unfortunately has no meaning as it seems
            return True
        try:
            p = subprocess.Popen(["konqueror", "--silent", url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull,
                                 start_new_session=True)
        except OSError:
            # fall through to next variant
            pass
        else:
            if p.poll() is None:
                # Should be running now.
                return True
        try:
            p = subprocess.Popen(["kfm", "-d", url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull,
                                 start_new_session=True)
        except OSError:
            return False
        else:
            return (p.poll() is None)
class Grail(BaseBrowser):
    """Launcher for the Grail browser via its Unix-socket remote protocol."""
    # There should be a way to maintain a connection to Grail, but the
    # Grail remote control protocol doesn't really allow that at this
    # point. It probably never will!
    def _find_grail_rc(self):
        """Return a socket connected to a live Grail instance, or None."""
        import glob
        import pwd
        import socket
        import tempfile
        tempdir = os.path.join(tempfile.gettempdir(),
                               ".grail-unix")
        user = pwd.getpwuid(os.getuid())[0]
        filename = os.path.join(tempdir, user + "-*")
        maybes = glob.glob(filename)
        if not maybes:
            return None
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        for fn in maybes:
            # need to PING each one until we find one that's live
            try:
                s.connect(fn)
            except OSError:
                # no good; attempt to clean it out, but don't fail:
                try:
                    os.unlink(fn)
                except OSError:
                    pass
            else:
                return s
        # No live instance found: don't leak the unconnected socket
        # (the original fell off the loop, returning None implicitly).
        s.close()
        return None
    def _remote(self, action):
        """Send one command line to a running Grail; 1 on success, else 0."""
        s = self._find_grail_rc()
        if not s:
            return 0
        # socket.send requires bytes on Python 3; the original passed the
        # str command, which would raise TypeError.
        s.send(action.encode('utf-8'))
        s.close()
        return 1
    def open(self, url, new=0, autoraise=True):
        # LOADNEW asks Grail for a new window; LOAD reuses the current one.
        if new:
            ok = self._remote("LOADNEW " + url)
        else:
            ok = self._remote("LOAD " + url)
        return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
    """Register browsers usable under an X11 display.

    Registration order matters: earlier entries become the preferred
    fallbacks when the user asks for no specific browser.
    """
    # use xdg-open if around
    if shutil.which("xdg-open"):
        register("xdg-open", None, BackgroundBrowser("xdg-open"))
    # The default GNOME3 browser
    if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gvfs-open"):
        register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
    # The default GNOME browser
    if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gnome-open"):
        register("gnome-open", None, BackgroundBrowser("gnome-open"))
    # The default KDE browser
    if "KDE_FULL_SESSION" in os.environ and shutil.which("kfmclient"):
        register("kfmclient", Konqueror, Konqueror("kfmclient"))
    if shutil.which("x-www-browser"):
        register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
    # The Mozilla browsers
    for browser in ("firefox", "iceweasel", "iceape", "seamonkey"):
        if shutil.which(browser):
            register(browser, None, Mozilla(browser))
    # The Netscape and old Mozilla browsers
    for browser in ("mozilla-firefox",
                    "mozilla-firebird", "firebird",
                    "mozilla", "netscape"):
        if shutil.which(browser):
            register(browser, None, Netscape(browser))
    # Konqueror/kfm, the KDE browser.
    if shutil.which("kfm"):
        register("kfm", Konqueror, Konqueror("kfm"))
    elif shutil.which("konqueror"):
        register("konqueror", Konqueror, Konqueror("konqueror"))
    # Gnome's Galeon and Epiphany
    for browser in ("galeon", "epiphany"):
        if shutil.which(browser):
            register(browser, None, Galeon(browser))
    # Skipstone, another Gtk/Mozilla based browser
    if shutil.which("skipstone"):
        register("skipstone", None, BackgroundBrowser("skipstone"))
    # Google Chrome/Chromium browsers
    for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
        if shutil.which(browser):
            register(browser, None, Chrome(browser))
    # Opera, quite popular
    if shutil.which("opera"):
        register("opera", None, Opera("opera"))
    # Next, Mosaic -- old but still in use.
    if shutil.which("mosaic"):
        register("mosaic", None, BackgroundBrowser("mosaic"))
    # Grail, the Python browser. Does anybody still use it?
    if shutil.which("grail"):
        register("grail", Grail, None)
# Module import time: populate the browser registry for Unix platforms.
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
    register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
    if shutil.which("www-browser"):
        register("www-browser", None, GenericBrowser("www-browser"))
    # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
    if shutil.which("links"):
        register("links", None, GenericBrowser("links"))
    if shutil.which("elinks"):
        register("elinks", None, Elinks("elinks"))
    # The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
    if shutil.which("lynx"):
        register("lynx", None, GenericBrowser("lynx"))
    # The w3m browser <http://w3m.sourceforge.net/>
    if shutil.which("w3m"):
        register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
    class WindowsDefault(BaseBrowser):
        # Delegates to the Windows shell's URL association.
        def open(self, url, new=0, autoraise=True):
            try:
                os.startfile(url)
            except OSError:
                # [Error 22] No application is associated with the specified
                # file for this operation: '<URL>'
                return False
            else:
                return True
    # Reset anything registered by the Unix code paths above.
    _tryorder = []
    _browsers = {}
    # First try to use the default Windows browser
    register("windows-default", WindowsDefault)
    # Detect some common Windows browsers, fallback to IE
    iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
                            "Internet Explorer\\IEXPLORE.EXE")
    for browser in ("firefox", "firebird", "seamonkey", "mozilla",
                    "netscape", "opera", iexplore):
        if shutil.which(browser):
            register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
    # Adapted from patch submitted to SourceForge by Steven J. Burr
    class MacOSX(BaseBrowser):
        """Launcher class for Aqua browsers on Mac OS X
        Optionally specify a browser name on instantiation. Note that this
        will not work for Aqua browsers if the user has moved the application
        package after installation.
        If no browser is specified, the default browser, as specified in the
        Internet System Preferences panel, will be used.
        """
        def __init__(self, name):
            self.name = name
        def open(self, url, new=0, autoraise=True):
            # Single quotes would break the AppleScript string below.
            assert "'" not in url
            # hack for local urls
            if not ':' in url:
                url = 'file:'+url
            # new must be 0 or 1
            new = int(bool(new))
            if self.name == "default":
                # User called open, open_new or get without a browser parameter
                script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
            else:
                # User called get and chose a browser
                if self.name == "OmniWeb":
                    toWindow = ""
                else:
                    # Include toWindow parameter of OpenURL command for browsers
                    # that support it. 0 == new window; -1 == existing
                    toWindow = "toWindow %d" % (new - 1)
                cmd = 'OpenURL "%s"' % url.replace('"', '%22')
                script = '''tell application "%s"
                                activate
                                %s %s
                            end tell''' % (self.name, cmd, toWindow)
            # Open pipe to AppleScript through osascript command
            osapipe = os.popen("osascript", "w")
            if osapipe is None:
                return False
            # Write script to osascript's stdin
            osapipe.write(script)
            rc = osapipe.close()
            return not rc
    class MacOSXOSAScript(BaseBrowser):
        # Simpler AppleScript-based launcher used for the registered defaults.
        def __init__(self, name):
            self._name = name
        def open(self, url, new=0, autoraise=True):
            if self._name == 'default':
                script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
            else:
                script = '''
                   tell application "%s"
                       activate
                       open location "%s"
                   end
                   '''%(self._name, url.replace('"', '%22'))
            # osascript reads the AppleScript program from stdin.
            osapipe = os.popen("osascript", "w")
            if osapipe is None:
                return False
            osapipe.write(script)
            rc = osapipe.close()
            return not rc
    # Don't clear _tryorder or _browsers since OS X can use above Unix support
    # (but we prefer using the OS X specific stuff)
    register("safari", None, MacOSXOSAScript('safari'), -1)
    register("firefox", None, MacOSXOSAScript('firefox'), -1)
    register("chrome", None, MacOSXOSAScript('chrome'), -1)
    register("MacOSX", None, MacOSXOSAScript('default'), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
    _userchoices = os.environ["BROWSER"].split(os.pathsep)
    # Reversed so that, after each entry is prepended (preference -1),
    # the first BROWSER entry ends up most preferred.
    _userchoices.reverse()
    # Treat choices in same way as if passed into get() but do register
    # and prepend to _tryorder
    for cmdline in _userchoices:
        if cmdline != '':
            cmd = _synthesize(cmdline, -1)
            if cmd[1] is None:
                register(cmdline, None, GenericBrowser(cmdline), -1)
    cmdline = None # to make del work if _userchoices was empty
    del cmdline
    del _userchoices
# what to do if _tryorder is now empty?
def main():
    """Command-line entry point: open a URL, optionally in a new window/tab."""
    import getopt
    usage = """Usage: %s [-n | -t] url
    -n: open new window
    -t: open new tab""" % sys.argv[0]
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ntd')
    except getopt.error as msg:
        print(msg, file=sys.stderr)
        print(usage, file=sys.stderr)
        sys.exit(1)
    new_win = 0
    for opt, _ in opts:
        if opt == '-n':
            new_win = 1
        elif opt == '-t':
            new_win = 2
    if len(args) != 1:
        print(usage, file=sys.stderr)
        sys.exit(1)
    # Module-level open(), not the builtin: dispatch to the default browser.
    open(args[0], new_win)
    print("\a")
# Allow running this module directly as a small command-line tool.
if __name__ == "__main__":
    main()
| mit |
BiznetGIO/horizon | openstack_dashboard/dashboards/project/instances/workflows/update_instance.py | 2 | 5828 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.utils import filters
INDEX_URL = "horizon:projects:instances:index"
ADD_USER_URL = "horizon:projects:instances:create_user"
INSTANCE_SEC_GROUP_SLUG = "update_security_groups"
class UpdateInstanceSecurityGroupsAction(workflows.MembershipAction):
    # Membership-style action letting the user move security groups between
    # the "available" and "instance" lists for a single server.
    def __init__(self, request, *args, **kwargs):
        super(UpdateInstanceSecurityGroupsAction, self).__init__(request,
                                                                 *args,
                                                                 **kwargs)
        err_msg = _('Unable to retrieve security group list. '
                    'Please try again later.')
        context = args[0]
        instance_id = context.get('instance_id', '')
        # The membership widget requires a "default role" field even though
        # security groups have no role concept; a single 'member' role is used.
        default_role_name = self.get_default_role_field_name()
        self.fields[default_role_name] = forms.CharField(required=False)
        self.fields[default_role_name].initial = 'member'
        # Get list of available security groups
        all_groups = []
        try:
            all_groups = api.neutron.security_group_list(request)
        except Exception:
            exceptions.handle(request, err_msg)
        groups_list = [(group.id, group.name) for group in all_groups]
        instance_groups = []
        try:
            instance_groups = api.neutron.server_security_groups(request,
                                                                 instance_id)
        except Exception:
            exceptions.handle(request, err_msg)
        field_name = self.get_member_field_name('member')
        self.fields[field_name] = forms.MultipleChoiceField(required=False)
        self.fields[field_name].choices = groups_list
        self.fields[field_name].initial = [group.id
                                           for group in instance_groups]
    def handle(self, request, data):
        # Replace the server's security groups with the selected set.
        instance_id = data['instance_id']
        # NOTE(review): map() returns a lazy iterator on Python 3 --
        # presumably consumed inside the API call; confirm if this code
        # is expected to run under Python 3.
        wanted_groups = map(filters.get_int_or_uuid, data['wanted_groups'])
        try:
            api.neutron.server_update_security_groups(request, instance_id,
                                                      wanted_groups)
        except Exception as e:
            exceptions.handle(request, str(e))
            return False
        return True
    class Meta(object):
        name = _("Security Groups")
        slug = INSTANCE_SEC_GROUP_SLUG
class UpdateInstanceSecurityGroups(workflows.UpdateMembersStep):
    # Workflow step wrapping the membership action above.
    action_class = UpdateInstanceSecurityGroupsAction
    help_text = _("Add and remove security groups to this instance "
                  "from the list of available security groups.")
    available_list_title = _("All Security Groups")
    members_list_title = _("Instance Security Groups")
    no_available_text = _("No security groups found.")
    no_members_text = _("No security groups enabled.")
    show_roles = False
    depends_on = ("instance_id",)
    contributes = ("wanted_groups",)
    def allowed(self, request):
        # Only show this step when a network (Neutron) service is enabled.
        return api.base.is_service_enabled(request, 'network')
    def contribute(self, data, context):
        # Pull the selected group ids straight from the raw POST data, since
        # the membership widget builds its field names dynamically.
        request = self.workflow.request
        if data:
            field_name = self.get_member_field_name('member')
            context["wanted_groups"] = request.POST.getlist(field_name)
        return context
class UpdateInstanceInfoAction(workflows.Action):
    # Single-field action for renaming an instance via Nova.
    name = forms.CharField(label=_("Name"),
                           max_length=255)
    def handle(self, request, data):
        try:
            api.nova.server_update(request,
                                   data['instance_id'],
                                   data['name'])
        except Exception:
            # Failure is surfaced through the workflow's failure_message.
            exceptions.handle(request, ignore=True)
            return False
        return True
    class Meta(object):
        name = _("Information")
        slug = 'instance_info'
        help_text = _("Edit the instance details.")
class UpdateInstanceInfo(workflows.Step):
    # Step exposing the rename action; instance_id is supplied by the caller.
    action_class = UpdateInstanceInfoAction
    depends_on = ("instance_id",)
    contributes = ("name",)
class UpdateInstance(workflows.Workflow):
    """Two-step workflow for editing an instance's name and security groups."""
    slug = "update_instance"
    name = _("Edit Instance")
    finalize_button_name = _("Save")
    success_message = _('Modified instance "%s".')
    failure_message = _('Unable to modify instance "%s".')
    success_url = "horizon:project:instances:index"
    default_steps = (UpdateInstanceInfo,
                     UpdateInstanceSecurityGroups)
    def format_status_message(self, message):
        # Fill the success/failure template with the instance name.
        return message % self.context.get('name', 'unknown instance')
# NOTE(kspear): nova doesn't support instance security group management
# by an admin. This isn't really the place for this code,
# but the other ways of special-casing this are even messier.
class AdminUpdateInstance(UpdateInstance):
    # Admin variant: different index URL and no security-group step, since
    # nova doesn't support instance security group management by an admin.
    success_url = "horizon:admin:instances:index"
    default_steps = (UpdateInstanceInfo,)
| apache-2.0 |
codrut3/tensorflow | tensorflow/contrib/mpi_collectives/mpi_ops_test.py | 71 | 11058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.contrib.mpi_collectives.mpi_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import itertools
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
def mpi_env_rank_and_size():
  """Return (rank, size) of MPI_COMM_WORLD as read from the environment.

  MPI launchers (`mpirun`/`mpiexec`) export implementation-specific
  environment variables describing each process's rank and the world size.
  This checks the variable pairs used by MPICH-style launchers (PMI_*) and
  by Open MPI (OMPI_COMM_WORLD_*), so that `mpi.rank()` and `mpi.size()`
  can be validated against the launcher's view.

  Outside of MPI the defaults of rank 0 and size 1 are returned, matching
  the single-process communicator MPI_Init creates for a standalone run.
  """
  candidates = (("PMI_RANK", "PMI_SIZE"),
                ("OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"))
  for rank_name, size_name in candidates:
    rank_value = os.environ.get(rank_name)
    size_value = os.environ.get(size_name)
    # Only trust a pair where both variables are present.
    if rank_value is not None and size_value is not None:
      return int(rank_value), int(size_value)
  return 0, 1
class MPITests(tf.test.TestCase):
  """
  Tests for MPI ops in tensorflow.contrib.mpi_collectives.

  These are intended to be launched under mpirun with multiple ranks; the
  expected rank/size come from mpi_env_rank_and_size() above.
  """
  def test_mpi_rank(self):
    """Test that the rank returned by mpi.rank() is correct."""
    true_rank, _ = mpi_env_rank_and_size()
    with self.test_session() as session:
      rank = session.run(mpi.rank())
      self.assertEqual(true_rank, rank)
  def test_mpi_size(self):
    """Test that the size returned by mpi.size() is correct."""
    _, true_size = mpi_env_rank_and_size()
    with self.test_session() as session:
      size = session.run(mpi.size())
      self.assertEqual(true_size, size)
  def test_mpi_allreduce_cpu(self):
    """Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
    with self.test_session() as session:
      size = session.run(mpi.size())
      dtypes = [tf.int32, tf.float32]
      dims = [1, 2, 3]
      for dtype, dim in itertools.product(dtypes, dims):
        # Identical seed on every rank => identical random tensors, so the
        # allreduce sum must equal tensor * size.
        tf.set_random_seed(1234)
        tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
        summed = mpi.allreduce(tensor, average=False)
        multiplied = tensor * size
        max_difference = tf.reduce_max(tf.abs(summed - multiplied))
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3:
          threshold = 0
        elif size < 10:
          threshold = 1e-4
        elif size < 15:
          threshold = 5e-4
        else:
          break
        diff = session.run(max_difference)
        self.assertTrue(diff <= threshold,
                        "mpi.allreduce produces incorrect results")
  def test_mpi_allreduce_gpu(self):
    """Test that the allreduce works on GPUs.

    This test will crash badly if used with an MPI implementation that does
    not support GPU memory transfers directly, as it will call MPI_Send on
    a GPU data pointer."""
    # Only do this test if there are GPUs available.
    if not tf.test.is_gpu_available(cuda_only=True):
      return
    # First query this process's local rank on CPU, then pin the test to
    # the matching GPU device.
    no_gpus = tf.GPUOptions(visible_device_list="")
    cpu_config = tf.ConfigProto(gpu_options=no_gpus)
    with self.test_session(config=cpu_config) as session:
      local_rank = session.run(mpi.local_rank())
    one_gpu = tf.GPUOptions(visible_device_list=str(local_rank))
    gpu_config = tf.ConfigProto(gpu_options=one_gpu)
    with self.test_session(config=gpu_config) as session:
      size = session.run(mpi.size())
      dtype = tf.float32
      dim = 3
      with tf.device("/gpu:0"):
        tf.set_random_seed(1234)
        tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
        summed = mpi.allreduce(tensor, average=False)
        multiplied = tensor * size
        max_difference = tf.reduce_max(tf.abs(summed - multiplied))
      # Threshold for floating point equality depends on number of
      # ranks, since we're comparing against precise multiplication.
      if size <= 3:
        threshold = 0
      elif size < 10:
        threshold = 1e-4
      elif size < 15:
        threshold = 5e-4
      else:
        return
      diff = session.run(max_difference)
      self.assertTrue(diff <= threshold,
                      "mpi.allreduce on GPU produces incorrect results")
  def test_mpi_allreduce_error(self):
    """Test that the allreduce raises an error if different ranks try to
    send tensors of different rank or dimension."""
    with self.test_session() as session:
      rank = session.run(mpi.rank())
      size = session.run(mpi.size())
      # This test does not apply if there is only one worker.
      if size == 1:
        return
      # Same rank, different dimension
      tf.set_random_seed(1234)
      dims = [17 + rank] * 3
      tensor = tf.random_uniform(dims, -1.0, 1.0)
      with self.assertRaises(tf.errors.FailedPreconditionError):
        session.run(mpi.allreduce(tensor))
      # Same number of elements, different rank
      tf.set_random_seed(1234)
      if rank == 0:
        dims = [17, 23 * 57]
      else:
        dims = [17, 23, 57]
      tensor = tf.random_uniform(dims, -1.0, 1.0)
      with self.assertRaises(tf.errors.FailedPreconditionError):
        session.run(mpi.allreduce(tensor))
  def test_mpi_allreduce_type_error(self):
    """Test that the allreduce raises an error if different ranks try to
    send tensors of different type."""
    with self.test_session() as session:
      rank = session.run(mpi.rank())
      size = session.run(mpi.size())
      # This test does not apply if there is only one worker.
      if size == 1:
        return
      # Same rank, different dimension
      dims = [17] * 3
      tensor = tf.ones(dims, dtype=tf.int32 if rank % 2 == 0 else tf.float32)
      with self.assertRaises(tf.errors.FailedPreconditionError):
        session.run(mpi.allreduce(tensor))
  def test_mpi_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    with self.test_session() as session:
      size = session.run(mpi.size())
      rank = session.run(mpi.rank())
      dtypes = tf.int32, tf.float32
      dims = 1, 2, 3
      for dtype, dim in itertools.product(dtypes, dims):
        # Each rank contributes a constant tensor of its own rank value;
        # the gathered result must stack them along axis 0.
        tensor = tf.ones([17] * dim, dtype=dtype) * rank
        gathered = mpi.allgather(tensor)
        gathered_tensor = session.run(gathered)
        self.assertEqual(list(gathered_tensor.shape),
                         [17 * size] + [17] * (dim - 1))
        for i in range(size):
          rank_tensor = tf.slice(gathered_tensor, [i * 17] + [0] * (dim - 1),
                                 [17] + [-1] * (dim - 1))
          self.assertEqual(list(rank_tensor.shape), [17] * dim)
          self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
                          "mpi.allgather produces incorrect gathered tensor")
  def test_mpi_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    with self.test_session() as session:
      size = session.run(mpi.size())
      rank = session.run(mpi.rank())
      dtypes = tf.int32, tf.float32
      dims = 1, 2, 3
      for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
          break
        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]
        tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1),
                         dtype=dtype) * rank
        gathered = mpi.allgather(tensor)
        gathered_tensor = session.run(gathered)
        expected_size = sum(tensor_sizes)
        self.assertEqual(list(gathered_tensor.shape),
                         [expected_size] + [17] * (dim - 1))
        for i in range(size):
          rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
          rank_tensor = tf.slice(gathered,
                                 [sum(tensor_sizes[:i])] + [0] * (dim - 1),
                                 rank_size)
          self.assertEqual(list(rank_tensor.shape), rank_size)
          self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
                          "mpi.allgather produces incorrect gathered tensor")
  def test_mpi_allgather_error(self):
    """Test that the allgather returns an error if any dimension besides
    the first is different among the tensors being gathered."""
    with self.test_session() as session:
      rank = session.run(mpi.rank())
      size = session.run(mpi.size())
      # This test does not apply if there is only one worker.
      if size == 1:
        return
      tensor_size = [17] * 3
      tensor_size[1] = 10 * (rank + 1)
      tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
      with self.assertRaises(tf.errors.FailedPreconditionError):
        session.run(mpi.allgather(tensor))
  def test_mpi_allgather_type_error(self):
    """Test that the allgather returns an error if the types being gathered
    differ among the processes"""
    with self.test_session() as session:
      rank = session.run(mpi.rank())
      size = session.run(mpi.size())
      # This test does not apply if there is only one worker.
      if size == 1:
        return
      tensor_size = [17] * 3
      dtype = tf.int32 if rank % 2 == 0 else tf.float32
      tensor = tf.ones(tensor_size, dtype=dtype) * rank
      with self.assertRaises(tf.errors.FailedPreconditionError):
        session.run(mpi.allgather(tensor))
# Run under mpirun so that multi-rank behavior is actually exercised.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
mats116/ElasticBigQuery | dmp/external/simplejson/tests/test_iterable.py | 20 | 1151 | import unittest
from StringIO import StringIO
import simplejson as json
def iter_dumps(obj, **kw):
    """Serialize obj by joining the chunks yielded by iterencode()."""
    encoder = json.JSONEncoder(**kw)
    return ''.join(encoder.iterencode(obj))
def sio_dump(obj, **kw):
    """Serialize obj through json.dump into a StringIO and return the text.

    Bug fix: the original called json.dumps(obj, **kw) -- without the file
    object and discarding the result -- so sio.getvalue() was always ''.
    """
    sio = StringIO()
    json.dump(obj, sio, **kw)
    return sio.getvalue()
class TestIterable(unittest.TestCase):
    # Exercises simplejson's iterable_as_array option through three
    # serialization paths: dumps, iterencode-join, and dump-to-StringIO.
    def test_iterable(self):
        l = [1, 2, 3]
        for dumps in (json.dumps, iter_dumps, sio_dump):
            expect = dumps(l)
            default_expect = dumps(sum(l))
            # Default is False
            self.assertRaises(TypeError, dumps, iter(l))
            self.assertRaises(TypeError, dumps, iter(l), iterable_as_array=False)
            self.assertEqual(expect, dumps(iter(l), iterable_as_array=True))
            # Ensure that the "default" gets called
            self.assertEqual(default_expect, dumps(iter(l), default=sum))
            self.assertEqual(default_expect, dumps(iter(l), iterable_as_array=False, default=sum))
            # Ensure that the "default" does not get called
            self.assertEqual(
                default_expect,
                dumps(iter(l), iterable_as_array=True, default=sum))
| lgpl-3.0 |
diogenesjf/userinfuser | serverside/tools/utils.py | 12 | 2539 | # Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Created on Jan 10, 2011
@author: shan
'''
from google.appengine.ext.webapp import template
from serverside.constants import TEMPLATE_PATHS
from serverside.session import Session
import re
import logging
import string
import random
def generate_random_string(length = 8):
  """ Build a random string of uppercase letters and digits of the given length """
  alphabet = string.ascii_uppercase + string.digits
  return ''.join(random.choice(alphabet) for _ in range(length))
def camelcase_to_friendly_str(s):
  """
  Utility to convert a camelcase string to a friendly string.
  For example, camelcase_to_friendly_str("helloWorld") returns "Hello World".
  Returns None for None or empty input.
  """
  if s == None or len(s) < 1:
    return None
  ret_str = ""
  j = 0
  for i in range(len(s)):
    # Start a new word at each *interior* uppercase letter.  Skipping i == 0
    # fixes the original's spurious leading space for inputs such as
    # "HelloWorld" (previously " Hello World").
    if i > 0 and str(s[i]).isupper():
      ret_str += s[j:i] + " "
      j = i
  ret_str += s[j:]
  # str.upper() on the first character replaces the Python 2-only
  # string.capitalize() module function (and its redundant local import).
  ret_str = ret_str[0].upper() + ret_str[1:]
  return ret_str
def validEmail(email):
  """Check to see if the string is formatted as a valid email address.

  Returns 1 for a valid-looking address, 0 otherwise.  Addresses of 7
  characters or fewer are rejected outright.
  """
  pattern = r"^.+\@(\[?)[a-zA-Z0-9\-\.]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(\]?)$"
  if len(email) > 7 and re.match(pattern, email) is not None:
    return 1
  return 0
def account_login_required(handler_method):
  """
  Decorator to check if user is logged in. If user is not logged in they will be redirect to login screen.
  """
  def check_login(self, *args):
    # Only GET and POST reach the wrapped handler; any other method is
    # answered with the login page.
    if self.request.method != 'GET' and self.request.method != 'POST':
      self.response.out.write(template.render(TEMPLATE_PATHS.CONSOLE_LOGIN, None))
    else:
      user_session = Session().get_current_session(self)
      if user_session == None:
        # No valid session: show the login page instead of the handler.
        self.response.out.write(template.render(TEMPLATE_PATHS.CONSOLE_LOGIN, None))
      else:
        logging.info("LEGIT user session! Email: " + user_session.get_email())
        handler_method(self, *args)
  return check_login
| gpl-3.0 |
erlimar/prototypeguide | src/lib/markupsafe/tests.py | 155 | 3926 | # -*- coding: utf-8 -*-
import gc
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
    # Behavioural tests for the Markup string subclass and escape helpers.
    # NOTE(review): several expected values below appear to have lost their
    # HTML entity escaping (e.g. '&lt;' shown as '<'), likely a transcription
    # artifact of this copy -- verify against the upstream markupsafe tests.
    def test_adding(self):
        # adding two strings should escape the unsafe one
        unsafe = '<script type="application/x-some-script">alert("foo");</script>'
        safe = Markup('<em>username</em>')
        assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
    def test_string_interpolation(self):
        # string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
               '<em><bad user></em>'
        assert Markup('<em>%(username)s</em>') % {
            'username': '<bad user>'
        } == '<em><bad user></em>'
        assert Markup('%i') % 3.14 == '3'
        assert Markup('%.2f') % 3.14 == '3.14'
    def test_type_behavior(self):
        # an escaped object is markup too
        assert type(Markup('foo') + 'bar') is Markup
        # and it implements __html__ by returning itself
        x = Markup("foo")
        assert x.__html__() is x
    def test_html_interop(self):
        # it also knows how to treat __html__ objects
        class Foo(object):
            def __html__(self):
                return '<em>awesome</em>'
            def __unicode__(self):
                return 'awesome'
            __str__ = __unicode__
        assert Markup(Foo()) == '<em>awesome</em>'
        assert Markup('<strong>%s</strong>') % Foo() == \
               '<strong><em>awesome</em></strong>'
    def test_tuple_interpol(self):
        self.assertEqual(Markup('<em>%s:%s</em>') % (
            '<foo>',
            '<bar>',
        ), Markup(u'<em><foo>:<bar></em>'))
    def test_dict_interpol(self):
        self.assertEqual(Markup('<em>%(foo)s</em>') % {
            'foo': '<foo>',
        }, Markup(u'<em><foo></em>'))
        self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
            'foo': '<foo>',
            'bar': '<bar>',
        }, Markup(u'<em><foo>:<bar></em>'))
    def test_escaping(self):
        # escaping and unescaping
        assert escape('"<>&\'') == '"<>&''
        assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
        assert Markup("<test>").unescape() == "<test>"
    def test_all_set(self):
        # every name exported in __all__ must actually exist
        import markupsafe as markup
        for item in markup.__all__:
            getattr(markup, item)
    def test_escape_silent(self):
        # escape_silent maps None to an empty Markup, escape does not
        assert escape_silent(None) == Markup()
        assert escape(None) == Markup(None)
        assert escape_silent('<foo>') == Markup(u'<foo>')
    def test_splitting(self):
        # split results retain the Markup type
        self.assertEqual(Markup('a b').split(), [
            Markup('a'),
            Markup('b')
        ])
        self.assertEqual(Markup('a b').rsplit(), [
            Markup('a'),
            Markup('b')
        ])
        self.assertEqual(Markup('a\nb').splitlines(), [
            Markup('a'),
            Markup('b')
        ])
    def test_mul(self):
        self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
    """Guards against object leaks in the optional C speedups module."""

    def test_markup_leaks(self):
        """Repeated escaping must not grow the number of live objects."""
        observed_counts = set()
        for _round in range(20):
            for _ in range(1000):
                escape("foo")
                escape("<foo>")
                escape(u"foo")
                escape(u"<foo>")
            observed_counts.add(len(gc.get_objects()))
        # A leak would make the live-object count climb between rounds.
        assert len(observed_counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
    """Assemble the default test suite for this module."""
    cases = [MarkupTestCase]
    # this test only tests the c extension
    if not hasattr(escape, 'func_code'):
        cases.append(MarkupLeakTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
# Running this file directly hands control to unittest, which executes
# everything returned by suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
# vim:sts=4:sw=4:et:
| mit |
jelmer/samba | examples/scripts/shares/python/smbparm.py | 40 | 27894 | ######################################################################
##
## autogenerated file of smb.conf parameters
## generate_parm_table <..../param/loadparm.c>
##
## Copyright (C) Gerald Carter 2004.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
######################################################################
from SambaParm import SambaParmString, SambaParmBool, SambaParmBoolRev
## boolean defines for parm_table
# Third tuple element of each parm_table entry: scope of the smb.conf
# parameter (per Samba loadparm conventions -- confirm against loadparm.c).
P_LOCAL = 0   # parameter may be set per share section
P_GLOBAL = 1  # parameter is valid only in the [global] section
parm_table = {
"PRELOADMODULES" : ("preload modules", SambaParmString, P_GLOBAL, ""),
"ONLYGUEST" : ("guest only", SambaParmBool, P_LOCAL, "No"),
"PRIVATEDIR" : ("private dir", SambaParmString, P_GLOBAL, "/etc/samba/private"),
"HIDESPECIALFILES" : ("hide special files", SambaParmBool, P_LOCAL, "No"),
"WINBINDENUMUSERS" : ("winbind enum users", SambaParmBool, P_GLOBAL, "Yes"),
"TIMESTAMPLOGS" : ("debug timestamp", SambaParmBool, P_GLOBAL, "Yes"),
"LDAPPASSWDSYNC" : ("ldap passwd sync", SambaParmString, P_GLOBAL, "no"),
"READBMPX" : ("read bmpx", SambaParmBool, P_GLOBAL, "No"),
"PASSWORDSERVER" : ("password server", SambaParmString, P_GLOBAL, "*"),
"COPY" : ("copy", SambaParmString, P_LOCAL, ""),
"MAXXMIT" : ("max xmit", SambaParmString, P_GLOBAL, "16644"),
"MINPRINTSPACE" : ("min print space", SambaParmString, P_LOCAL, "0"),
"CASESENSITIVE" : ("case sensitive", SambaParmString, P_LOCAL, "Auto"),
"LDAPIDMAPSUFFIX" : ("ldap idmap suffix", SambaParmString, P_GLOBAL, ""),
"NAMECACHETIMEOUT" : ("name cache timeout", SambaParmString, P_GLOBAL, "660"),
"MAPARCHIVE" : ("map archive", SambaParmBool, P_LOCAL, "Yes"),
"LANMANAUTH" : ("lanman auth", SambaParmBool, P_GLOBAL, "Yes"),
"NETBIOSSCOPE" : ("netbios scope", SambaParmString, P_GLOBAL, ""),
"MAXREPORTEDPRINTJOBS" : ("max reported print jobs", SambaParmString, P_LOCAL, "0"),
"CREATEMODE" : ("create mask", SambaParmString, P_LOCAL, "0744"),
"READLIST" : ("read list", SambaParmString, P_LOCAL, ""),
"WINBINDNESTEDGROUPS" : ("winbind nested groups", SambaParmBool, P_GLOBAL, "No"),
"COMMENT" : ("comment", SambaParmString, P_LOCAL, ""),
"PRINTER" : ("printer name", SambaParmString, P_LOCAL, ""),
"LMANNOUNCE" : ("lm announce", SambaParmString, P_GLOBAL, "Auto"),
"SYSLOGONLY" : ("syslog only", SambaParmBool, P_GLOBAL, "No"),
"LMINTERVAL" : ("lm interval", SambaParmString, P_GLOBAL, "60"),
"MANGLINGMETHOD" : ("mangling method", SambaParmString, P_GLOBAL, "hash2"),
"PROFILEACLS" : ("profile acls", SambaParmBool, P_LOCAL, "No"),
"LDAPGROUPSUFFIX" : ("ldap group suffix", SambaParmString, P_GLOBAL, ""),
"MAPTOGUEST" : ("map to guest", SambaParmString, P_GLOBAL, "Never"),
"NULLPASSWORDS" : ("null passwords", SambaParmBool, P_GLOBAL, "No"),
"ONLYUSER" : ("only user", SambaParmBool, P_LOCAL, "No"),
"UTMP" : ("utmp", SambaParmBool, P_GLOBAL, "No"),
"DONTDESCEND" : ("dont descend", SambaParmString, P_LOCAL, ""),
"PRINTING" : ("printing", SambaParmString, P_LOCAL, "cups"),
"SOCKETOPTIONS" : ("socket options", SambaParmString, P_GLOBAL, "TCP_NODELAY"),
"CLIENTUSESPNEGO" : ("client use spnego", SambaParmBool, P_GLOBAL, "Yes"),
"USESPNEGO" : ("use spnego", SambaParmBool, P_GLOBAL, "Yes"),
"FAKEOPLOCKS" : ("fake oplocks", SambaParmBool, P_LOCAL, "No"),
"FORCECREATEMODE" : ("force create mode", SambaParmString, P_LOCAL, "00"),
"SMBPORTS" : ("smb ports", SambaParmString, P_GLOBAL, "445 139"),
"LOCKDIR" : ("lock directory", SambaParmString, P_GLOBAL, "/var/lib/samba"),
"BROWSEABLE" : ("browseable", SambaParmBool, P_LOCAL, "Yes"),
"WINSPROXY" : ("wins proxy", SambaParmBool, P_GLOBAL, "No"),
"FORCEGROUP" : ("force group", SambaParmString, P_LOCAL, ""),
"NTSTATUSSUPPORT" : ("nt status support", SambaParmBool, P_GLOBAL, "Yes"),
"EXEC" : ("preexec", SambaParmString, P_LOCAL, ""),
"DOMAINLOGONS" : ("domain logons", SambaParmBool, P_GLOBAL, "No"),
"TEMPLATESHELL" : ("template shell", SambaParmString, P_GLOBAL, "/bin/false"),
"USESENDFILE" : ("use sendfile", SambaParmBool, P_LOCAL, "No"),
"USEMMAP" : ("use mmap", SambaParmBool, P_GLOBAL, "Yes"),
"VALIDUSERS" : ("valid users", SambaParmString, P_LOCAL, ""),
"DEBUGLEVEL" : ("log level", SambaParmString, P_GLOBAL, "0"),
"PRINTCAPCACHETIME" : ("printcap cache time", SambaParmString, P_GLOBAL, "0"),
"SOCKETADDRESS" : ("socket address", SambaParmString, P_GLOBAL, "0.0.0.0"),
"FORCEDIRECTORYMODE" : ("force directory mode", SambaParmString, P_LOCAL, "00"),
"MSDFSROOT" : ("msdfs root", SambaParmBool, P_LOCAL, "No"),
"ROOTPREEXEC" : ("root preexec", SambaParmString, P_LOCAL, ""),
"WRITEOK" : ("read only", SambaParmBoolRev, P_LOCAL, "Yes"),
"MAXLOGSIZE" : ("max log size", SambaParmString, P_GLOBAL, "5000"),
"VFSOBJECT" : ("vfs objects", SambaParmString, P_LOCAL, ""),
"CHECKPASSWORDSCRIPT" : ("check password script", SambaParmString, P_GLOBAL, ""),
"DELETEPRINTERCOMMAND" : ("deleteprinter command", SambaParmString, P_GLOBAL, ""),
"OSLEVEL" : ("os level", SambaParmString, P_GLOBAL, "20"),
"ENUMPORTSCOMMAND" : ("enumports command", SambaParmString, P_GLOBAL, ""),
"DELETEUSERFROMGROUPSCRIPT": ("delete user from group script", SambaParmString, P_GLOBAL, ""),
"IDMAPGID" : ("idmap gid", SambaParmString, P_GLOBAL, ""),
"PREEXECCLOSE" : ("preexec close", SambaParmBool, P_LOCAL, "No"),
"UTMPDIRECTORY" : ("utmp directory", SambaParmString, P_GLOBAL, ""),
"DOSFILEMODE" : ("dos filemode", SambaParmBool, P_LOCAL, "No"),
"LOGFILE" : ("log file", SambaParmString, P_GLOBAL, ""),
"WORKGROUP" : ("workgroup", SambaParmString, P_GLOBAL, "WORKGROUP"),
"ENCRYPTPASSWORDS" : ("encrypt passwords", SambaParmBool, P_GLOBAL, "Yes"),
"PRINTABLE" : ("printable", SambaParmBool, P_LOCAL, "No"),
"MAXPROTOCOL" : ("max protocol", SambaParmString, P_GLOBAL, "NT1"),
"KERNELOPLOCKS" : ("kernel oplocks", SambaParmBool, P_GLOBAL, "Yes"),
"NETBIOSALIASES" : ("netbios aliases", SambaParmString, P_GLOBAL, ""),
"ANNOUNCEAS" : ("announce as", SambaParmString, P_GLOBAL, "NT"),
"DIRECTORYMASK" : ("directory mask", SambaParmString, P_LOCAL, "0755"),
"MAPSYSTEM" : ("map system", SambaParmBool, P_LOCAL, "No"),
"CHANGENOTIFYTIMEOUT" : ("change notify timeout", SambaParmString, P_GLOBAL, "60"),
"WINBINDTRUSTEDDOMAINSONLY": ("winbind trusted domains only", SambaParmBool, P_GLOBAL, "No"),
"SHUTDOWNSCRIPT" : ("shutdown script", SambaParmString, P_GLOBAL, ""),
"FOLLOWSYMLINKS" : ("follow symlinks", SambaParmBool, P_LOCAL, "Yes"),
"MAPHIDDEN" : ("map hidden", SambaParmBool, P_LOCAL, "No"),
"GROUP" : ("force group", SambaParmString, P_LOCAL, ""),
"DENYHOSTS" : ("hosts deny", SambaParmString, P_LOCAL, ""),
"WINBINDCACHETIME" : ("winbind cache time", SambaParmString, P_GLOBAL, "300"),
"DELETEVETOFILES" : ("delete veto files", SambaParmBool, P_LOCAL, "No"),
"DISABLESPOOLSS" : ("disable spoolss", SambaParmBool, P_GLOBAL, "No"),
"MAXCONNECTIONS" : ("max connections", SambaParmString, P_LOCAL, "0"),
"WRITERAW" : ("write raw", SambaParmBool, P_GLOBAL, "Yes"),
"SERVERSIGNING" : ("server signing", SambaParmString, P_GLOBAL, "No"),
"VOLUME" : ("volume", SambaParmString, P_LOCAL, ""),
"UNIXPASSWORDSYNC" : ("unix password sync", SambaParmBool, P_GLOBAL, "No"),
"OBEYPAMRESTRICTIONS" : ("obey pam restrictions", SambaParmBool, P_GLOBAL, "No"),
"PASSWDCHATTIMEOUT" : ("passwd chat timeout", SambaParmString, P_GLOBAL, "2"),
"USER" : ("username", SambaParmString, P_LOCAL, ""),
"HIDEDOTFILES" : ("hide dot files", SambaParmBool, P_LOCAL, "Yes"),
"ROOTPOSTEXEC" : ("root postexec", SambaParmString, P_LOCAL, ""),
"PROTOCOL" : ("max protocol", SambaParmString, P_GLOBAL, "NT1"),
"LDAPADMINDN" : ("ldap admin dn", SambaParmString, P_GLOBAL, ""),
"DNSPROXY" : ("dns proxy", SambaParmBool, P_GLOBAL, "Yes"),
"OS2DRIVERMAP" : ("os2 driver map", SambaParmString, P_GLOBAL, ""),
"QUEUERESUMECOMMAND" : ("queueresume command", SambaParmString, P_LOCAL, ""),
"SERVERSCHANNEL" : ("server schannel", SambaParmString, P_GLOBAL, "Auto"),
"IDMAPUID" : ("idmap uid", SambaParmString, P_GLOBAL, ""),
"WINBINDENABLELOCALACCOUNTS": ("winbind enable local accounts", SambaParmBool, P_GLOBAL, "No"),
"PRINTERNAME" : ("printer name", SambaParmString, P_LOCAL, ""),
"NTACLSUPPORT" : ("nt acl support", SambaParmBool, P_LOCAL, "Yes"),
"LOGLEVEL" : ("log level", SambaParmString, P_GLOBAL, "0"),
"STATCACHE" : ("stat cache", SambaParmBool, P_GLOBAL, "Yes"),
"LPQCACHETIME" : ("lpq cache time", SambaParmString, P_GLOBAL, "30"),
"LEVEL2OPLOCKS" : ("level2 oplocks", SambaParmBool, P_LOCAL, "Yes"),
"LARGEREADWRITE" : ("large readwrite", SambaParmBool, P_GLOBAL, "Yes"),
"LDAPREPLICATIONSLEEP" : ("ldap replication sleep", SambaParmString, P_GLOBAL, "1000"),
"LDAPUSERSUFFIX" : ("ldap user suffix", SambaParmString, P_GLOBAL, ""),
"NETBIOSNAME" : ("netbios name", SambaParmString, P_GLOBAL, "PANTHER"),
"LOCKSPINCOUNT" : ("lock spin count", SambaParmString, P_GLOBAL, "3"),
"OPLOCKS" : ("oplocks", SambaParmBool, P_LOCAL, "Yes"),
"MINWINSTTL" : ("min wins ttl", SambaParmString, P_GLOBAL, "21600"),
"HOMEDIRMAP" : ("homedir map", SambaParmString, P_GLOBAL, ""),
"REMOTEANNOUNCE" : ("remote announce", SambaParmString, P_GLOBAL, ""),
"PREFERREDMASTER" : ("preferred master", SambaParmString, P_GLOBAL, "Auto"),
"SECURITY" : ("security", SambaParmString, P_GLOBAL, "USER"),
"AUTHMETHODS" : ("auth methods", SambaParmString, P_GLOBAL, ""),
"ENABLERIDALGORITHM" : ("enable rid algorithm", SambaParmBool, P_GLOBAL, "Yes"),
"LPRMCOMMAND" : ("lprm command", SambaParmString, P_LOCAL, ""),
"KERNELCHANGENOTIFY" : ("kernel change notify", SambaParmBool, P_GLOBAL, "Yes"),
"LOGONSCRIPT" : ("logon script", SambaParmString, P_GLOBAL, ""),
"PRESERVECASE" : ("preserve case", SambaParmBool, P_LOCAL, "Yes"),
"UNIXCHARSET" : ("unix charset", SambaParmString, P_GLOBAL, "UTF-8"),
"FORCEPRINTERNAME" : ("force printername", SambaParmBool, P_LOCAL, "No"),
"LDAPFILTER" : ("ldap filter", SambaParmString, P_GLOBAL, "(uid"),
"DELETEREADONLY" : ("delete readonly", SambaParmBool, P_LOCAL, "No"),
"ABORTSHUTDOWNSCRIPT" : ("abort shutdown script", SambaParmString, P_GLOBAL, ""),
"DFREECOMMAND" : ("dfree command", SambaParmString, P_GLOBAL, ""),
"VETOFILES" : ("veto files", SambaParmString, P_LOCAL, ""),
"LOCKING" : ("locking", SambaParmBool, P_LOCAL, "Yes"),
"EASUPPORT" : ("ea support", SambaParmBool, P_LOCAL, "No"),
"MAXSMBDPROCESSES" : ("max smbd processes", SambaParmString, P_GLOBAL, "0"),
"HIDEFILES" : ("hide files", SambaParmString, P_LOCAL, ""),
"PASSWDCHATDEBUG" : ("passwd chat debug", SambaParmBool, P_GLOBAL, "No"),
"SMBPASSWDFILE" : ("smb passwd file", SambaParmString, P_GLOBAL, "/etc/samba/private/smbpasswd"),
"GETQUOTACOMMAND" : ("get quota command", SambaParmString, P_GLOBAL, ""),
"OPLOCKCONTENTIONLIMIT" : ("oplock contention limit", SambaParmString, P_LOCAL, "2"),
"DOMAINMASTER" : ("domain master", SambaParmString, P_GLOBAL, "Auto"),
"DELETESHARECOMMAND" : ("delete share command", SambaParmString, P_GLOBAL, ""),
"INVALIDUSERS" : ("invalid users", SambaParmString, P_LOCAL, ""),
"POSIXLOCKING" : ("posix locking", SambaParmBool, P_LOCAL, "Yes"),
"INCLUDE" : ("include", SambaParmString, P_LOCAL, ""),
"ALGORITHMICRIDBASE" : ("algorithmic rid base", SambaParmString, P_GLOBAL, "1000"),
"ANNOUNCEVERSION" : ("announce version", SambaParmString, P_GLOBAL, "4.9"),
"USERNAMEMAP" : ("username map", SambaParmString, P_GLOBAL, ""),
"MANGLEDNAMES" : ("mangled names", SambaParmBool, P_LOCAL, "Yes"),
"ROOTDIRECTORY" : ("root directory", SambaParmString, P_GLOBAL, ""),
"DEBUGHIRESTIMESTAMP" : ("debug hires timestamp", SambaParmBool, P_GLOBAL, "No"),
"LOGONDRIVE" : ("logon drive", SambaParmString, P_GLOBAL, ""),
"LOCALMASTER" : ("local master", SambaParmBool, P_GLOBAL, "Yes"),
"ROOTPREEXECCLOSE" : ("root preexec close", SambaParmBool, P_LOCAL, "No"),
"CONFIGFILE" : ("config file", SambaParmString, P_GLOBAL, ""),
"USECLIENTDRIVER" : ("use client driver", SambaParmBool, P_LOCAL, "No"),
"MINPROTOCOL" : ("min protocol", SambaParmString, P_GLOBAL, "CORE"),
"ADDUSERTOGROUPSCRIPT" : ("add user to group script", SambaParmString, P_GLOBAL, ""),
"MAPACLINHERIT" : ("map acl inherit", SambaParmBool, P_LOCAL, "No"),
"DELETEUSERSCRIPT" : ("delete user script", SambaParmString, P_GLOBAL, ""),
"WINBINDUID" : ("idmap uid", SambaParmString, P_GLOBAL, ""),
"READRAW" : ("read raw", SambaParmBool, P_GLOBAL, "Yes"),
"WINBINDENUMGROUPS" : ("winbind enum groups", SambaParmBool, P_GLOBAL, "Yes"),
"MAXPRINTJOBS" : ("max print jobs", SambaParmString, P_LOCAL, "1000"),
"PRINTCAP" : ("printcap name", SambaParmString, P_GLOBAL, ""),
"LOADPRINTERS" : ("load printers", SambaParmBool, P_GLOBAL, "Yes"),
"DEFAULT" : ("default service", SambaParmString, P_GLOBAL, ""),
"GUESTACCOUNT" : ("guest account", SambaParmString, P_GLOBAL, "nobody"),
"AUTOSERVICES" : ("preload", SambaParmString, P_GLOBAL, ""),
"WRITEABLE" : ("read only", SambaParmBoolRev, P_LOCAL, "Yes"),
"CLIENTLANMANAUTH" : ("client lanman auth", SambaParmBool, P_GLOBAL, "Yes"),
"MESSAGECOMMAND" : ("message command", SambaParmString, P_GLOBAL, ""),
"UNIXEXTENSIONS" : ("unix extensions", SambaParmBool, P_GLOBAL, "Yes"),
"LDAPPASSWORDSYNC" : ("ldap passwd sync", SambaParmString, P_GLOBAL, "no"),
"AFSUSERNAMEMAP" : ("afs username map", SambaParmString, P_GLOBAL, ""),
"SYSLOG" : ("syslog", SambaParmString, P_GLOBAL, "1"),
"SETPRIMARYGROUPSCRIPT" : ("set primary group script", SambaParmString, P_GLOBAL, ""),
"DEADTIME" : ("deadtime", SambaParmString, P_GLOBAL, "0"),
"RESTRICTANONYMOUS" : ("restrict anonymous", SambaParmString, P_GLOBAL, "0"),
"USERNAMELEVEL" : ("username level", SambaParmString, P_GLOBAL, "0"),
"DISPLAYCHARSET" : ("display charset", SambaParmString, P_GLOBAL, "LOCALE"),
"FORCEUSER" : ("force user", SambaParmString, P_LOCAL, ""),
"HOSTSDENY" : ("hosts deny", SambaParmString, P_LOCAL, ""),
"HIDEUNWRITEABLEFILES" : ("hide unwriteable files", SambaParmBool, P_LOCAL, "No"),
"DOSCHARSET" : ("dos charset", SambaParmString, P_GLOBAL, "CP850"),
"DOSFILETIMES" : ("dos filetimes", SambaParmBool, P_LOCAL, "No"),
"REALM" : ("realm", SambaParmString, P_GLOBAL, ""),
"LDAPSUFFIX" : ("ldap suffix", SambaParmString, P_GLOBAL, ""),
"LPPAUSECOMMAND" : ("lppause command", SambaParmString, P_LOCAL, ""),
"FAKEDIRECTORYCREATETIMES": ("fake directory create times", SambaParmBool, P_LOCAL, "No"),
"MAGICSCRIPT" : ("magic script", SambaParmString, P_LOCAL, ""),
"WRITECACHESIZE" : ("write cache size", SambaParmString, P_LOCAL, "0"),
"BLOCKSIZE" : ("block size", SambaParmString, P_LOCAL, "1024"),
"LOCKSPINTIME" : ("lock spin time", SambaParmString, P_GLOBAL, "10"),
"ACLCOMPATIBILITY" : ("acl compatibility", SambaParmString, P_GLOBAL, ""),
"MSDFSPROXY" : ("msdfs proxy", SambaParmString, P_LOCAL, ""),
"POSTEXEC" : ("postexec", SambaParmString, P_LOCAL, ""),
"HIDEUNREADABLE" : ("hide unreadable", SambaParmBool, P_LOCAL, "No"),
"WIDELINKS" : ("wide links", SambaParmBool, P_LOCAL, "Yes"),
"STRICTSYNC" : ("strict sync", SambaParmBool, P_LOCAL, "No"),
"PRINTCAPNAME" : ("printcap name", SambaParmString, P_GLOBAL, ""),
"PREFEREDMASTER" : ("preferred master", SambaParmString, P_GLOBAL, "Auto"),
"MAXMUX" : ("max mux", SambaParmString, P_GLOBAL, "50"),
"VETOOPLOCKFILES" : ("veto oplock files", SambaParmString, P_LOCAL, ""),
"WINBINDSEPARATOR" : ("winbind separator", SambaParmString, P_GLOBAL, "\\"),
"NISHOMEDIR" : ("NIS homedir", SambaParmBool, P_GLOBAL, "No"),
"AVAILABLE" : ("available", SambaParmBool, P_LOCAL, "Yes"),
"KEEPALIVE" : ("keepalive", SambaParmString, P_GLOBAL, "300"),
"USERNAME" : ("username", SambaParmString, P_LOCAL, ""),
"PRINTCOMMAND" : ("print command", SambaParmString, P_LOCAL, ""),
"LPRESUMECOMMAND" : ("lpresume command", SambaParmString, P_LOCAL, ""),
"USEKERBEROSKEYTAB" : ("use kerberos keytab", SambaParmBool, P_GLOBAL, "No"),
"HOSTSALLOW" : ("hosts allow", SambaParmString, P_LOCAL, ""),
"MAXOPENFILES" : ("max open files", SambaParmString, P_GLOBAL, "10000"),
"PARANOIDSERVERSECURITY" : ("paranoid server security", SambaParmBool, P_GLOBAL, "Yes"),
"WTMPDIRECTORY" : ("wtmp directory", SambaParmString, P_GLOBAL, ""),
"ADDPRINTERCOMMAND" : ("addprinter command", SambaParmString, P_GLOBAL, ""),
"WINSSERVER" : ("wins server", SambaParmString, P_GLOBAL, ""),
"LDAPTIMEOUT" : ("ldap timeout", SambaParmString, P_GLOBAL, "15"),
"LOCKDIRECTORY" : ("lock directory", SambaParmString, P_GLOBAL, "/var/lib/samba"),
"LOGONHOME" : ("logon home", SambaParmString, P_GLOBAL, "\\%N\%U"),
"MINPASSWDLENGTH" : ("min password length", SambaParmString, P_GLOBAL, "5"),
"CLIENTPLAINTEXTAUTH" : ("client plaintext auth", SambaParmBool, P_GLOBAL, "Yes"),
"CSCPOLICY" : ("csc policy", SambaParmString, P_LOCAL, "manual"),
"ADDSHARECOMMAND" : ("add share command", SambaParmString, P_GLOBAL, ""),
"MANGLINGCHAR" : ("mangling char", SambaParmString, P_LOCAL, "~"),
"DIRECTORY" : ("path", SambaParmString, P_LOCAL, ""),
"DEBUGTIMESTAMP" : ("debug timestamp", SambaParmBool, P_GLOBAL, "Yes"),
"ALLOWHOSTS" : ("hosts allow", SambaParmString, P_LOCAL, ""),
"FSTYPE" : ("fstype", SambaParmString, P_LOCAL, "NTFS"),
"BLOCKINGLOCKS" : ("blocking locks", SambaParmBool, P_LOCAL, "Yes"),
"LDAPSSL" : ("ldap ssl", SambaParmString, P_GLOBAL, ""),
"PAMPASSWORDCHANGE" : ("pam password change", SambaParmBool, P_GLOBAL, "No"),
"GUESTOK" : ("guest ok", SambaParmBool, P_LOCAL, "No"),
"DEFAULTDEVMODE" : ("default devmode", SambaParmBool, P_LOCAL, "No"),
"MAXDISKSIZE" : ("max disk size", SambaParmString, P_GLOBAL, "0"),
"ADDMACHINESCRIPT" : ("add machine script", SambaParmString, P_GLOBAL, ""),
"MANGLEPREFIX" : ("mangle prefix", SambaParmString, P_GLOBAL, "1"),
"DISABLENETBIOS" : ("disable netbios", SambaParmBool, P_GLOBAL, "No"),
"LOGONPATH" : ("logon path", SambaParmString, P_GLOBAL, "\\%N\%U\profile"),
"IDMAPBACKEND" : ("idmap backend", SambaParmString, P_GLOBAL, ""),
"SHORTPRESERVECASE" : ("short preserve case", SambaParmBool, P_LOCAL, "Yes"),
"CUPSSERVER" : ("cups server", SambaParmString, P_GLOBAL, ""),
"NTPIPESUPPORT" : ("nt pipe support", SambaParmBool, P_GLOBAL, "Yes"),
"READONLY" : ("read only", SambaParmBool, P_LOCAL, "Yes"),
"MACHINEPASSWORDTIMEOUT" : ("machine password timeout", SambaParmString, P_GLOBAL, "604800"),
"PIDDIRECTORY" : ("pid directory", SambaParmString, P_GLOBAL, "/var/run"),
"PUBLIC" : ("guest ok", SambaParmBool, P_LOCAL, "No"),
"DEBUGPID" : ("debug pid", SambaParmBool, P_GLOBAL, "No"),
"GUESTONLY" : ("guest only", SambaParmBool, P_LOCAL, "No"),
"DELETEGROUPSCRIPT" : ("delete group script", SambaParmString, P_GLOBAL, ""),
"CUPSOPTIONS" : ("cups options", SambaParmString, P_LOCAL, ""),
"PASSWDCHAT" : ("passwd chat", SambaParmString, P_GLOBAL, "*new*password* %n\n *new*password* %n\n *changed*"),
"STRICTLOCKING" : ("strict locking", SambaParmString, P_LOCAL, "Yes"),
"TEMPLATEHOMEDIR" : ("template homedir", SambaParmString, P_GLOBAL, "/home/%D/%U"),
"WINBINDGID" : ("idmap gid", SambaParmString, P_GLOBAL, ""),
"INHERITPERMISSIONS" : ("inherit permissions", SambaParmBool, P_LOCAL, "No"),
"TIMESERVER" : ("time server", SambaParmBool, P_GLOBAL, "No"),
"BROWSELIST" : ("browse list", SambaParmBool, P_GLOBAL, "Yes"),
"HOSTNAMELOOKUPS" : ("hostname lookups", SambaParmBool, P_GLOBAL, "No"),
"DOSFILETIMERESOLUTION" : ("dos filetime resolution", SambaParmBool, P_LOCAL, "No"),
"CREATEMASK" : ("create mask", SambaParmString, P_LOCAL, "0744"),
"WINSHOOK" : ("wins hook", SambaParmString, P_GLOBAL, ""),
"DEFAULTCASE" : ("default case", SambaParmString, P_LOCAL, "lower"),
"PATH" : ("path", SambaParmString, P_LOCAL, ""),
"SHOWADDPRINTERWIZARD" : ("show add printer wizard", SambaParmBool, P_GLOBAL, "Yes"),
"WINSPARTNERS" : ("wins partners", SambaParmString, P_GLOBAL, ""),
"ENABLEPRIVILEGES" : ("enable privileges", SambaParmBool, P_GLOBAL, "No"),
"VFSOBJECTS" : ("vfs objects", SambaParmString, P_LOCAL, ""),
"STRICTALLOCATE" : ("strict allocate", SambaParmBool, P_LOCAL, "No"),
"PREEXEC" : ("preexec", SambaParmString, P_LOCAL, ""),
"WINSSUPPORT" : ("wins support", SambaParmBool, P_GLOBAL, "No"),
"HOSTMSDFS" : ("host msdfs", SambaParmBool, P_GLOBAL, "No"),
"AFSTOKENLIFETIME" : ("afs token lifetime", SambaParmString, P_GLOBAL, "604800"),
"PRINTOK" : ("printable", SambaParmBool, P_LOCAL, "No"),
"TEMPLATEPRIMARYGROUP" : ("template primary group", SambaParmString, P_GLOBAL, "nobody"),
"PASSWDPROGRAM" : ("passwd program", SambaParmString, P_GLOBAL, ""),
"SYNCALWAYS" : ("sync always", SambaParmBool, P_LOCAL, "No"),
"QUEUEPAUSECOMMAND" : ("queuepause command", SambaParmString, P_LOCAL, ""),
"BINDINTERFACESONLY" : ("bind interfaces only", SambaParmBool, P_GLOBAL, "No"),
"MAXWINSTTL" : ("max wins ttl", SambaParmString, P_GLOBAL, "518400"),
"GETWDCACHE" : ("getwd cache", SambaParmBool, P_GLOBAL, "Yes"),
"MAGICOUTPUT" : ("magic output", SambaParmString, P_LOCAL, ""),
"ADMINUSERS" : ("admin users", SambaParmString, P_LOCAL, ""),
"DIRECTORYMODE" : ("directory mask", SambaParmString, P_LOCAL, "0755"),
"CLIENTSIGNING" : ("client signing", SambaParmString, P_GLOBAL, "auto"),
"PASSDBBACKEND" : ("passdb backend", SambaParmString, P_GLOBAL, "smbpasswd"),
"CASESIGNAMES" : ("case sensitive", SambaParmString, P_LOCAL, "Auto"),
"SETQUOTACOMMAND" : ("set quota command", SambaParmString, P_GLOBAL, ""),
"LPQCOMMAND" : ("lpq command", SambaParmString, P_LOCAL, ""),
"SERVERSTRING" : ("server string", SambaParmString, P_GLOBAL, "Samba 3.0.11pre2-SVN-build-4840"),
"DEFAULTSERVICE" : ("default service", SambaParmString, P_GLOBAL, ""),
"WINBINDUSEDEFAULTDOMAIN": ("winbind use default domain", SambaParmBool, P_GLOBAL, "No"),
"INTERFACES" : ("interfaces", SambaParmString, P_GLOBAL, ""),
"ROOTDIR" : ("root directory", SambaParmString, P_GLOBAL, ""),
"ADDUSERSCRIPT" : ("add user script", SambaParmString, P_GLOBAL, ""),
"CLIENTNTLMV2AUTH" : ("client NTLMv2 auth", SambaParmBool, P_GLOBAL, "No"),
"FORCEUNKNOWNACLUSER" : ("force unknown acl user", SambaParmBool, P_LOCAL, "No"),
"MANGLEDMAP" : ("mangled map", SambaParmString, P_LOCAL, ""),
"NTLMAUTH" : ("ntlm auth", SambaParmBool, P_GLOBAL, "Yes"),
"INHERITACLS" : ("inherit acls", SambaParmBool, P_LOCAL, "No"),
"HOSTSEQUIV" : ("hosts equiv", SambaParmString, P_GLOBAL, ""),
"ALLOWTRUSTEDDOMAINS" : ("allow trusted domains", SambaParmBool, P_GLOBAL, "Yes"),
"MINPASSWORDLENGTH" : ("min password length", SambaParmString, P_GLOBAL, "5"),
"USERS" : ("username", SambaParmString, P_LOCAL, ""),
"PRELOAD" : ("preload", SambaParmString, P_GLOBAL, ""),
"DEBUGUID" : ("debug uid", SambaParmBool, P_GLOBAL, "No"),
"CHANGESHARECOMMAND" : ("change share command", SambaParmString, P_GLOBAL, ""),
"BROWSABLE" : ("browseable", SambaParmBool, P_LOCAL, "Yes"),
"ENHANCEDBROWSING" : ("enhanced browsing", SambaParmBool, P_GLOBAL, "Yes"),
"PANICACTION" : ("panic action", SambaParmString, P_GLOBAL, ""),
"LDAPMACHINESUFFIX" : ("ldap machine suffix", SambaParmString, P_GLOBAL, ""),
"MAXTTL" : ("max ttl", SambaParmString, P_GLOBAL, "259200"),
"WRITABLE" : ("read only", SambaParmBoolRev, P_LOCAL, "Yes"),
"SHAREMODES" : ("share modes", SambaParmBool, P_LOCAL, "Yes"),
"REMOTEBROWSESYNC" : ("remote browse sync", SambaParmString, P_GLOBAL, ""),
"STOREDOSATTRIBUTES" : ("store dos attributes", SambaParmBool, P_LOCAL, "No"),
"CLIENTSCHANNEL" : ("client schannel", SambaParmString, P_GLOBAL, "Auto"),
"WRITELIST" : ("write list", SambaParmString, P_LOCAL, ""),
"ADDGROUPSCRIPT" : ("add group script", SambaParmString, P_GLOBAL, ""),
"OPLOCKBREAKWAITTIME" : ("oplock break wait time", SambaParmString, P_GLOBAL, "0"),
"TIMEOFFSET" : ("time offset", SambaParmString, P_GLOBAL, "0"),
"LDAPDELETEDN" : ("ldap delete dn", SambaParmBool, P_GLOBAL, "No"),
"AFSSHARE" : ("afs share", SambaParmBool, P_LOCAL, "No"),
"ROOT" : ("root directory", SambaParmString, P_GLOBAL, ""),
"NAMERESOLVEORDER" : ("name resolve order", SambaParmString, P_GLOBAL, "lmhosts wins host bcast"),
}
##### end of smbparm.py ##########################################
#################################################################
| gpl-3.0 |
tracierenea/gnuradio | docs/doxygen/doxyxml/text.py | 333 | 1832 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Utilities for extracting text from generated classes.
"""
def is_string(txt):
    """Return True if *txt* is a str (or, on Python 2, unicode) instance."""
    if isinstance(txt, str):
        return True
    # Python 2 compatibility: 'unicode' does not exist on Python 3.
    try:
        unicode_type = unicode
    except NameError:
        return False
    return isinstance(txt, unicode_type)
def description(obj):
    """Return the stripped text description of *obj*, or None for None."""
    return None if obj is None else description_bit(obj).strip()
def description_bit(obj):
    """Recursively extract the text carried by a generated doxygen object.

    *obj* may be a plain string, or a generated node exposing one of the
    attributes ``content``, ``content_`` (both iterables of child nodes)
    or ``value`` (a single child).  Anything else is rejected.
    """
    if hasattr(obj, 'content'):
        result = ''.join(description_bit(item) for item in obj.content)
    elif hasattr(obj, 'content_'):
        result = ''.join(description_bit(item) for item in obj.content_)
    elif hasattr(obj, 'value'):
        result = description_bit(obj.value)
    elif is_string(obj):
        return obj
    else:
        # BUG FIX: the original raised StandardError, which no longer exists
        # on Python 3 (it would raise NameError instead).  TypeError is the
        # right category and, on Python 2, is a StandardError subclass, so
        # existing catchers keep working.
        raise TypeError(
            'Expecting a string or something with content, content_ or value attribute')
    # If this bit is a paragraph then add one some line breaks.
    if hasattr(obj, 'name') and obj.name == 'para':
        result += "\n\n"
    return result
| gpl-3.0 |
helldorado/ansible | lib/ansible/modules/windows/win_scheduled_task_stat.py | 31 | 10444 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_scheduled_task_stat
version_added: "2.5"
short_description: Get information about Windows Scheduled Tasks
description:
- Will return whether the folder and task exists.
- Returns the names of tasks in the folder specified.
- Use M(win_scheduled_task) to configure a scheduled task.
options:
path:
description: The folder path where the task lives.
type: str
default: \
name:
description:
- The name of the scheduled task to get information for.
- If C(name) is set and exists, will return information on the task itself.
type: str
seealso:
- module: win_scheduled_task
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Get information about a folder
win_scheduled_task_stat:
path: \folder name
register: task_folder_stat
- name: Get information about a task in the root folder
win_scheduled_task_stat:
name: task name
register: task_stat
- name: Get information about a task in a custom folder
win_scheduled_task_stat:
path: \folder name
name: task name
register: task_stat
'''
RETURN = r'''
actions:
description: A list of actions.
returned: name is specified and task exists
type: list
sample: [
{
"Arguments": "/c echo hi",
"Id": null,
"Path": "cmd.exe",
"Type": "TASK_ACTION_EXEC",
"WorkingDirectory": null
}
]
folder_exists:
description: Whether the folder set at path exists.
returned: always
type: bool
sample: true
folder_task_count:
description: The number of tasks that exist in the folder.
returned: always
type: int
sample: 2
folder_task_names:
description: A list of tasks that exist in the folder.
returned: always
type: list
sample: [ 'Task 1', 'Task 2' ]
principal:
description: Details on the principal configured to run the task.
returned: name is specified and task exists
type: complex
contains:
display_name:
description: The name of the user/group that is displayed in the Task
Scheduler UI.
returned: ''
type: str
sample: Administrator
group_id:
description: The group that will run the task.
returned: ''
type: str
sample: BUILTIN\Administrators
id:
description: The ID for the principal.
returned: ''
type: str
sample: Author
logon_type:
description: The logon method that the task will run with.
returned: ''
type: str
sample: TASK_LOGON_INTERACTIVE_TOKEN
run_level:
description: The level of user rights used to run the task.
returned: ''
type: str
sample: TASK_RUNLEVEL_LUA
user_id:
description: The user that will run the task.
returned: ''
type: str
sample: SERVER\Administrator
registration_info:
description: Details on the task registration info.
returned: name is specified and task exists
type: complex
contains:
author:
      description: The author of the task.
returned: ''
type: str
sample: SERVER\Administrator
date:
description: The date when the task was register.
returned: ''
type: str
sample: '2017-01-01T10:00:00'
description:
description: The description of the task.
returned: ''
type: str
sample: task description
documentation:
description: The documentation of the task.
returned: ''
type: str
sample: task documentation
security_descriptor:
description: The security descriptor of the task.
returned: ''
type: str
sample: security descriptor
source:
description: The source of the task.
returned: ''
type: str
sample: source
uri:
description: The URI/path of the task.
returned: ''
type: str
sample: \task\task name
version:
description: The version of the task.
returned: ''
type: str
sample: 1.0
settings:
description: Details on the task settings.
returned: name is specified and task exists
type: complex
contains:
allow_demand_start:
      description: Whether the task can be started by using either the Run
        command or the Context menu.
returned: ''
type: bool
sample: true
allow_hard_terminate:
      description: Whether the task can be terminated by using TerminateProcess.
returned: ''
type: bool
sample: true
compatibility:
description: The compatibility level of the task
returned: ''
type: int
sample: 2
delete_expired_task_after:
description: The amount of time the Task Scheduler will wait before
deleting the task after it expires.
returned: ''
type: str
sample: PT10M
disallow_start_if_on_batteries:
description: Whether the task will not be started if the computer is
running on battery power.
returned: ''
type: bool
sample: false
disallow_start_on_remote_app_session:
description: Whether the task will not be started when in a remote app
session.
returned: ''
type: bool
sample: true
enabled:
description: Whether the task is enabled.
returned: ''
type: bool
sample: true
execution_time_limit:
description: The amount of time allowed to complete the task.
returned: ''
type: str
sample: PT72H
hidden:
description: Whether the task is hidden in the UI.
returned: ''
type: bool
sample: false
idle_settings:
description: The idle settings of the task.
returned: ''
type: dict
sample: {
"idle_duration": "PT10M",
"restart_on_idle": false,
"stop_on_idle_end": true,
"wait_timeout": "PT1H"
}
maintenance_settings:
description: The maintenance settings of the task.
returned: ''
type: str
sample: null
mulitple_instances:
description: Indicates the behaviour when starting a task that is already
running.
returned: ''
type: int
sample: 2
network_settings:
description: The network settings of the task.
returned: ''
type: dict
sample: {
"id": null,
"name": null
}
priority:
description: The priority level of the task.
returned: ''
type: int
sample: 7
restart_count:
description: The number of times that the task will attempt to restart
on failures.
returned: ''
type: int
sample: 0
restart_interval:
description: How long the Task Scheduler will attempt to restart the
task.
returned: ''
type: str
sample: PT15M
run_only_id_idle:
description: Whether the task will run if the computer is in an idle
state.
returned: ''
type: bool
sample: true
run_only_if_network_available:
description: Whether the task will run only when a network is available.
returned: ''
type: bool
sample: false
start_when_available:
description: Whether the task can start at any time after its scheduled
time has passed.
returned: ''
type: bool
sample: false
stop_if_going_on_batteries:
description: Whether the task will be stopped if the computer begins to
run on battery power.
returned: ''
type: bool
sample: true
use_unified_scheduling_engine:
      description: Whether the task will use the unified scheduling engine.
returned: ''
type: bool
sample: false
volatile:
      description: Whether the task is volatile.
returned: ''
type: bool
sample: false
wake_to_run:
description: Whether the task will wake the computer when it is time to
run the task.
returned: ''
type: bool
sample: false
state:
description: Details on the state of the task
returned: name is specified and task exists
type: complex
contains:
last_run_time:
description: The time the registered task was last run.
returned: ''
type: str
sample: '2017-09-20T20:50:00'
last_task_result:
description: The results that were returned the last time the task was
run.
returned: ''
type: int
sample: 267009
next_run_time:
description: The time when the task is next scheduled to run.
returned: ''
type: str
sample: '2017-09-20T22:50:00'
number_of_missed_runs:
description: The number of times a task has missed a scheduled run.
returned: ''
type: int
sample: 1
status:
description: The status of the task, whether it is running, stopped, etc.
returned: ''
type: str
sample: TASK_STATE_RUNNING
task_exists:
description: Whether the task at the folder exists.
returned: name is specified
type: bool
sample: true
triggers:
description: A list of triggers.
returned: name is specified and task exists
type: list
sample: [
{
"delay": "PT15M",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"start_boundary": null,
"type": "TASK_TRIGGER_BOOT"
},
{
"days_of_month": "5,15,30",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"months_of_year": "june,december",
"random_delay": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"run_on_last_day_of_month": true,
"start_boundary": "2017-09-20T03:44:38",
"type": "TASK_TRIGGER_MONTHLY"
}
]
'''
| gpl-3.0 |
wenbinf/ndkale | kale/tests/test_task.py | 1 | 9275 | """Module testing the kale.task module."""
import mock
import unittest
from kale import exceptions
from kale import task
from kale import test_utils
class TaskFailureTestCase(unittest.TestCase):
    """Tests for Task.run and Task.handle_failure (retry/permanent-failure logic).

    NOTE(review): testTaskRetries is a verbatim duplicate of
    testTaskRetriesExceeded, and testTaskRuntimeExceeded actually exercises
    the retry/delay schedule rather than a timeout — presumably copy/paste
    artifacts; confirm intent before renaming or removing.
    """

    def _create_patch(self, name):
        """Start a mock.patch for ``name`` and auto-stop it at test teardown."""
        patcher = mock.patch(name)
        patch = patcher.start()
        self.addCleanup(patcher.stop)
        return patch

    def testRunWorker(self):
        """A successful run calls setup, pre_run, post_run and cleanup hooks."""
        setup_env = self._create_patch(
            'kale.task.Task._setup_task_environment')
        pre_run = self._create_patch('kale.task.Task._pre_run')
        post_run = self._create_patch('kale.task.Task._post_run')
        clean_env = self._create_patch(
            'kale.task.Task._clean_task_environment')
        task_inst = test_utils.new_mock_task(task_class=test_utils.MockTask)
        task_args = [1, 'a']
        task_inst.run(*task_args)
        setup_env.assert_called_once_with()
        pre_run.assert_called_once_with(*task_args)
        post_run.assert_called_once_with(*task_args)
        clean_env.assert_called_once_with(
            task_id='mock_task', task_name='kale.test_utils.MockTask')

    def testRunWorkerFailTask(self):
        """A failing run skips post_run, passes the exception to cleanup
        and still records timing information."""
        setup_env = self._create_patch(
            'kale.task.Task._setup_task_environment')
        pre_run = self._create_patch('kale.task.Task._pre_run')
        post_run = self._create_patch('kale.task.Task._post_run')
        clean_env = self._create_patch(
            'kale.task.Task._clean_task_environment')
        task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
        task_inst._start_time = 1
        task_args = [1, 'a']
        with self.assertRaises(exceptions.TaskException) as exc_ctxt_mngr:
            task_inst.run(*task_args)
        setup_env.assert_called_once_with()
        pre_run.assert_called_once_with(*task_args)
        assert not post_run.called, '_post_run should not have been called.'
        clean_env.assert_called_once_with(
            task_id='fail_task', task_name='kale.test_utils.FailTask',
            exc=exc_ctxt_mngr.exception)
        self.assertTrue(task_inst._end_time > 0)
        self.assertTrue(task_inst._task_latency_sec > 0)

    def testTaskUnrecoverableException(self):
        """A non-task exception (SyntaxError) is treated as unrecoverable:
        no retry, permanent failure reported."""
        task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
        message = test_utils.MockMessage(task_inst)
        with mock.patch(
                'kale.task.Task._report_permanent_failure') as fail_func:
            exc = SyntaxError('Unrecoverable Error')
            retried = test_utils.FailTask.handle_failure(message, exc)
            self.assertFalse(retried)
            fail_func.assert_called_once_with(
                message, exc, task.PERMANENT_FAILURE_UNRECOVERABLE, True)

    def testTaskNoRetries(self):
        """A task class with retries disabled fails permanently on first error."""
        task_inst = test_utils.new_mock_task(
            task_class=test_utils.FailTaskNoRetries)
        message = test_utils.MockMessage(task_inst)
        with mock.patch(
                'kale.task.Task._report_permanent_failure') as fail_func:
            exc = exceptions.TaskException('Exception')
            retried = test_utils.FailTaskNoRetries.handle_failure(message, exc)
            self.assertFalse(retried)
            fail_func.assert_called_once_with(
                message, exc, task.PERMANENT_FAILURE_NO_RETRY, True)

    def testTaskRetriesExceeded(self):
        """A failure at retry_num == max_retries is reported as permanent."""
        task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
        message = test_utils.MockMessage(
            task_inst, retry_num=test_utils.FailTask.max_retries)
        with mock.patch(
                'kale.task.Task._report_permanent_failure') as fail_func:
            exc = exceptions.TaskException('Exception')
            retried = test_utils.FailTask.handle_failure(message, exc)
            self.assertFalse(retried)
            fail_func.assert_called_once_with(
                message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)

    def testTaskRetries(self):
        """NOTE(review): identical to testTaskRetriesExceeded above —
        presumably meant to test a *successful* retry; confirm and fix."""
        task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
        message = test_utils.MockMessage(
            task_inst, retry_num=test_utils.FailTask.max_retries)
        with mock.patch(
                'kale.task.Task._report_permanent_failure') as fail_func:
            exc = exceptions.TaskException('Exception')
            retried = test_utils.FailTask.handle_failure(message, exc)
            self.assertFalse(retried)
            fail_func.assert_called_once_with(
                message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)

    def testTaskRuntimeExceeded(self):
        """Each failure below max_retries republishes the task with the
        class-defined backoff delay; the final failure is permanent.

        NOTE(review): the method name says "runtime exceeded" but the body
        exercises the retry/delay schedule — confirm intended name.
        """
        task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
        # (retry_num, expected backoff delay) pairs for every allowed retry.
        sample_values = [
            (i, test_utils.FailTask._get_delay_sec_for_retry(i)) for i in
            range(task_inst.max_retries)]
        payload = {
            'args': [],
            'kwargs': {},
            'app_data': {}}
        for retry, delay_sec in sample_values:
            with mock.patch(
                    'kale.publisher.Publisher.publish') as publish_func:
                message = test_utils.MockMessage(task_inst, retry_num=retry)
                retried = test_utils.FailTask.handle_failure(
                    message, exceptions.TaskException('Exception'))
                self.assertTrue(retried)
                publish_func.assert_called_once_with(
                    test_utils.FailTask, message.task_id, payload,
                    current_retry_num=(retry + 1), delay_sec=delay_sec)
        # One failure past the last allowed retry must be permanent.
        retry = retry + 1
        with mock.patch(
                'kale.task.Task._report_permanent_failure') as fail_func:
            exc = exceptions.TaskException('Exception')
            message = test_utils.MockMessage(task_inst, retry_num=retry)
            retried = test_utils.FailTask.handle_failure(message, exc)
            self.assertFalse(retried)
            fail_func.assert_called_once_with(
                message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)

    def testTargetRuntimeExceeded(self):
        """A slow (but not failing) task triggers the runtime-exceeded alert."""
        task_inst = test_utils.new_mock_task(
            task_class=test_utils.SlowButNotTooSlowTask)
        with mock.patch(
                'kale.task.Task._alert_runtime_exceeded') as time_exceeded:
            task_inst.run()
            self.assertTrue(time_exceeded.called)

    def testBlacklistedTaskFails(self):
        """A blacklisted task raises before run_task and reports the
        blacklist exception to the cleanup hook."""
        setup_env = self._create_patch(
            'kale.task.Task._setup_task_environment')
        pre_run = self._create_patch('kale.task.Task._pre_run')
        run_task = self._create_patch('kale.task.Task.run_task')
        clean_env = self._create_patch(
            'kale.task.Task._clean_task_environment')
        check_blacklist = self._create_patch('kale.task.Task._check_blacklist')
        raised_exc = exceptions.BlacklistedException()
        check_blacklist.side_effect = raised_exc
        task_inst = test_utils.new_mock_task(task_class=test_utils.MockTask)
        task_inst._start_time = 1
        task_args = [1, 'a']
        with self.assertRaises(exceptions.BlacklistedException):
            task_inst.run(*task_args)
        setup_env.assert_called_once_with()
        pre_run.assert_called_once_with(*task_args)
        self.assertFalse(run_task.called)
        clean_env.assert_called_once_with(
            task_id='mock_task', task_name='kale.test_utils.MockTask',
            exc=raised_exc)

    def testBlacklistedTaskNoRetries(self):
        """A blacklisted task is not retried: handle_failure marks it as a
        permanent failure."""
        setup_env = self._create_patch(
            'kale.task.Task._setup_task_environment')
        pre_run = self._create_patch('kale.task.Task._pre_run')
        run_task = self._create_patch('kale.task.Task.run_task')
        clean_env = self._create_patch(
            'kale.task.Task._clean_task_environment')
        check_blacklist = self._create_patch('kale.task.Task._check_blacklist')
        raised_exc = exceptions.BlacklistedException()
        check_blacklist.side_effect = raised_exc
        mock_message = test_utils.new_mock_message(
            task_class=test_utils.MockTask)
        task_inst = mock_message.task_inst
        task_inst._start_time = 1
        task_args = [1, 'a']
        with self.assertRaises(exceptions.BlacklistedException):
            task_inst.run(*task_args)
        setup_env.assert_called_once_with()
        pre_run.assert_called_once_with(*task_args)
        self.assertFalse(run_task.called)
        clean_env.assert_called_once_with(
            task_id='mock_task', task_name='kale.test_utils.MockTask',
            exc=raised_exc)
        # A blacklisted task must not be retried; handle_failure returning
        # False means the failure is permanent.
        permanent_failure = not task_inst.__class__.handle_failure(
            mock_message, raised_exc)
        self.assertTrue(permanent_failure)
| bsd-2-clause |
axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/unit_tests/test_credentials.py | 6 | 8612 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class Test_get_credentials(unittest.TestCase):
    """get_credentials must delegate to google.auth.default()."""

    def _call_fut(self):
        from google.cloud import credentials
        return credentials.get_credentials()

    def test_it(self):
        with mock.patch('google.auth.default', autospec=True) as default:
            default.return_value = (
                mock.sentinel.credentials, mock.sentinel.project)
            result = self._call_fut()
            default.assert_called_once_with()
        # Only the credentials half of the (credentials, project) pair
        # is returned.
        self.assertIs(result, mock.sentinel.credentials)
class Test_generate_signed_url(unittest.TestCase):
    """Tests for credentials.generate_signed_url's URL construction."""

    def _call_fut(self, *args, **kwargs):
        from google.cloud.credentials import generate_signed_url
        return generate_signed_url(*args, **kwargs)

    def _generate_helper(self, response_type=None, response_disposition=None,
                         generation=None):
        """Call generate_signed_url with a stubbed signer and verify every
        query parameter of the resulting URL."""
        import base64
        from six.moves.urllib.parse import parse_qs
        from six.moves.urllib.parse import urlsplit
        import google.auth.credentials
        from google.cloud._testing import _Monkey
        from google.cloud import credentials as MUT
        ENDPOINT = 'http://api.example.com'
        RESOURCE = '/name/path'
        SIGNED = base64.b64encode(b'DEADBEEF')
        CREDENTIALS = mock.Mock(spec=google.auth.credentials.Signing)
        CREDENTIALS.signer_email = 'service@example.com'

        # Stub out the real signing helper so no crypto is involved; the
        # fake returns a fixed signature and echoes account/expiration.
        def _get_signed_query_params(*args):
            credentials, expiration = args[:2]
            return {
                'GoogleAccessId': credentials.signer_email,
                'Expires': str(expiration),
                'Signature': SIGNED,
            }

        with _Monkey(MUT, _get_signed_query_params=_get_signed_query_params):
            url = self._call_fut(CREDENTIALS, RESOURCE, 1000,
                                 api_access_endpoint=ENDPOINT,
                                 response_type=response_type,
                                 response_disposition=response_disposition,
                                 generation=generation)
        scheme, netloc, path, qs, frag = urlsplit(url)
        self.assertEqual(scheme, 'http')
        self.assertEqual(netloc, 'api.example.com')
        self.assertEqual(path, RESOURCE)
        params = parse_qs(qs)
        # In Py3k, parse_qs gives us text values:
        self.assertEqual(params.pop('Signature'), [SIGNED.decode('ascii')])
        self.assertEqual(params.pop('Expires'), ['1000'])
        self.assertEqual(params.pop('GoogleAccessId'),
                         [CREDENTIALS.signer_email])
        if response_type is not None:
            self.assertEqual(params.pop('response-content-type'),
                             [response_type])
        if response_disposition is not None:
            self.assertEqual(params.pop('response-content-disposition'),
                             [response_disposition])
        if generation is not None:
            self.assertEqual(params.pop('generation'), [generation])
        # Make sure we have checked them all.
        self.assertEqual(len(params), 0)
        self.assertEqual(frag, '')

    def test_w_expiration_int(self):
        self._generate_helper()

    def test_w_custom_fields(self):
        # Optional parameters must appear as extra query parameters.
        response_type = 'text/plain'
        response_disposition = 'attachment; filename=blob.png'
        generation = '123'
        self._generate_helper(response_type=response_type,
                              response_disposition=response_disposition,
                              generation=generation)
class Test_generate_signed_url_exception(unittest.TestCase):
    """generate_signed_url must reject credentials that cannot sign."""

    def test_with_google_credentials(self):
        import time
        import google.auth.credentials
        from google.cloud.credentials import generate_signed_url

        # Plain (non-Signing) credentials lack the signing attributes, so
        # the call must blow up with AttributeError.
        creds = mock.Mock(spec=google.auth.credentials.Credentials)
        expires = int(time.time() + 5)
        with self.assertRaises(AttributeError):
            generate_signed_url(creds, resource='/name/path',
                                expiration=expires)
class Test__get_signed_query_params(unittest.TestCase):
    """Unit tests for credentials._get_signed_query_params."""

    def _call_fut(self, credentials, expiration, string_to_sign):
        from google.cloud.credentials import _get_signed_query_params
        return _get_signed_query_params(
            credentials, expiration, string_to_sign)

    def test_it(self):
        import base64
        import google.auth.credentials

        signature_bytes = b'DEADBEEF'
        account_name = mock.sentinel.service_account_email
        signing_creds = mock.Mock(spec=google.auth.credentials.Signing)
        signing_creds.signer_email = account_name
        signing_creds.sign_bytes.return_value = signature_bytes
        expiration = 100
        string_to_sign = 'dummy_signature'

        params = self._call_fut(signing_creds, expiration, string_to_sign)

        # The signature must be base64-encoded and paired with the signer's
        # e-mail and the stringified expiration.
        expected = {
            'GoogleAccessId': account_name,
            'Expires': str(expiration),
            'Signature': base64.b64encode(signature_bytes),
        }
        self.assertEqual(params, expected)
        signing_creds.sign_bytes.assert_called_once_with(string_to_sign)
class Test__get_expiration_seconds(unittest.TestCase):
    """Tests for credentials._get_expiration_seconds conversion rules."""

    def _call_fut(self, expiration):
        from google.cloud.credentials import _get_expiration_seconds
        return _get_expiration_seconds(expiration)

    def _utc_seconds(self, when):
        """Convert a datetime to integer seconds since the epoch (UTC)."""
        import calendar
        return int(calendar.timegm(when.timetuple()))

    def test_w_invalid(self):
        # Unsupported types must raise TypeError.
        self.assertRaises(TypeError, self._call_fut, object())
        self.assertRaises(TypeError, self._call_fut, None)

    def test_w_int(self):
        # Integers pass through unchanged.
        self.assertEqual(self._call_fut(123), 123)

    def test_w_long(self):
        # Python 2 only: long behaves like int; on Py3 'long' is undefined
        # and the test body is skipped.
        try:
            long
        except NameError: # pragma: NO COVER Py3K
            pass
        else:
            self.assertEqual(self._call_fut(long(123)), 123)

    def test_w_naive_datetime(self):
        # A naive datetime is interpreted as UTC.
        import datetime
        expiration_no_tz = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
        utc_seconds = self._utc_seconds(expiration_no_tz)
        self.assertEqual(self._call_fut(expiration_no_tz), utc_seconds)

    def test_w_utc_datetime(self):
        import datetime
        from google.cloud._helpers import UTC
        expiration_utc = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
        utc_seconds = self._utc_seconds(expiration_utc)
        self.assertEqual(self._call_fut(expiration_utc), utc_seconds)

    def test_w_other_zone_datetime(self):
        # Aware datetimes in other zones are converted to UTC first.
        import datetime
        from google.cloud._helpers import _UTC

        class CET(_UTC):
            _tzname = 'CET'
            _utcoffset = datetime.timedelta(hours=1)

        zone = CET()
        expiration_other = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, zone)
        utc_seconds = self._utc_seconds(expiration_other)
        cet_seconds = utc_seconds - (60 * 60) # CET one hour earlier than UTC
        self.assertEqual(self._call_fut(expiration_other), cet_seconds)

    def test_w_timedelta_seconds(self):
        # A timedelta is added to "now" (patched for determinism).
        import datetime
        from google.cloud._testing import _Monkey
        from google.cloud import credentials as MUT
        dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
        utc_seconds = self._utc_seconds(dummy_utcnow)
        expiration_as_delta = datetime.timedelta(seconds=10)
        with _Monkey(MUT, _NOW=lambda: dummy_utcnow):
            result = self._call_fut(expiration_as_delta)
        self.assertEqual(result, utc_seconds + 10)

    def test_w_timedelta_days(self):
        import datetime
        from google.cloud._testing import _Monkey
        from google.cloud import credentials as MUT
        dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
        utc_seconds = self._utc_seconds(dummy_utcnow)
        expiration_as_delta = datetime.timedelta(days=1)
        with _Monkey(MUT, _NOW=lambda: dummy_utcnow):
            result = self._call_fut(expiration_as_delta)
        self.assertEqual(result, utc_seconds + 86400)
| apache-2.0 |
salfab/CouchPotatoServer | libs/subliminal/services/tvsubtitles.py | 106 | 6240 | # -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <wackou@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
logger = logging.getLogger(__name__)
def match(pattern, string):
    """Return the first capture group of ``pattern`` found in ``string``.

    Logs a debug message and returns None when the pattern does not match.

    The previous implementation called ``.group(1)`` directly and caught
    AttributeError, which could also silently swallow unrelated
    AttributeErrors raised while evaluating the expression; an explicit
    None check is both clearer and narrower.

    :param pattern: regular expression with at least one capture group.
    :param string: text to search.
    :return: contents of group 1, or None when there is no match.
    """
    found = re.search(pattern, string)
    if found is None:
        logger.debug(u'Could not match %r on %r' % (pattern, string))
        return None
    return found.group(1)
class TvSubtitles(ServiceBase):
    """Subtitle service scraping tvsubtitles.net (no API, HTML parsing)."""

    server_url = 'http://www.tvsubtitles.net'
    api_based = False
    languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
                              'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
                              'zh', 'pt-br'])
    # Site-specific language codes that differ from standard ones.
    #TODO: Find more exceptions
    language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
                    'cn': Language('chi')}
    videos = [Episode]
    require_video = False
    required_features = ['permissive']

    @cachedmethod
    def get_likely_series_id(self, name):
        """Search the site for ``name`` and return the first result's show id.

        NOTE(review): raises IndexError when the search yields no results
        (``results[0]``) — presumably callers rely on that; confirm.
        """
        r = self.session.post('%s/search.php' % self.server_url, data={'q': name})
        soup = BeautifulSoup(r.content, self.required_features)
        maindiv = soup.find('div', 'left')
        results = []
        for elem in maindiv.find_all('li'):
            # Result links look like "tvshow-<id>.html"; titles end in " (year)".
            sid = int(match('tvshow-([0-9]+)\.html', elem.a['href']))
            show_name = match('(.*) \(', elem.a.text)
            results.append((show_name, sid))
        #TODO: pick up the best one in a smart way
        result = results[0]
        return result[1]

    @cachedmethod
    def get_episode_id(self, series_id, season, number):
        """Get the TvSubtitles id for the given episode. Raises KeyError if none
        could be found."""
        # download the page of the season, contains ids for all episodes
        episode_id = None
        r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season))
        soup = BeautifulSoup(r.content, self.required_features)
        table = soup.find('table', id='table5')
        for row in table.find_all('tr'):
            cells = row.find_all('td')
            if not cells:
                continue
            # First cell holds "<season>x<episode>"; skip header rows.
            episode_number = match('x([0-9]+)', cells[0].text)
            if not episode_number:
                continue
            episode_number = int(episode_number)
            episode_id = int(match('episode-([0-9]+)', cells[1].a['href']))
            # we could just return the id of the queried episode, but as we
            # already downloaded the whole page we might as well fill in the
            # information for all the episodes of the season
            self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id)
        # raises KeyError if not found
        return self.cached_value(self.get_episode_id, args=(series_id, season, number))

    # Do not cache this method in order to always check for the most recent
    # subtitles
    def get_sub_ids(self, episode_id):
        """Scrape the episode page and return a list of dicts with keys
        'subid', 'language' and (when present) 'rip'/'release'."""
        subids = []
        r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id))
        epsoup = BeautifulSoup(r.content, self.required_features)
        for subdiv in epsoup.find_all('a'):
            if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'):
                continue
            subid = int(match('([0-9]+)', subdiv['href']))
            # The language is encoded in the flag image filename.
            lang = self.get_language(match('flags/(.*).gif', subdiv.img['src']))
            result = {'subid': subid, 'language': lang}
            for p in subdiv.find_all('p'):
                if 'alt' in p.attrs and p['alt'] == 'rip':
                    result['rip'] = p.text.strip()
                if 'alt' in p.attrs and p['alt'] == 'release':
                    result['release'] = p.text.strip()
            subids.append(result)
        return subids

    def list_checked(self, video, languages):
        """Entry point used by the framework once the video is validated."""
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)

    def query(self, filepath, languages, keywords, series, season, episode):
        """Return ResultSubtitle objects for the episode, restricted to the
        requested languages; empty list when the episode is unknown."""
        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        sid = self.get_likely_series_id(series.lower())
        try:
            ep_id = self.get_episode_id(sid, season, episode)
        except KeyError:
            logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode))
            return []
        subids = self.get_sub_ids(ep_id)
        # filter the subtitles with our queried languages
        subtitles = []
        for subid in subids:
            language = subid['language']
            if language not in languages:
                continue
            path = get_subtitle_path(filepath, language, self.config.multi)
            subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']),
                                      keywords=[subid['rip'], subid['release']])
            subtitles.append(subtitle)
        return subtitles

    def download(self, subtitle):
        """Download and extract the zipped subtitle to subtitle.path."""
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle
Service = TvSubtitles
| gpl-3.0 |
jnerin/ansible | lib/ansible/modules/network/nxos/nxos_overlay_global.py | 20 | 5917 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_overlay_global
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Configures anycast gateway MAC of the switch.
description:
- Configures anycast gateway MAC of the switch.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default restores params default value
- Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE",
"EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
options:
anycast_gateway_mac:
description:
- Anycast gateway mac of the switch.
required: true
default: null
'''
EXAMPLES = '''
- nxos_overlay_global:
anycast_gateway_mac: "b.b.b"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["fabric forwarding anycast-gateway-mac 000B.000B.000B"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Maps each module argument to the NX-OS CLI command stem it configures.
PARAM_TO_COMMAND_KEYMAP = {
    'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
}
def get_existing(module, args):
    """Read the device's running config and return {arg: current value}.

    An argument whose command is absent from the config maps to ''.
    """
    running = str(get_config(module))
    existing = {}
    for arg in args:
        pattern = r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg])
        matches = re.findall(pattern, running, re.M)
        existing[arg] = matches[0] if matches else ''
    return existing
def apply_key_map(key_map, table):
    """Translate the keys of ``table`` through ``key_map``.

    Entries with a falsy value are dropped; a key missing from
    ``key_map`` maps to ``None`` (dict.get default).
    """
    return dict(
        (key_map.get(key), value)
        for key, value in table.items()
        if value
    )
def get_commands(module, existing, proposed, candidate):
    """Compute the CLI commands needed to move from ``existing`` to
    ``proposed`` and add them to the ``candidate`` config object.

    'default' as a proposed value removes the existing setting; otherwise
    the anycast-gateway MAC is normalized before comparison so that
    equivalent spellings do not generate spurious changes.
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    # NOTE: the loop variable deliberately reuses (shadows) the ``proposed``
    # parameter name; only the per-key value is used from here on.
    for key, proposed in proposed_commands.items():
        existing_value = existing_commands.get(key)
        if proposed == 'default' and existing_value:
            commands.append('no {0} {1}'.format(key, existing_value))
        elif 'anycast-gateway-mac' in key and proposed != 'default':
            # Normalize both sides to dotted format before comparing.
            proposed = normalize_mac(proposed, module)
            existing_value = normalize_mac(existing_value, module)
            if proposed != existing_value:
                command = '{0} {1}'.format(key, proposed)
                commands.append(command)
    if commands:
        candidate.add(commands, parents=[])
def normalize_mac(proposed_mac, module):
    """Normalize a MAC address to Cisco dotted format ('EEEE.EEEE.EEEE').

    Accepted inputs: 'E.E.E' / 'EEEE.EEEE.EEEE' (dotted, 1-4 hex digits per
    group), 'EE-EE-EE-EE-EE-EE' (dashed) and 'EE:EE:EE:EE:EE:EE' (colon).

    :param proposed_mac: MAC address string, or None (returns '').
    :param module: AnsibleModule, used to report a malformed address via
        fail_json (which terminates the module).
    :return: upper-cased dotted MAC, e.g. '000B.000B.000B'.
    """
    if proposed_mac is None:
        return ''
    try:
        if '-' in proposed_mac:
            splitted_mac = proposed_mac.split('-')
            if len(splitted_mac) != 6:
                raise ValueError
            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError
        elif '.' in proposed_mac:
            splitted_mac = []
            splitted_dot_mac = proposed_mac.split('.')
            if len(splitted_dot_mac) != 3:
                raise ValueError
            for octect in splitted_dot_mac:
                if len(octect) > 4:
                    raise ValueError
                else:
                    # Bug fix: always pad each dotted group to exactly 4 hex
                    # digits.  The previous ``zfill(padding + 1)`` (where
                    # padding = 4 - len) produced 3-character groups for 2-
                    # and 3-digit input, corrupting the regrouped address.
                    splitted_mac.append(octect.zfill(4))
        elif ':' in proposed_mac:
            splitted_mac = proposed_mac.split(':')
            if len(splitted_mac) != 6:
                raise ValueError
            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError
        else:
            raise ValueError
    except ValueError:
        module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac)
    # Regroup the 12 hex digits into three dotted groups of four.
    joined_mac = ''.join(splitted_mac)
    mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
    return '.'.join(mac).upper()
def main():
    """Module entry point: diff desired vs. running config and apply.

    Reads the single ``anycast_gateway_mac`` argument, computes the CLI
    delta via get_commands(), pushes it unless running in check mode, and
    exits through module.exit_json().
    """
    argument_spec = dict(
        anycast_gateway_mac=dict(required=True, type='str'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'commands': [], 'warnings': warnings}
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    # Only keep supplied arguments that this module knows how to configure.
    proposed = dict((k, v) for k, v in module.params.items()
                    if v is not None and k in args)
    candidate = CustomNetworkConfig(indent=3)
    get_commands(module, existing, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        # In check mode report the would-be commands without loading them.
        if not module.check_mode:
            load_config(module, candidate)
        result['changed'] = True
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
costalfy/Vega-Strike | data/cgi-accountserver/register_submit.py | 3 | 2329 | #! /usr/bin/python
import sys
import os
import string
import cgitb; cgitb.enable()
import db
import settings
header = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>Account Creation Results</title>
</head>
<body>
"""
footer = """
</body>
</html>
"""
# NOTE(review): these flags are not referenced in this handler; presumably
# they document which HTTP methods the script accepts (POST only) — confirm
# against the other CGI scripts before removing.
get_form=False
post_form=True
def execute(dbconn, mod, form):
    """Handle an account-modification form POST and emit the HTML response.

    :param dbconn: open db connection object (provides check_string,
        check_password, modify_account).
    :param mod: mod identifier taken from the query string / POST body;
        appended to links so the user stays in the same mod.
    :param form: dict of decoded POST fields (username, password, faction,
        type).
    """
    query = ''
    if mod:
        query = '?'+mod
    # CGI response header must precede any body output.
    print("Content-Type: text/html")
    print("")
    errorvar="<br><a href=\"register.py"+query+"\">Return To Account Creation Form</a>"
    success = False
    failed_error = "Unknown error"
    try:
        # check_string sanitizes/escapes user input before it reaches the db.
        username = dbconn.check_string( form.get("username",'') )
        password = dbconn.check_string( form.get("password",'') )
        faction = dbconn.check_string( form.get("faction",'') )
        # NOTE: 'type' shadows the builtin; kept as-is for compatibility.
        type = dbconn.check_string( form.get("type",'') )
        if not dbconn.check_password(username, password, True):
            raise db.DBError("Error password for username "+username+" does not match our records")
        dbconn.modify_account(username, type, faction)
        success = True
    except db.DBError:
        success = False
        # sys.exc_info()[1] fetches the active exception (py2/py3 compatible).
        failed_error = sys.exc_info()[1]
    print(header)
    print('<br>')
    if success:
        url = 'vsconfig.py?username='+username+'&password='+password+'&mod='+mod
        print("To play, start up the game, click on 'Multiplayer', type in your login and password, and then click 'Join Game'")
        #print 'Download a functional'
        #print '<a href="'+url+'" title="Download config file">vegastrike.config</a>'
        #print 'to put in your vegastrike folder that has your appropriate login and password<br>'
        #print 'Or, <a href="'+url+'&action=view">view the config file</a> in your browser.<br>'
    else:
        print('<b>Registration Error</b>: ' + str(failed_error))
        print(errorvar)
    print(footer)
if __name__=='__main__':
    # CGI entry point: decode the POST body (if any), pick up the mod from
    # either the form or the query string, then handle the request.
    post_args = {}
    if os.environ.get('REQUEST_METHOD','GET') == 'POST':
        leng = os.environ['CONTENT_LENGTH']
        post_args = db.urlDecode(sys.stdin.read(int(leng)))
    if "mod" in post_args:
        mod = post_args["mod"]
    else:
        mod = os.environ.get('QUERY_STRING','')
    conn = db.connect(settings.dbconfig, mod)
    # GET requests (no post_args) produce no output at all.
    if post_args:
        execute(conn, mod, post_args)
| gpl-2.0 |
pyfa-org/Pyfa | gui/builtinViews/implantEditor.py | 1 | 12699 | import re
# noinspection PyPackageRequirements
import wx
# noinspection PyPackageRequirements
from wx.lib.buttons import GenBitmapButton
import gui.builtinMarketBrowser.pfSearchBox as SBox
import gui.display as d
from gui.bitmap_loader import BitmapLoader
from gui.marketBrowser import SearchBox
from service.market import Market
def stripHtml(text):
    """Convert <br> tags to newlines, then drop all remaining HTML tags."""
    withBreaks = re.sub(r'<\s*br\s*/?\s*>', '\n', text)
    return re.sub(r'</?[^/]+?(/\s*)?>', '', withBreaks)
class BaseImplantEditorView(wx.Panel):
    """Two-pane implant editor panel.

    Left pane: a market tree (or, while searching, a flat item list) of
    available implants. Right pane: the implants plugged into the current
    context, with add/remove buttons between the panes.

    Abstract: subclasses supply the context (character or fit) by implementing
    bindContext, getImplantsFromContext, addImplantToContext and
    removeImplantFromContext.
    """

    def addMarketViewImage(self, iconFile):
        """Add iconFile to the market-tree image list; return its index,
        or -1 (wx convention for "no image") when the icon cannot be loaded."""
        if iconFile is None:
            return -1
        bitmap = BitmapLoader.getBitmap(iconFile, "icons")
        if bitmap is None:
            return -1
        else:
            return self.availableImplantsImageList.Add(bitmap)

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                          style=wx.TAB_TRAVERSAL)
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
        pmainSizer = wx.BoxSizer(wx.HORIZONTAL)
        availableSizer = wx.BoxSizer(wx.VERTICAL)
        # Search box plus a (hidden until used) flat result list; the market
        # tree and the item view swap visibility when a search is active.
        self.searchBox = SearchBox(self)
        self.itemView = ItemView(self)
        self.itemView.Hide()
        availableSizer.Add(self.searchBox, 0, wx.EXPAND)
        availableSizer.Add(self.itemView, 1, wx.EXPAND)
        self.availableImplantsTree = wx.TreeCtrl(self, wx.ID_ANY, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT)
        root = self.availableRoot = self.availableImplantsTree.AddRoot("Available")
        self.availableImplantsImageList = wx.ImageList(16, 16)
        self.availableImplantsTree.SetImageList(self.availableImplantsImageList)
        availableSizer.Add(self.availableImplantsTree, 1, wx.EXPAND)
        pmainSizer.Add(availableSizer, 1, wx.ALL | wx.EXPAND, 5)
        # Middle column: add / remove buttons, vertically centered.
        buttonSizer = wx.BoxSizer(wx.VERTICAL)
        buttonSizer.AddStretchSpacer()
        self.btnAdd = GenBitmapButton(self, wx.ID_ADD, BitmapLoader.getBitmap("fit_add_small", "gui"),
                                      style=wx.BORDER_NONE)
        buttonSizer.Add(self.btnAdd, 0)
        self.btnRemove = GenBitmapButton(self, wx.ID_REMOVE, BitmapLoader.getBitmap("fit_delete_small", "gui"),
                                         style=wx.BORDER_NONE)
        buttonSizer.Add(self.btnRemove, 0)
        buttonSizer.AddStretchSpacer()
        pmainSizer.Add(buttonSizer, 0, wx.EXPAND, 0)
        # Right pane: implants currently plugged into the context.
        characterImplantSizer = wx.BoxSizer(wx.VERTICAL)
        self.pluggedImplantsTree = AvailableImplantsView(self)
        characterImplantSizer.Add(self.pluggedImplantsTree, 1, wx.ALL | wx.EXPAND, 5)
        pmainSizer.Add(characterImplantSizer, 1, wx.EXPAND, 5)
        self.SetSizer(pmainSizer)
        # Tooltip bookkeeping, see OnLeftTreeMouseMove / OnRightListMouseMove.
        self.hoveredLeftTreeTypeID = None
        self.hoveredRightListRow = None
        # Populate the market tree
        sMkt = Market.getInstance()
        for mktGrp in sMkt.getImplantTree():
            iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(mktGrp))
            childId = self.availableImplantsTree.AppendItem(root, mktGrp.name, iconId, data=mktGrp.ID)
            # Groups with sub-groups get a "dummy" child so wx shows the
            # expand arrow; the dummy is replaced lazily in expandLookup.
            if sMkt.marketGroupHasTypesCheck(mktGrp) is False:
                self.availableImplantsTree.AppendItem(childId, "dummy")
        self.availableImplantsTree.SortChildren(self.availableRoot)
        # Bind the event to replace dummies by real data
        self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.expandLookup)
        self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.itemSelected)
        self.availableImplantsTree.Bind(wx.EVT_MOTION, self.OnLeftTreeMouseMove)
        self.availableImplantsTree.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeftTreeMouseLeave)
        self.itemView.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.itemSelected)
        self.pluggedImplantsTree.Bind(wx.EVT_MOTION, self.OnRightListMouseMove)
        # Bind add & remove buttons
        self.btnAdd.Bind(wx.EVT_BUTTON, self.itemSelected)
        self.btnRemove.Bind(wx.EVT_BUTTON, self.removeItem)
        # We update with an empty list first to set the initial size for Layout(), then update later with actual
        # implants for character. This helps with sizing issues.
        self.pluggedImplantsTree.update([])
        self.bindContext()
        self.Layout()
        self.update()

    def bindContext(self):
        # Binds self.contextChanged to whatever changes the context
        raise NotImplementedError()

    def getImplantsFromContext(self):
        """ Gets list of implants from current context """
        raise NotImplementedError()

    def addImplantToContext(self, item):
        """ Adds implant to the current context"""
        raise NotImplementedError()

    def removeImplantFromContext(self, implant):
        """ Removes implant from the current context"""
        raise NotImplementedError()

    def update(self):
        """Updates implant list based off the current context"""
        self.implants = self.getImplantsFromContext()[:]
        # Display order follows the implant slot ("implantness") attribute.
        self.implants.sort(key=lambda i: int(i.getModifiedItemAttr("implantness")))
        self.pluggedImplantsTree.update(self.implants)

    def contextChanged(self, event):
        self.update()
        event.Skip()

    def expandLookup(self, event):
        """Lazily replace placeholder children with real market groups/items
        the first time a tree node is expanded."""
        tree = self.availableImplantsTree
        sMkt = Market.getInstance()
        parent = event.Item
        child, _ = tree.GetFirstChild(parent)
        text = tree.GetItemText(child)
        if text == "dummy" or text == "itemdummy":
            tree.Delete(child)
        # if the dummy item is a market group, replace with actual market groups
        if text == "dummy":
            # Add 'real stoof!' instead
            currentMktGrp = sMkt.getMarketGroup(tree.GetItemData(parent), eager="children")
            for childMktGrp in sMkt.getMarketGroupChildren(currentMktGrp):
                iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(childMktGrp))
                childId = tree.AppendItem(parent, childMktGrp.name, iconId, data=childMktGrp.ID)
                # Again insert a placeholder so the node shows an expander.
                if sMkt.marketGroupHasTypesCheck(childMktGrp) is False:
                    tree.AppendItem(childId, "dummy")
                else:
                    tree.AppendItem(childId, "itemdummy")
        # replace dummy with actual items
        if text == "itemdummy":
            currentMktGrp = sMkt.getMarketGroup(tree.GetItemData(parent))
            items = sMkt.getItemsByMarketGroup(currentMktGrp)
            for item in items:
                iconId = self.addMarketViewImage(item.iconID)
                tree.AppendItem(parent, item.name, iconId, data=item)
        tree.SortChildren(parent)

    def itemSelected(self, event):
        """Add the selected implant(s) to the context.

        Triggered by double-click on the tree/list or by the add button; in
        the button case the event source is rewritten so the same code path
        handles both widgets.
        """
        if event.EventObject is self.btnAdd:
            # janky fix that sets EventObject so that we don't have similar code elsewhere.
            if self.itemView.IsShown():
                event.EventObject = self.itemView
            else:
                event.EventObject = self.availableImplantsTree
        if event.EventObject is self.itemView:
            # Search-result list: add every selected row.
            curr = event.EventObject.GetFirstSelected()
            while curr != -1:
                item = self.itemView.items[curr]
                self.addImplantToContext(item)
                curr = event.EventObject.GetNextSelected(curr)
        else:
            root = self.availableImplantsTree.GetSelection()
            if not root.IsOk():
                return
            nchilds = self.availableImplantsTree.GetChildrenCount(root)
            # Only leaf nodes carry an actual item; group nodes just propagate
            # the event (so wx can toggle expansion).
            if nchilds == 0:
                item = self.availableImplantsTree.GetItemData(root)
                self.addImplantToContext(item)
            else:
                event.Skip()
                return
        self.update()

    def removeItem(self, event):
        """Remove the implant selected in the right-hand list, if any."""
        pos = self.pluggedImplantsTree.GetFirstSelected()
        if pos != -1:
            self.removeImplantFromContext(self.implants[pos])
            self.update()

    # Due to https://github.com/wxWidgets/Phoenix/issues/1372 we cannot set tooltips on
    # tree itself; work this around with following two methods, by setting tooltip to
    # parent window
    def OnLeftTreeMouseMove(self, event):
        event.Skip()
        treeItemId, _ = self.availableImplantsTree.HitTest(event.Position)
        if not treeItemId:
            # Not over any node: drop the tooltip.
            if self.hoveredLeftTreeTypeID is not None:
                self.hoveredLeftTreeTypeID = None
                self.SetToolTip(None)
            return
        item = self.availableImplantsTree.GetItemData(treeItemId)
        isImplant = getattr(item, 'isImplant', False)
        if not isImplant:
            # Market-group nodes store a plain ID, not an item - no tooltip.
            if self.hoveredLeftTreeTypeID is not None:
                self.hoveredLeftTreeTypeID = None
                self.SetToolTip(None)
            return
        if self.hoveredLeftTreeTypeID == item.ID:
            return
        # NOTE(review): when a tooltip is already shown for a different item,
        # this only clears it; the new tooltip is set on the next motion event.
        # Presumably intentional (forces a clear/set cycle) - confirm.
        if self.ToolTip is not None:
            self.SetToolTip(None)
        else:
            self.hoveredLeftTreeTypeID = item.ID
            toolTip = wx.ToolTip(stripHtml(item.description))
            toolTip.SetMaxWidth(self.GetSize().Width)
            self.SetToolTip(toolTip)

    def OnLeftTreeMouseLeave(self, event):
        event.Skip()
        self.SetToolTip(None)

    def OnRightListMouseMove(self, event):
        event.Skip()
        row, _, col = self.pluggedImplantsTree.HitTestSubItem(event.Position)
        if row != self.hoveredRightListRow:
            # Same clear-then-set-on-next-event pattern as the left tree.
            if self.pluggedImplantsTree.ToolTip is not None:
                self.pluggedImplantsTree.SetToolTip(None)
            else:
                self.hoveredRightListRow = row
                try:
                    implant = self.implants[row]
                except IndexError:
                    # Hovering past the last row (HitTestSubItem returned -1
                    # or an empty area).
                    self.pluggedImplantsTree.SetToolTip(None)
                else:
                    toolTip = wx.ToolTip(stripHtml(implant.item.description))
                    toolTip.SetMaxWidth(self.pluggedImplantsTree.GetSize().Width)
                    self.pluggedImplantsTree.SetToolTip(toolTip)
class AvailableImplantsView(d.Display):
    """Right-hand list of implants plugged into the current context.

    Single-selection display; double-clicking a row delegates to the parent
    editor's removeItem handler, which unplugs that implant.
    """

    DEFAULT_COLS = ["attr:implantness", "Base Name"]

    def __init__(self, parent):
        d.Display.__init__(self, parent, style=wx.LC_SINGLE_SEL)
        self.Bind(wx.EVT_LEFT_DCLICK, parent.removeItem)
class ItemView(d.Display):
    """Flat list of implant search results.

    Hidden by default; swapped in for the market tree while a search query of
    at least 3 real characters is active (see scheduleSearch/clearSearch).
    """

    DEFAULT_COLS = ["Base Icon",
                    "Base Name"]

    def __init__(self, parent):
        d.Display.__init__(self, parent)
        self.parent = parent
        self.searchBox = parent.searchBox
        # Row currently hovered, for tooltip bookkeeping in OnMouseMove.
        self.hoveredRow = None
        # Items currently displayed; indices match list rows.
        self.items = []
        # Bind search actions
        self.searchBox.Bind(SBox.EVT_TEXT_ENTER, self.scheduleSearch)
        self.searchBox.Bind(SBox.EVT_SEARCH_BTN, self.scheduleSearch)
        self.searchBox.Bind(SBox.EVT_CANCEL_BTN, self.clearSearch)
        self.searchBox.Bind(SBox.EVT_TEXT, self.scheduleSearch)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)

    def clearSearch(self, event=None):
        """Leave search mode: show the market tree again and empty this list.
        When triggered by the cancel button (event is set), also clear the
        search box text."""
        if self.IsShown():
            self.parent.availableImplantsTree.Show()
            self.Hide()
            self.parent.Layout()
        if event:
            self.searchBox.Clear()
        self.items = []
        self.update(self.items)

    def scheduleSearch(self, event=None):
        """Kick off an asynchronous market search for the current query."""
        sMkt = Market.getInstance()
        search = self.searchBox.GetLineText(0)
        # Make sure we do not count wildcards as search symbol
        realsearch = search.replace('*', '').replace('?', '')
        # Show nothing if query is too short
        if len(realsearch) < 3:
            self.clearSearch()
            return
        # populateSearch is invoked as callback with the matching item IDs.
        sMkt.searchItems(search, self.populateSearch, 'implants')

    def populateSearch(self, itemIDs):
        """Callback for searchItems: swap this list in for the tree and show
        the matching items (boosters are filtered out)."""
        if not self.IsShown():
            self.parent.availableImplantsTree.Hide()
            self.Show()
            self.parent.Layout()
        # NOTE(review): getItems is called on the Market class, not on an
        # instance (elsewhere this module goes through Market.getInstance());
        # confirm it is usable this way.
        items = Market.getItems(itemIDs)
        items = [i for i in items if i.group.name != 'Booster']
        self.items = sorted(list(items), key=lambda i: i.name)
        self.update(self.items)

    def OnMouseMove(self, event):
        """Show the hovered item's description as a tooltip (clear-then-set
        across two motion events, matching the parent panel's pattern)."""
        event.Skip()
        row, _, col = self.HitTestSubItem(event.Position)
        if row != self.hoveredRow:
            if self.ToolTip is not None:
                self.SetToolTip(None)
            else:
                self.hoveredRow = row
                try:
                    item = self.items[row]
                except IndexError:
                    # Mouse is not over a populated row.
                    self.SetToolTip(None)
                else:
                    toolTip = wx.ToolTip(stripHtml(item.description))
                    toolTip.SetMaxWidth(self.GetSize().Width)
                    self.SetToolTip(toolTip)
| gpl-3.0 |
bchareyre/ratchet | py/ymport.py | 1 | 14686 | """
Import geometry from various formats ('import' is python keyword, hence the name 'ymport').
"""
from yade.wrapper import *
from yade import utils
try:
from minieigen import *
except ImportError:
from miniEigen import *
def textExt(fileName,format='x_y_z_r',shift=Vector3.Zero,scale=1.0,**kw):
    """Load sphere coordinates from file in specific format, returns a list of
    corresponding bodies; that may be inserted to the simulation with
    O.bodies.append().

    :param str fileName: file name
    :param str format: the name of output format. Supported `x_y_z_r` (default),
        `x_y_z_r_matId`, `id_x_y_z_r_matId`
    :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
    :param float scale: factor scales the given data.
    :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
    :returns: list of spheres.

    Lines starting with # are skipped; a leading ``#format <name>`` line
    overrides the *format* argument.
    """
    with open(fileName, "r") as src:
        rows = src.readlines()
    spheres = []
    for row in rows:
        fields = row.split()
        # An embedded "#format" directive overrides the caller-supplied format;
        # any other comment line is simply skipped.
        if fields[0] == "#format":
            format = fields[1]
            continue
        elif fields[0][0] == "#":
            continue
        if format == 'x_y_z_r':
            center = Vector3(float(fields[0]), float(fields[1]), float(fields[2]))
            spheres.append(utils.sphere(shift + scale * center, scale * float(fields[3]), **kw))
        elif format == 'x_y_z_r_matId':
            center = Vector3(float(fields[0]), float(fields[1]), float(fields[2]))
            spheres.append(utils.sphere(shift + scale * center, scale * float(fields[3]), material=int(fields[4]), **kw))
        elif format == 'id_x_y_z_r_matId':
            center = Vector3(float(fields[1]), float(fields[2]), float(fields[3]))
            spheres.append(utils.sphere(shift + scale * center, scale * float(fields[4]), material=int(fields[5]), **kw))
        else:
            raise RuntimeError("Please, specify a correct format output!")
    return spheres
def textClumps(fileName,shift=Vector3.Zero,discretization=0,orientation=Quaternion((0,1,0),0.0),scale=1.0,**kw):
    """Load clumps-members from file (one sphere per line, columns
    ``x y z r clumpId``), insert them to the simulation.

    :param str fileName: file name
    :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
    :param float scale: factor scales the given data.
    :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
    :returns: list of (clumpId,[memberId,...]) tuples as returned by
        O.bodies.appendClumped for each clump.

    Lines starting with # are skipped
    """
    infile = open(fileName, "r")
    lines = infile.readlines()
    infile.close()
    ret = []
    curClump = []
    newClumpId = -1
    for line in lines:
        data = line.split()
        if (data[0][0] == "#"):
            continue
        pos = orientation * Vector3(float(data[0]), float(data[1]), float(data[2]))
        # Members with the same clump id (column 4) are collected together;
        # the file is expected to list each clump's members contiguously.
        if (newClumpId < 0 or newClumpId == int(data[4])):
            curClump.append(utils.sphere(shift + scale * pos, scale * float(data[3]), **kw))
            newClumpId = int(data[4])
        else:
            # A new clump id starts: flush the collected members as one clump.
            newClumpId = int(data[4])
            ret.append(O.bodies.appendClumped(curClump, discretization=discretization))
            curClump = [utils.sphere(shift + scale * pos, scale * float(data[3]), **kw)]
    # Flush the trailing clump. (The original used the Python-2-only '<>'
    # operator here and assigned list.append()'s None return to a dead local.)
    if len(curClump) != 0:
        ret.append(O.bodies.appendClumped(curClump, discretization=discretization))
    # Set the mask to a clump the same as the first member of it
    for i in range(len(ret)):
        O.bodies[ret[i][0]].mask = O.bodies[ret[i][1][0]].mask
    return ret
def text(fileName,shift=Vector3.Zero,scale=1.0,**kw):
    """Load sphere coordinates from file, returns a list of corresponding bodies; that may be inserted to the simulation with O.bodies.append().

    :param string fileName: file which has 4 colums [x, y, z, radius].
    :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
    :param float scale: factor scales the given data.
    :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
    :returns: list of spheres.

    Lines starting with # are skipped
    """
    # Thin wrapper: delegates to textExt with the default 'x_y_z_r' format.
    return textExt(fileName=fileName,format='x_y_z_r',shift=shift,scale=scale,**kw)
def stl(file, dynamic=None,fixed=True,wire=True,color=None,highlight=False,noBound=False,material=-1):
    """ Import geometry from stl file, return list of created facets.

    Each triangle of the STL mesh becomes one facet body; facets get the given
    (or a random) color and are set up with zero mass and zero inertia via
    utils._commonBodySetup.
    """
    imp = STLImporter()
    facets=imp.ymport(file)
    for b in facets:
        # Random color per facet unless an explicit color was supplied.
        b.shape.color=color if color else utils.randomColor()
        b.shape.wire=wire
        b.shape.highlight=highlight
        pos=b.state.pos
        # mass=0 and zero inertia vector: facets carry no dynamics of their own.
        utils._commonBodySetup(b,0,Vector3(0,0,0),material=material,pos=pos,noBound=noBound,dynamic=dynamic,fixed=fixed)
        b.aspherical=False
    return facets
def gts(meshfile,shift=(0,0,0),scale=1.0,**kw):
    """ Read given meshfile in gts format.

    :Parameters:
        `meshfile`: string
            name of the input file.
        `shift`: [float,float,float]
            [X,Y,Z] parameter moves the specimen.
        `scale`: float
            factor scales the given data.
        `**kw`: (unused keyword arguments)
            is passed to :yref:`yade.utils.facet`
    :Returns: list of facets.
    """
    import gts,yade.pack
    surf=gts.read(open(meshfile))
    surf.scale(scale)
    surf.translate(shift)
    # Bug fix: the facet list was built but never returned, so callers always
    # got None despite the documented ":Returns: list of facets".
    return yade.pack.gtsSurface2Facets(surf,**kw)
def gmsh(meshfile="file.mesh",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw):
    """ Imports geometry from mesh file and creates facets.

    :Parameters:
        `shift`: [float,float,float]
            [X,Y,Z] parameter moves the specimen.
        `scale`: float
            factor scales the given data.
        `orientation`: quaternion
            orientation of the imported mesh
        `**kw`: (unused keyword arguments)
            is passed to :yref:`yade.utils.facet`
    :Returns: list of facets forming the specimen.

    mesh files can be easily created with `GMSH <http://www.geuz.org/gmsh/>`_.
    Example added to :ysrc:`examples/regular-sphere-pack/regular-sphere-pack.py`

    Additional examples of mesh-files can be downloaded from
    http://www-roc.inria.fr/gamma/download/download.php
    """
    # Note: the original used the Python-2-only '<>' operator and shadowed the
    # builtin 'id'; both fixed below without changing behaviour.
    infile = open(meshfile, "r")
    lines = infile.readlines()
    infile.close()

    # Find the 'Vertices' keyword; the following line holds the vertex count.
    findVerticesString = 0
    while lines[findVerticesString].split()[0] != 'Vertices':
        findVerticesString += 1
    findVerticesString += 1
    numNodes = int(lines[findVerticesString].split()[0])

    # Read node coordinates, applying scale, orientation and shift.
    nodelistVector3 = []
    for line in lines[findVerticesString + 1:numNodes + findVerticesString + 1]:
        data = line.split()
        nodelistVector3.append(
            orientation * Vector3(float(data[0]) * scale, float(data[1]) * scale, float(data[2]) * scale) + shift)

    # Find the 'Triangles' keyword the same way.
    findTriangleString = findVerticesString + numNodes
    while lines[findTriangleString].split()[0] != 'Triangles':
        findTriangleString += 1
    findTriangleString += 1
    numTriangles = int(lines[findTriangleString].split()[0])

    # Mesh files use 1-based node ids; convert to 0-based list indices and
    # build one facet per triangle.
    ret = []
    for line in lines[findTriangleString + 1:findTriangleString + numTriangles + 1]:
        data = line.split()
        v1 = nodelistVector3[int(data[0]) - 1]
        v2 = nodelistVector3[int(data[1]) - 1]
        v3 = nodelistVector3[int(data[2]) - 1]
        ret.append(utils.facet((v1, v2, v3), **kw))
    return ret
def gengeoFile(fileName="file.geo",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw):
    """ Imports geometry from LSMGenGeo .geo file and creates spheres.
    Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo
    http://packages.qa.debian.org/p/python-demgengeo.html

    :Parameters:
        `fileName`: string
            file which has 4 colums [x, y, z, radius].
        `shift`: Vector3
            Vector3(X,Y,Z) parameter moves the specimen.
        `scale`: float
            factor scales the given data.
        `orientation`: quaternion
            orientation of the imported geometry
        `**kw`: (unused keyword arguments)
            is passed to :yref:`yade.utils.sphere`
    :Returns: list of spheres.

    LSMGenGeo library allows one to create pack of spheres
    with given [Rmin:Rmax] with null stress inside the specimen.
    Can be useful for Mining Rock simulation.

    Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`.

    * https://answers.launchpad.net/esys-particle/+faq/877
    * http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html
    * https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/"""
    from yade.utils import sphere
    with open(fileName, "r") as geoFile:
        lines = geoFile.readlines()
    # Line 7 (index 6) of a .geo file holds the sphere count; sphere records
    # follow, one per line: x y z r.
    numSpheres = int(lines[6].split()[0])
    ret = []
    for line in lines[7:numSpheres + 7]:
        fields = line.split()
        center = orientation * Vector3(float(fields[0]), float(fields[1]), float(fields[2]))
        ret.append(utils.sphere(shift + scale * center, scale * float(fields[3]), **kw))
    return ret
def gengeo(mntable,shift=Vector3.Zero,scale=1.0,**kw):
    """ Imports geometry from LSMGenGeo library and creates spheres.
    Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo
    http://packages.qa.debian.org/p/python-demgengeo.html

    :Parameters:
        `mntable`: mntable
            object, which creates by LSMGenGeo library, see example
        `shift`: [float,float,float]
            [X,Y,Z] parameter moves the specimen.
        `scale`: float
            factor scales the given data.
        `**kw`: (unused keyword arguments)
            is passed to :yref:`yade.utils.sphere`

    LSMGenGeo library allows one to create pack of spheres
    with given [Rmin:Rmax] with null stress inside the specimen.
    Can be useful for Mining Rock simulation.

    Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`.

    * https://answers.launchpad.net/esys-particle/+faq/877
    * http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html
    * https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/"""
    # The library was renamed at some point; try the new module name first.
    try:
        from GenGeo import MNTable3D, Sphere
    except ImportError:
        from gengeo import MNTable3D, Sphere
    ret = []
    for sph in mntable.getSphereListFromGroup(0):
        radius = sph.Radius()
        centre = sph.Centre()
        ret.append(utils.sphere([shift[0] + scale * float(centre.X()),
                                 shift[1] + scale * float(centre.Y()),
                                 shift[2] + scale * float(centre.Z())],
                                scale * float(radius), **kw))
    return ret
def unv(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
    """ Import geometry from unv file, return list of created facets.

    :param string fileName: name of unv file
    :param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen.
    :param float scale: factor scales the given data.
    :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet`
    :param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets

    unv files are mainly used for FEM analyses (are used by `OOFEM <http://www.oofem.org/>`_ and `Abaqus <http://www.simulia.com/products/abaqus_fea.html>`_), but triangular elements can be imported as facets.
    These files can be created e.g. with open-source free software `Salome <http://salome-platform.org>`_.

    Example: :ysrc:`examples/test/unv-read/unvRead.py`."""

    class UNVReader:
        # class used in ymport.unv function
        # reads and evaluates the given unv file and extracts all triangles;
        # can be extended to read tetrahedrons as well
        def __init__(self,fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
            self.shift = shift
            self.scale = scale
            self.unvFile = open(fileName,'r')
            # flag encodes the current section: 1=nodes, 2=edges, 3=facets,
            # 4=other (ignored).
            self.flag = 0
            self.line = self.unvFile.readline()
            self.lineSplit = self.line.split()
            self.nodes = []
            self.elements = []
            self.read(**kw)
        def readLine(self):
            self.line = self.unvFile.readline()
            self.lineSplit = self.line.split()
        def read(self,**kw):
            while self.line:
                self.evalLine()
                self.line = self.unvFile.readline()
            self.unvFile.close()
            self.createFacets(**kw)
        def evalLine(self):
            self.lineSplit = self.line.split()
            if len(self.lineSplit) <= 1: # eval special unv format
                # Section markers: '-1' separates datasets, '2411' starts the
                # node dataset, '2412' the element dataset.
                if self.lineSplit[0] == '-1': pass
                elif self.lineSplit[0] == '2411': self.flag = 1; # nodes
                elif self.lineSplit[0] == '2412': self.flag = 2; # edges (lines)
                else: self.flag = 4; # volume elements or other, not interesting for us (at least yet)
            elif self.flag == 1: self.evalNodes()
            elif self.flag == 2: self.evalEdge()
            elif self.flag == 3: self.evalFacet()
            #elif self.flag == 4: self.evalGroup()
        def evalNodes(self):
            # The coordinate record follows the node header line.
            self.readLine()
            self.nodes.append((
                self.shift[0]+self.scale*float(self.lineSplit[0]),
                self.shift[1]+self.scale*float(self.lineSplit[1]),
                self.shift[2]+self.scale*float(self.lineSplit[2])))
        def evalEdge(self):
            # Element type '41' is a triangle; switch to facet mode. Other
            # edge records span two lines, which are skipped.
            if self.lineSplit[1]=='41':
                self.flag = 3
                self.evalFacet()
            else:
                self.readLine()
                self.readLine()
        def evalFacet(self):
            if self.lineSplit[1]=='41': # triangle
                self.readLine()
                # unv node ids are 1-based; store 0-based indices.
                self.elements.append((
                    int(self.lineSplit[0])-1,
                    int(self.lineSplit[1])-1,
                    int(self.lineSplit[2])-1))
            else: # is not triangle
                self.readLine()
                self.flag = 4
            # can be added function to handle tetrahedrons
        def createFacets(self,**kw):
            self.facets = [utils.facet(tuple(self.nodes[i] for i in e),**kw) for e in self.elements]
    #
    unvReader = UNVReader(fileName,shift,scale,returnConnectivityTable,**kw)
    if returnConnectivityTable:
        return unvReader.facets, unvReader.nodes, unvReader.elements
    # Bug fix: this used to be 'return facets', a NameError -- the list lives
    # on the reader instance.
    return unvReader.facets
def iges(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
    """ Import triangular mesh from .igs file, return list of created facets.

    :param string fileName: name of iges file
    :param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen.
    :param float scale: factor scales the given data.
    :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet`
    :param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets
    """
    nodes,elems = [],[]
    # 'with' ensures the file handle is closed (the original leaked it).
    with open(fileName) as f:
        for line in f:
            if line.startswith('134,'): # read nodes coordinates
                ls = line.split(',')
                v = Vector3(
                    float(ls[1])*scale + shift[0],
                    float(ls[2])*scale + shift[1],
                    float(ls[3])*scale + shift[2]
                )
                nodes.append(v)
            if line.startswith('136,'): # read elements
                ls = line.split(',')
                # The numbering of nodes is 1,3,5,7,...; explicit floor
                # division keeps the indices ints on Python 3 as well
                # (plain '/' would yield floats and break list indexing).
                i1,i2,i3 = int(ls[3])//2, int(ls[4])//2, int(ls[5])//2
                elems.append( (i1,i2,i3) )
    facets = [utils.facet( ( nodes[e[0]], nodes[e[1]], nodes[e[2]] ), **kw) for e in elems]
    if returnConnectivityTable:
        return facets, nodes, elems
    return facets
| gpl-2.0 |
solin319/incubator-mxnet | example/speech_recognition/stt_layer_fc.py | 52 | 6097 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from stt_layer_batchnorm import batchnorm
def fc(net,
       num_hidden,
       act_type,
       weight=None,
       bias=None,
       no_bias=False,
       name=None
       ):
    """Fully connected layer, optionally followed by an activation.

    weight and bias are explicit mx.sym.Variable handles shared by the
    caller; either may be None, in which case it is simply not forwarded to
    mx.sym.FullyConnected (which then creates its own parameter). A bias
    symbol is never forwarded when no_bias is requested.
    """
    fc_kwargs = dict(data=net, num_hidden=num_hidden, no_bias=no_bias, name=name)
    if weight is not None:
        fc_kwargs['weight'] = weight
    if bias is not None and not no_bias:
        fc_kwargs['bias'] = bias
    net = mx.sym.FullyConnected(**fc_kwargs)
    # activation
    if act_type is not None:
        net = mx.sym.Activation(data=net, act_type=act_type, name="%s_activation" % name)
    return net
def sequence_fc(net,
                seq_len,
                num_layer,
                prefix,
                num_hidden_list=[],
                act_type_list=[],
                is_batchnorm=False,
                dropout_rate=0,
                ):
    """Apply a stack of num_layer fully connected layers to every time step.

    net is either one symbol of shape (batch, seq, feat) -- sliced into
    seq_len per-step symbols -- or an already-sliced list of symbols.
    Weights (and biases / batchnorm gammas+betas) are created once per layer
    and shared across all time steps. Returns the list of per-step output
    symbols.

    NOTE(review): num_hidden_list/act_type_list are mutable default arguments;
    harmless here because they are only read, but consider None sentinels.
    """
    if num_layer == len(num_hidden_list) == len(act_type_list):
        if num_layer > 0:
            # Shared per-layer parameters (same symbols reused at every step).
            weight_list = []
            bias_list = []
            for layer_index in range(num_layer):
                weight_list.append(mx.sym.Variable(name='%s_sequence_fc%d_weight' % (prefix, layer_index)))
                # if you use batchnorm bias do not have any effect
                if not is_batchnorm:
                    bias_list.append(mx.sym.Variable(name='%s_sequence_fc%d_bias' % (prefix, layer_index)))
            # batch normalization parameters
            gamma_list = []
            beta_list = []
            if is_batchnorm:
                for layer_index in range(num_layer):
                    gamma_list.append(mx.sym.Variable(name='%s_sequence_fc%d_gamma' % (prefix, layer_index)))
                    beta_list.append(mx.sym.Variable(name='%s_sequence_fc%d_beta' % (prefix, layer_index)))
            # batch normalization parameters ends
            # Accept either a single symbol (slice it per time step) or a
            # pre-sliced list of symbols.
            if type(net) is mx.symbol.Symbol:
                net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=1, squeeze_axis=1)
            elif type(net) is list:
                for net_index, one_net in enumerate(net):
                    if type(one_net) is not mx.symbol.Symbol:
                        raise Exception('%d th elements of the net should be mx.symbol.Symbol' % net_index)
            else:
                raise Exception('type of net should be whether mx.symbol.Symbol or list of mx.symbol.Symbol')
            hidden_all = []
            for seq_index in range(seq_len):
                hidden = net[seq_index]
                for layer_index in range(num_layer):
                    # Optional dropout before each layer.
                    if dropout_rate > 0:
                        hidden = mx.sym.Dropout(data=hidden, p=dropout_rate)
                    if is_batchnorm:
                        # FC (bias-less) -> batchnorm -> activation.
                        hidden = fc(net=hidden,
                                    num_hidden=num_hidden_list[layer_index],
                                    act_type=None,
                                    weight=weight_list[layer_index],
                                    no_bias=is_batchnorm,
                                    name="%s_t%d_l%d_fc" % (prefix, seq_index, layer_index)
                                    )
                        # last layer doesn't have batchnorm
                        hidden = batchnorm(net=hidden,
                                           gamma=gamma_list[layer_index],
                                           beta=beta_list[layer_index],
                                           name="%s_t%d_l%d_batchnorm" % (prefix, seq_index, layer_index))
                        hidden = mx.sym.Activation(data=hidden, act_type=act_type_list[layer_index],
                                                   name="%s_t%d_l%d_activation" % (prefix, seq_index, layer_index))
                    else:
                        # FC with shared weight+bias, activation fused in fc().
                        hidden = fc(net=hidden,
                                    num_hidden=num_hidden_list[layer_index],
                                    act_type=act_type_list[layer_index],
                                    weight=weight_list[layer_index],
                                    bias=bias_list[layer_index]
                                    )
                hidden_all.append(hidden)
            net = hidden_all
        return net
    else:
        raise Exception("length doesn't met - num_layer:",
                        num_layer, ",len(num_hidden_list):",
                        len(num_hidden_list),
                        ",len(act_type_list):",
                        len(act_type_list)
                        )
| apache-2.0 |
ericholscher/pinax | scripts/create-venv-script.py | 3 | 1932 | #!/usr/bin/env python
"""
Call this like ``python pinax/bin/create-venv-script.py``
it will refresh the pinax-boot.py script
"""
import os
import virtualenv
from optparse import OptionParser
from os.path import join, exists, dirname, abspath
def main():
    # Python 2 script: regenerates the pinax-boot.py bootstrap script from
    # virtualenv plus an installer template (version-specific if available).
    usage = "usage: %prog [options]"
    description = "Creates a Pinax boot script and uses version specific installer templates if given a release version."
    parser = OptionParser(usage, description=description)
    parser.add_option("-r", "--release",
        metavar='VERSION', dest='release', default=None,
        help='Release version of Pinax to bootstrap')
    parser.add_option("-v", "--verbose",
        action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
        action="store_false", dest="verbose")
    (options, args) = parser.parse_args()
    here = dirname(abspath(__file__))
    # Defaults: generic installer template and output name.
    script_name = join(here, 'pinax-boot.py')
    installer = join(here, '_installer.py') # _installer.py
    if options.release:
        # Prefer a version-specific template and output name when one exists.
        release_installer = join(here, 'installers', '%s.py' % options.release) # installers/<version>.py
        if exists(release_installer):
            installer = release_installer
        script_name = join(here, 'pinax-boot-%s.py' % options.release) # pinax-boot-<version>.py
    print "Using as template: %s" % installer
    extra_text = open(installer).read()
    # virtualenv embeds extra_text into its standard bootstrap scaffolding.
    text = virtualenv.create_bootstrap_script(extra_text)
    if os.path.exists(script_name):
        f = open(script_name)
        cur_text = f.read()
        f.close()
    else:
        cur_text = ''
    print 'Updating %s' % script_name
    # Only rewrite the file when the generated content actually changed.
    if cur_text == text:
        print 'No update'
    else:
        if options.verbose:
            print 'Script changed; updating...'
        f = open(script_name, 'w')
        f.write(text)
        f.close()
if __name__ == '__main__':
    main()
| mit |
nimiq/moogle-project | magpie/response.py | 1 | 3599 | from abc import ABCMeta, abstractmethod
from utils.exceptions import ResponseError, InconsistentItemError, EntryNotToBeIndexed
class AbstractApiResponse(metaclass=ABCMeta):
"""
Response got after a query to a `Provider`.
Parameters:
response -- a `requests.models.Response` instance.
"""
def __init__(self, response):
self.response = response
self.updates_cursor = ''
self.has_more = False
self.pagination_cursor = ''
self._sanity_check()
def _sanity_check(self):
"""
Check whether the current response got is an error response.
"""
# If the HTTP status code is not 200, then it is an error.
if self.response.status_code != 200:
msg = 'HTTP Status: {}\n{}'.format(self.response.status_code, self.response.json())
raise ResponseError(msg)
def parse(self, bearertoken_id):
redis = self._init_redis_list(bearertoken_id)
self._hook_parse_entire_response(redis)
is_first_entry = True
entry = None
for entry in self._entries_to_apientries():
# `entry` is a `Api<Provider>Entry` instance.
redis.buffer(entry)
# Updates cursor: the `updated_time` of the most recent post.
if is_first_entry:
self._hook_parse_first_entry(entry)
is_first_entry = False
if entry: # if there is at least 1 `entry`.
self._hook_parse_last_entry(entry)
redis.flush_buffer()
@abstractmethod
def _init_redis_list(self, *args, **kwargs):
pass
def _hook_parse_entire_response(self, redis):
pass
def _hook_parse_first_entry(self, entry):
pass
def _hook_parse_last_entry(self, entry):
pass
@abstractmethod
def _build_pagination_cursor(self):
pass
@abstractmethod
def _build_updates_cursor(self):
pass
def _entries_to_apientries(self):
"""
Iter over all entries in the response.
Each entry in the response is converted to a `Api<Provider>Entry` instance.
"""
entries_list = self._extract_entries_list()
def _lpop():
"""
Pop from the head of the list.
Convert the item to `Api<Provider>Entry`.
"""
while True:
try:
entry = entries_list.pop(0) # Raise IndexError when completely consumed.
entry = self._init_api_provider_entry(entry)
return entry
except IndexError:
# `self.response` is empty, return None to stop the iter.
return None
except EntryNotToBeIndexed:
# The entry is probably a dir or not a textual file and we don't need to
# index it
continue
except InconsistentItemError as e:
# The entry is not consistent, like some important metadata are missing,
# we just skip it
# TODO log it anyway
continue
# The first argument of iter must be a callable, that's why we created the _lpop()
# closure. This closure will be called for each iteration and the result is returned
# until the result is None.
return iter(_lpop, None)
def _extract_entries_list(self):
return self.response.json()
    @abstractmethod
    def _init_api_provider_entry(self, *args, **kwargs):
        """Convert a raw response entry into a `Api<Provider>Entry` instance.

        Implementations may raise `EntryNotToBeIndexed` or
        `InconsistentItemError` to have the entry skipped by
        `_entries_to_apientries`.
        """
        pass
feroda/gasistafelice | gasistafelice/rest/views/blocks/order_details.py | 3 | 1831 | """View for block details specialized for a GASSupplierOrder"""
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from gasistafelice.rest.views.blocks import details
from gasistafelice.gas.forms import order as order_forms
from gasistafelice.consts import INCOME, EXPENSE
from gasistafelice.rest.views.blocks.base import ResourceBlockAction
import logging
log = logging.getLogger(__name__)
class Block(details.Block):
    """Details block specialized for a `GASSupplierOrder` resource."""

    BLOCK_NAME = "order_details"
    BLOCK_VALID_RESOURCE_TYPES = ["order"]

    def _get_edit_form_class(self):
        # Build the form class per-request so request-bound fields are injected.
        return order_forms.form_class_factory_for_request(self.request, base=order_forms.EditOrderForm)

    def _get_user_actions(self, request):
        """Return the base-class user actions, decorated and filtered for orders.

        Adds a confirmation message to the close/cancel transitions and strips
        out transitions that are not implemented yet.
        """
        user_actions = super(Block, self)._get_user_actions(request)

        #refs = [] #request.resource.cash_referrers
        #if refs and request.user in refs:

        #UGLY: remove this code when send email transition is done.
        #REMOVE temporarly un-managed transitions
        new_user_actions = []
        for ua in user_actions:
            confirm_text_d = {
                'transition/close' : _("Order will be closed. GAS members will not be able to order anymore. Are you sure?"),
                'transition/cancel' : _("Order will be CANCELED, so gas members orders will be CANCELED. Are you sure?"),
            }
            confirm_text = confirm_text_d.get(ua.name)
            if confirm_text:
                ua.confirm_text = confirm_text

            #print("User action: %s" % ua.name)
            # Keep only the transitions that are actually managed today.
            if ua.name not in [
                'transition/make unpaid',
                'transition/close and send email', #FIXME: disabled actions until implemented
                'transition/archive'
            ]:
                new_user_actions.append(ua)

        return new_user_actions
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/optimize/tests/test_slsqp.py | 20 | 14213 | """
Unit test for SLSQP optimization.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_array_almost_equal, TestCase,
assert_allclose, assert_equal, run_module_suite)
import numpy as np
from scipy._lib._testutils import knownfailure_overridable
from scipy.optimize import fmin_slsqp, minimize
class MyCallBack(object):
    """Callable that records whether and how often the optimizer invoked it.

    Used by the tests to verify a user-supplied callback is really called.
    """

    def __init__(self):
        self.been_called = False
        self.ncalls = 0

    def __call__(self, x):
        self.been_called = True
        self.ncalls = self.ncalls + 1
class TestSLSQP(TestCase):
    """
    Test SLSQP algorithm using Example 14.4 from Numerical Methods for
    Engineers by Steven Chapra and Raymond Canale.
    This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
    which has a maximum at x=2, y=1.

    Both the `minimize(method='SLSQP')` interface and the legacy
    `fmin_slsqp` interface are exercised below.
    """
    def setUp(self):
        # Suppress solver console output in every test.
        self.opts = {'disp': False}

    def fun(self, d, sign=1.0):
        """
        Arguments:
        d     - A list of two elements, where d[0] represents x and d[1] represents y
                 in the following equation.
        sign - A multiplier for f. Since we want to optimize it, and the scipy
               optimizers can only minimize functions, we need to multiply it by
               -1 to achieve the desired solution
        Returns:
        2*x*y + 2*x - x**2 - 2*y**2

        """
        x = d[0]
        y = d[1]
        return sign*(2*x*y + 2*x - x**2 - 2*y**2)

    def jac(self, d, sign=1.0):
        """
        This is the derivative of fun, returning a numpy array
        representing df/dx and df/dy.

        """
        x = d[0]
        y = d[1]
        dfdx = sign*(-2*x + 2*y + 2)
        dfdy = sign*(2*x - 4*y)
        return np.array([dfdx, dfdy], float)

    def fun_and_jac(self, d, sign=1.0):
        # Combined objective/gradient, for the jac=True calling convention.
        return self.fun(d, sign), self.jac(d, sign)

    def f_eqcon(self, x, sign=1.0):
        """ Equality constraint """
        return np.array([x[0] - x[1]])

    def fprime_eqcon(self, x, sign=1.0):
        """ Equality constraint, derivative """
        return np.array([[1, -1]])

    def f_eqcon_scalar(self, x, sign=1.0):
        """ Scalar equality constraint """
        return self.f_eqcon(x, sign)[0]

    def fprime_eqcon_scalar(self, x, sign=1.0):
        """ Scalar equality constraint, derivative """
        return self.fprime_eqcon(x, sign)[0].tolist()

    def f_ieqcon(self, x, sign=1.0):
        """ Inequality constraint """
        return np.array([x[0] - x[1] - 1.0])

    def fprime_ieqcon(self, x, sign=1.0):
        """ Inequality constraint, derivative """
        return np.array([[1, -1]])

    def f_ieqcon2(self, x):
        """ Vector inequality constraint """
        return np.asarray(x)

    def fprime_ieqcon2(self, x):
        """ Vector inequality constraint, derivative """
        return np.identity(x.shape[0])

    # minimize
    def test_minimize_unbounded_approximated(self):
        # Minimize, method='SLSQP': unbounded, approximated jacobian.
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])

    def test_minimize_unbounded_given(self):
        # Minimize, method='SLSQP': unbounded, given jacobian.
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       jac=self.jac, method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])

    def test_minimize_bounded_approximated(self):
        # Minimize, method='SLSQP': bounded, approximated jacobian.
        with np.errstate(invalid='ignore'):
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           bounds=((2.5, None), (None, 0.5)),
                           method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2.5, 0.5])
        assert_(2.5 <= res.x[0])
        assert_(res.x[1] <= 0.5)

    def test_minimize_unbounded_combined(self):
        # Minimize, method='SLSQP': unbounded, combined function and jacobian.
        res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
                       jac=True, method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])

    def test_minimize_equality_approximated(self):
        # Minimize with method='SLSQP': equality constraint, approx. jacobian.
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon,
                                    'args': (-1.0, )},
                       method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])

    def test_minimize_equality_given(self):
        # Minimize with method='SLSQP': equality constraint, given jacobian.
        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
                       method='SLSQP', args=(-1.0,),
                       constraints={'type': 'eq', 'fun':self.f_eqcon,
                                    'args': (-1.0, )},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])

    def test_minimize_equality_given2(self):
        # Minimize with method='SLSQP': equality constraint, given jacobian
        # for fun and const.
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0,),
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])

    def test_minimize_equality_given_cons_scalar(self):
        # Minimize with method='SLSQP': scalar equality constraint, given
        # jacobian for fun and const.
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0,),
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon_scalar,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon_scalar},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])

    def test_minimize_inequality_given(self):
        # Minimize with method='SLSQP': inequality constraint, given jacobian.
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0, ),
                       constraints={'type': 'ineq',
                                    'fun': self.f_ieqcon,
                                    'args': (-1.0, )},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1], atol=1e-3)

    def test_minimize_inequality_given_vector_constraints(self):
        # Minimize with method='SLSQP': vector inequality constraint, given
        # jacobian.
        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
                       method='SLSQP', args=(-1.0,),
                       constraints={'type': 'ineq',
                                    'fun': self.f_ieqcon2,
                                    'jac': self.fprime_ieqcon2},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])

    def test_minimize_bound_equality_given2(self):
        # Minimize with method='SLSQP': bounds, eq. const., given jac. for
        # fun. and const.
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0, ),
                       bounds=[(-0.8, 1.), (-1, 0.8)],
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
        assert_(-0.8 <= res.x[0] <= 1)
        assert_(-1 <= res.x[1] <= 0.8)

    # fmin_slsqp
    def test_unbounded_approximated(self):
        # SLSQP: unbounded, approximated jacobian.
        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1])

    def test_unbounded_given(self):
        # SLSQP: unbounded, given jacobian.
        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
                         fprime = self.jac, iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1])

    def test_equality_approximated(self):
        # SLSQP: equality constraint, approximated jacobian.
        res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
                         eqcons = [self.f_eqcon],
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])

    def test_equality_given(self):
        # SLSQP: equality constraint, given jacobian.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0,),
                         eqcons = [self.f_eqcon], iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])

    def test_equality_given2(self):
        # SLSQP: equality constraint, given jacobian for fun and const.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0,),
                         f_eqcons = self.f_eqcon,
                         fprime_eqcons = self.fprime_eqcon,
                         iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])

    def test_inequality_given(self):
        # SLSQP: inequality constraint, given jacobian.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0, ),
                         ieqcons = [self.f_ieqcon],
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1], decimal=3)

    def test_bound_equality_given2(self):
        # SLSQP: bounds, eq. const., given jac. for fun. and const.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0, ),
                         bounds = [(-0.8, 1.), (-1, 0.8)],
                         f_eqcons = self.f_eqcon,
                         fprime_eqcons = self.fprime_eqcon,
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
        assert_(-0.8 <= x[0] <= 1)
        assert_(-1 <= x[1] <= 0.8)

    def test_scalar_constraints(self):
        # Regression test for gh-2182
        x = fmin_slsqp(lambda z: z**2, [3.],
                       ieqcons=[lambda z: z[0] - 1],
                       iprint=0)
        assert_array_almost_equal(x, [1.])

        x = fmin_slsqp(lambda z: z**2, [3.],
                       f_ieqcons=lambda z: [z[0] - 1],
                       iprint=0)
        assert_array_almost_equal(x, [1.])

    def test_integer_bounds(self):
        # This should not raise an exception
        fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)

    def test_callback(self):
        # Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
        callback = MyCallBack()
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       method='SLSQP', callback=callback, options=self.opts)
        assert_(res['success'], res['message'])
        assert_(callback.been_called)
        assert_equal(callback.ncalls, res['nit'])

    def test_inconsistent_linearization(self):
        # SLSQP must be able to solve this problem, even if the
        # linearized problem at the starting point is infeasible.

        # Linearized constraints are
        #
        #    2*x0[0]*x[0] >= 1
        #
        # At x0 = [0, 1], the second constraint is clearly infeasible.
        # This triggers a call with n2==1 in the LSQ subroutine.
        x = [0, 1]
        f1 = lambda x: x[0] + x[1] - 2
        f2 = lambda x: x[0]**2 - 1
        sol = minimize(
            lambda x: x[0]**2 + x[1]**2,
            x,
            constraints=({'type':'eq','fun': f1},
                         {'type':'ineq','fun': f2}),
            bounds=((0,None), (0,None)),
            method='SLSQP')
        x = sol.x

        assert_allclose(f1(x), 0, atol=1e-8)
        assert_(f2(x) >= -1e-8)
        assert_(sol.success, sol)

    @knownfailure_overridable("This bug is not fixed")
    def test_regression_5743(self):
        # SLSQP must not indicate success for this problem,
        # which is infeasible.
        x = [1, 2]
        sol = minimize(
            lambda x: x[0]**2 + x[1]**2,
            x,
            constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1},
                         {'type':'ineq','fun': lambda x: x[0]-2}),
            bounds=((0,None), (0,None)),
            method='SLSQP')
        assert_(not sol.success, sol)
if __name__ == "__main__":
    # Allow running this test module directly via the numpy test runner.
    run_module_suite()
| gpl-3.0 |
pdsteele/DES-Python | rvms.py | 1 | 20759 |
# -------------------------------------------------------------------------
# * This is an ANSI C library that can be used to evaluate the probability
# * density functions (pdf's), cumulative distribution functions (cdf's), and
# * inverse distribution functions (idf's) for a variety of discrete and
# * continuous random variables.
# *
# * The following notational conventions are used
# * x : possible value of the random variable
# * u : real variable (probability) between 0.0 and 1.0
# * a, b, n, p, m, s : distribution-specific parameters
# *
# * There are pdf's, cdf's and idf's for 6 discrete random variables
# *
# * Random Variable Range (x) Mean Variance
# *
# * Bernoulli(p) 0..1 p p*(1-p)
# * Binomial(n, p) 0..n n*p n*p*(1-p)
# * Equilikely(a, b) a..b (a+b)/2 ((b-a+1)*(b-a+1)-1)/12
# * Geometric(p) 0... p/(1-p) p/((1-p)*(1-p))
# * Pascal(n, p) 0... n*p/(1-p) n*p/((1-p)*(1-p))
# * Poisson(m) 0... m m
# *
# * and for 7 continuous random variables
# *
# * Uniform(a, b) a < x < b (a+b)/2 (b-a)*(b-a)/12
# * Exponential(m) x > 0 m m*m
# * Erlang(n, b) x > 0 n*b n*b*b
# * Normal(m, s) all x m s*s
# * Lognormal(a, b) x > 0 see below
# * Chisquare(n) x > 0 n 2*n
# * Student(n) all x 0 (n > 1) n/(n-2) (n > 2)
# *
# * For the Lognormal(a, b), the mean and variance are
# *
# * mean = Exp(a + 0.5*b*b)
# * variance = (Exp(b*b) - )1*Exp(2*a + b*b)
# *
# * Name : rvms.c (Random Variable ModelS)
# * Author : Steve Park & Dave Geyer
# * Language : ANSI C
# * Latest Revision : 11-22-97
# Translated by : Philip Steele
# Language : Python 3.3
# Latest Revision : 3/26/14
# * -------------------------------------------------------------------------
from math import exp, log, fabs, sqrt
#from rvgs import
TINY= 1.0e-10
SQRT2PI= 2.506628274631 # #/* sqrt(2 * pi) */
# static double pdfStandard(x)
# static double cdfStandard(x)
# static double idfStandard(u)
# static double LogGamma(a)
# static double LogBeta(a, b)
# static double InGamma(a, b)
# static double InBeta(a, b, x)
def pdfBernoulli(p, x):
    """Bernoulli(p) pdf; use 0.0 < p < 1.0 and x in {0, 1}."""
    return p if x != 0 else 1.0 - p
def cdfBernoulli(p, x):
    """Bernoulli(p) cdf; use 0.0 < p < 1.0 and x in {0, 1}.

    Fix: return the float 1.0 (not the int 1) for x != 0, matching the
    float return convention of every other cdf in this module.
    """
    if x == 0:
        return 1.0 - p
    return 1.0
def idfBernoulli(p, u):
    """Bernoulli(p) idf; use 0.0 < p < 1.0 and 0.0 < u < 1.0."""
    return 0 if u < 1.0 - p else 1
def pdfEquilikely(a, b, x):
    """Equilikely(a, b) pdf; use a <= x <= b."""
    width = b - a + 1.0
    return 1.0 / width
def cdfEquilikely(a, b, x):
    """Equilikely(a, b) cdf; use a <= x <= b."""
    width = b - a + 1.0
    return (x - a + 1.0) / width
def idfEquilikely(a, b, u):
    """Equilikely(a, b) idf; use a <= b and 0.0 < u < 1.0."""
    offset = int(u * (b - a + 1))
    return a + offset
def pdfBinomial(n, p, x):
    """Binomial(n, p) pdf; use 0 <= x <= n and 0.0 < p < 1.0."""
    log_coeff = LogChoose(n, x)
    log_prob = x * log(p) + (n - x) * log(1.0 - p)
    return exp(log_coeff + log_prob)
def cdfBinomial(n, p, x):
    """Binomial(n, p) cdf; use 0 <= x <= n and 0.0 < p < 1.0."""
    if x >= n:
        return 1.0
    return 1.0 - InBeta(x + 1, n - x, p)
def idfBinomial(n, p, u):
    # =================================================
    # * NOTE: use 0 <= n, 0.0 < p < 1.0 and 0.0 < u < 1.0
    # * =================================================
    """Binomial(n, p) idf: smallest x with cdf(x) >= u, by linear search from the mean."""
    x = int(n * p)                       #/* start searching at the mean */
    if (cdfBinomial(n, p, x) <= u):
        while (cdfBinomial(n, p, x) <= u):    # mean is too low: search upward
            x += 1
    elif (cdfBinomial(n, p, 0) <= u):
        while (cdfBinomial(n, p, x - 1) > u):  # mean is too high: search downward
            x -= 1
    else:
        x = 0
    return (x)
def pdfGeometric(p, x):
    """Geometric(p) pdf; use 0.0 < p < 1.0 and x >= 0."""
    p_pow_x = exp(x * log(p))
    return (1.0 - p) * p_pow_x
def cdfGeometric(p, x):
    """Geometric(p) cdf; use 0.0 < p < 1.0 and x >= 0."""
    tail = exp((x + 1) * log(p))
    return 1.0 - tail
def idfGeometric(p, u):
    # =========================================
    # * NOTE: use 0.0 < p < 1.0 and 0.0 < u < 1.0
    # * =========================================
    """Geometric(p) idf: smallest x with cdfGeometric(p, x) >= u.

    Fix: the original C cast `(long)` survived the Python translation; on
    Python 3 `long` no longer exists, so the call raised NameError. `int()`
    performs the intended truncation toward zero.
    """
    return int(log(1.0 - u) / log(p))
def pdfPascal(n, p, x):
    """Pascal(n, p) pdf; use n >= 1, 0.0 < p < 1.0, and x >= 0."""
    log_coeff = LogChoose(n + x - 1, x)
    log_prob = x * log(p) + n * log(1.0 - p)
    return exp(log_coeff + log_prob)
def cdfPascal(n, p, x):
    """Pascal(n, p) cdf; use n >= 1, 0.0 < p < 1.0, and x >= 0."""
    tail = InBeta(x + 1, n, p)
    return 1.0 - tail
def idfPascal(n, p, u):
    # ==================================================
    # * NOTE: use n >= 1, 0.0 < p < 1.0, and 0.0 < u < 1.0
    # * ==================================================
    """Pascal(n, p) idf: smallest x with cdf(x) >= u, by linear search from the mean."""
    x = int(n * p / (1.0 - p))           #/* start searching at the mean */
    if (cdfPascal(n, p, x) <= u):
        while (cdfPascal(n, p, x) <= u):      # mean is too low: search upward
            x += 1
    elif (cdfPascal(n, p, 0) <= u):
        while (cdfPascal(n, p, x - 1) > u):   # mean is too high: search downward
            x -= 1
    else:
        x = 0
    return (x)
def pdfPoisson(m, x):
    """Poisson(m) pdf; use m > 0 and x >= 0."""
    log_pdf = x * log(m) - m - LogFactorial(x)
    return exp(log_pdf)
def cdfPoisson(m, x):
    """Poisson(m) cdf; use m > 0 and x >= 0."""
    upper_tail = InGamma(x + 1, m)
    return 1.0 - upper_tail
def idfPoisson(m, u):
    # ===================================
    # * NOTE: use m > 0 and 0.0 < u < 1.0
    # * ===================================
    """Poisson(m) idf: smallest x with cdf(x) >= u, by linear search from the mean."""
    x = int(m)                           #/* start searching at the mean */
    if (cdfPoisson(m, x) <= u):
        while (cdfPoisson(m, x) <= u):        # mean is too low: search upward
            x += 1
    elif (cdfPoisson(m, 0) <= u):
        while (cdfPoisson(m, x - 1) > u):     # mean is too high: search downward
            x -= 1
    else:
        x = 0
    return (x)
def pdfUniform(a, b, x):
    """Uniform(a, b) pdf; use a < x < b."""
    width = b - a
    return 1.0 / width
def cdfUniform(a, b, x):
    """Uniform(a, b) cdf; use a < x < b."""
    width = b - a
    return (x - a) / width
def idfUniform(a, b, u):
    """Uniform(a, b) idf; use a < b and 0.0 < u < 1.0."""
    span = b - a
    return a + span * u
def pdfExponential(m, x):
    """Exponential(m) pdf; use m > 0 and x > 0."""
    rate = 1.0 / m
    return rate * exp(- x / m)
def cdfExponential(m, x):
# =========================================
# * NOTE: use m > 0 and x > 0
# * =========================================
return (1.0 - exp(- x / m))
def idfExponential(m, u):
    """Exponential(m) idf; use m > 0 and 0.0 < u < 1.0."""
    return -(m * log(1.0 - u))
def pdfErlang(n, b, x):
    """Erlang(n, b) pdf; use n >= 1, b > 0, and x > 0."""
    z = x / b
    log_pdf = (n - 1) * log(z) - z - log(b) - LogGamma(n)
    return exp(log_pdf)
def cdfErlang(n, b, x):
    """Erlang(n, b) cdf; use n >= 1, b > 0, and x > 0."""
    z = x / b
    return InGamma(n, z)
def idfErlang(n, b, u):
    # ============================================
    # * NOTE: use n >= 1, b > 0 and 0.0 < u < 1.0
    # * ============================================
    """Erlang(n, b) idf via Newton-Raphson iteration, started at the mean n*b."""
    x = n*b
    condition = True
    while(condition == True):            #/* use Newton-Raphson iteration */
        t = x
        x = t + (u - cdfErlang(n, b, t)) / pdfErlang(n, b, t)
        if (x <= 0.0):
            x = 0.5 * t                  # keep the iterate strictly positive
        condition = (fabs(x - t) >= TINY)
    return (x)
def pdfStandard(x):
    """Standard Normal(0, 1) pdf; x can be any value."""
    kernel = exp(-0.5 * x * x)
    return kernel / SQRT2PI
def cdfStandard(x):
    """Standard Normal(0, 1) cdf; x can be any value."""
    half_width = InGamma(0.5, 0.5 * x * x)
    if x < 0.0:
        return 0.5 * (1.0 - half_width)
    return 0.5 * (1.0 + half_width)
def idfStandard(u):
    # ===================================
    # * NOTE: 0.0 < u < 1.0
    # * ===================================
    """Standard Normal(0, 1) idf via Newton-Raphson iteration."""
    t = 0.0
    x = 0.0                              #/* initialize to the mean, then */
    condition = True
    while(condition == True):            #/* use Newton-Raphson iteration */
        t = x
        x = t + (u - cdfStandard(t)) / pdfStandard(t)
        condition = (fabs(x - t) >= TINY)
    return (x)
def pdfNormal(m, s, x):
    """Normal(m, s) pdf; x and m can be any value, s > 0.0."""
    z = (x - m) / s
    return pdfStandard(z) / s
def cdfNormal(m, s, x):
    """Normal(m, s) cdf; x and m can be any value, s > 0.0."""
    z = (x - m) / s
    return cdfStandard(z)
def idfNormal(m, s, u):
    """Normal(m, s) idf; any m, s > 0.0 and 0.0 < u < 1.0."""
    z = idfStandard(u)
    return m + s * z
def pdfLognormal(a, b, x):
    """Lognormal(a, b) pdf; any a, b > 0.0 and x > 0.0."""
    z = (log(x) - a) / b
    return pdfStandard(z) / (b * x)
def cdfLognormal(a, b, x):
    """Lognormal(a, b) cdf; any a, b > 0.0 and x > 0.0."""
    z = (log(x) - a) / b
    return cdfStandard(z)
def idfLognormal(a, b, u):
    """Lognormal(a, b) idf; any a, b > 0.0 and 0.0 < u < 1.0."""
    z = a + b * idfStandard(u)
    return exp(z)
def pdfChisquare(n, x):
    """Chisquare(n) pdf; use n >= 1 and x > 0.0.

    Fix: removed a dead assignment (`t = n/2.0`) that was immediately
    overwritten in the original translation.
    """
    s = n / 2.0
    t = (s - 1.0) * log(x / 2.0) - (x / 2.0) - log(2.0) - LogGamma(s)
    return exp(t)
def cdfChisquare(n, x):
    """Chisquare(n) cdf; use n >= 1 and x > 0.0."""
    half_n = n / 2.0
    return InGamma(half_n, x / 2)
def idfChisquare(n, u):
    # =====================================
    # * NOTE: use n >= 1 and 0.0 < u < 1.0
    # * =====================================
    """Chisquare(n) idf via Newton-Raphson iteration, started at the mean n."""
    x = n                                #/* initialize to the mean, then */
    condition = True
    while(condition == True):            #/* use Newton-Raphson iteration */
        t = x
        x = t + (u - cdfChisquare(n, t)) / pdfChisquare(n, t)
        if (x <= 0.0):
            x = 0.5 * t                  # keep the iterate strictly positive
        condition = (fabs(x - t) >= TINY)
    return (x)
def pdfStudent(n, x):
    """Student(n) pdf; use n >= 1, any x."""
    log_kernel = -0.5 * (n + 1) * log(1.0 + ((x * x) / float(n)))
    log_norm = -LogBeta(0.5, n / 2.0)
    return exp(log_kernel + log_norm) / sqrt(float(n))
def cdfStudent(n, x):
    """Student(n) cdf; use n >= 1, any x."""
    w = (x * x) / (n + x * x)
    half_mass = InBeta(0.5, n / 2.0, w)
    if x >= 0.0:
        return 0.5 * (1.0 + half_mass)
    return 0.5 * (1.0 - half_mass)
def idfStudent(n, u):
    # ===================================
    # * NOTE: use n >= 1 and 0.0 < u < 1.0
    # * ===================================
    """Student(n) idf via Newton-Raphson iteration, started at the mean 0."""
    t = 0.0
    x = 0.0                              #/* initialize to the mean, then */
    condition = True
    while(condition == True):            #/* use Newton-Raphson iteration */
        t = x
        # print("t is set to "+ t)
        x = t + (u - cdfStudent(n, t)) / pdfStudent(n, t)
        # print("x is set to "+x)
        # print(fabs(x-t))
        condition = (fabs(x - t) >= TINY)
    return (x)
# ===================================================================
# * The six functions that follow are a 'special function' mini-library
# * used to support the evaluation of pdf, cdf and idf functions.
# * ===================================================================
def LogGamma(a):
    # ========================================================================
    # * LogGamma returns the natural log of the gamma function.
    # * NOTE: use a > 0.0
    # *
    # * The algorithm used to evaluate the natural log of the gamma function is
    # * based on an approximation by C. Lanczos, SIAM J. Numerical Analysis, B,
    # * vol 1, 1964.  The constants have been selected to yield a relative error
    # * which is less than 2.0e-10 for all positive values of the parameter a.
    # * ========================================================================
    """Return ln(Gamma(a)) via the Lanczos approximation; use a > 0.0."""
    s = []   # the six Lanczos series terms, each a rational in `a`
    s.append(76.180091729406 / a)
    s.append(-86.505320327112 / (a + 1.0))
    s.append(24.014098222230 / (a + 2.0))
    s.append(-1.231739516140 / (a + 3.0))
    s.append(0.001208580030 / (a + 4.0))
    s.append(-0.000005363820 / (a + 5.0))
    sum = 1.000000000178
    for i in range(0,6):
        sum += s[i]
    temp = (a - 0.5) * log(a + 4.5) - (a + 4.5) + log(SQRT2PI * sum)
    return (temp)
def LogFactorial(n):
    """Return ln(n!); use n >= 0. Uses the identity n! = Gamma(n + 1)."""
    return LogGamma(n + 1)
def LogBeta(a, b):
    """Return ln(Beta(a, b)); use a > 0.0 and b > 0.0.

    Uses the identity Beta(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b).
    """
    return LogGamma(a) + LogGamma(b) - LogGamma(a + b)
def LogChoose(n, m):
    """Return ln(C(n, m)); use 0 <= m <= n.

    Uses the relation between the beta function and binomial coefficients.
    """
    if m <= 0:
        return 0.0
    return -LogBeta(m, n - m + 1) - log(m)
def InGamma(a,x):
    # ========================================================================
    # * Evaluates the incomplete gamma function.
    # * NOTE: use a > 0.0 and x >= 0.0
    # *
    # * The algorithm used to evaluate the incomplete gamma function is based on
    # * Algorithm AS 32, J. Applied Statistics, 1970, by G. P. Bhattacharjee.
    # * See also equations 6.5.29 and 6.5.31 in the Handbook of Mathematical
    # * Functions, Abramowitz and Stegum (editors).  The absolute error is less
    # * than 1e-10 for all non-negative values of x.
    # * ========================================================================
    if (x > 0.0):
        # common normalizing factor x^a * e^-x / Gamma(a)
        factor = exp(-1*x + a*log(x) - LogGamma(a))
    else:
        factor = 0.0
    if (x < a + 1.0):                    ##/* evaluate as an infinite series - */
        t = a                            ##/* A & S equation 6.5.29            */
        term = 1.0 / a
        sum = term
        while (term >= TINY * sum):      ##/* sum until 'term' is small */
            t += 1
            term = term*(x / t)
            sum += term
        #EndWhile
        return (factor * sum)
    else:                                ##/* evaluate as a continued fraction - */
        p = [0.0,1.0, -1]                ##/* A & S eqn 6.5.31 with the extended */
        q = [1.0,x, -1]                  ##/* pattern 2-a, 2, 3-a, 3, 4-a, 4,... */
                                         ##/* - see also A & S sec 3.10, eqn (3) */
        f = p[1] / q[1]
        n = 0
        condition = True
        while(condition == True):        ##/* recursively generate the continued */
            g = f                        ##/* fraction 'f' until two consecutive */
            n += 1                       ##/* values are small                   */
            if ((n % 2) > 0):
                c=[(((n + 1) / 2.0) - a), 1]
            else:
                c=[(n / 2.0),x]
            # three-term recurrences for numerator p and denominator q
            p[2] = (c[1] * p[1] + c[0] * p[0])
            q[2] = (c[1] * q[1] + c[0] * q[0])
            if (q[2] != 0.0):            ##/* rescale to avoid overflow */
                p[0] = p[1] / q[2]
                q[0] = q[1] / q[2]
                p[1] = p[2] / q[2]
                q[1] = 1.0
                f = p[1]
            condition = (fabs(f - g) >= TINY) or (q[1] != 1.0)
        return (1.0 - factor * f)
def InBeta(a,b,x):
    # =======================================================================
    # * Evaluates the incomplete beta function.
    # * NOTE: use a > 0.0, b > 0.0 and 0.0 <= x <= 1.0
    # *
    # * The algorithm used to evaluate the incomplete beta function is based on
    # * equation 26.5.8 in the Handbook of Mathematical Functions, Abramowitz
    # * and Stegum (editors).  The absolute error is less than 1e-10 for all x
    # * between 0 and 1.
    # * =======================================================================
    if (x > (a + 1.0) / (a + b + 1.0)):  ##/* to accelerate convergence   */
        swap = 1                         ##/* complement x and swap a & b */
        x = 1.0 - x
        t = a
        a = b
        b = t
    else:                                ##/* do nothing */
        swap = 0
    if (x > 0):
        # common factor x^a * (1-x)^b / (a * Beta(a, b))
        factor = exp(a * log(x) + b * log(1.0 - x) - LogBeta(a,b)) / a
    else:
        factor = 0.0
    # continued-fraction state: p = numerators, q = denominators
    p = [0.0,1.0, -1]
    q = [1.0,1.0, -1]
    f = p[1] / q[1]
    n = 0
    condition = True
    while (condition==True):             ##/* recursively generate the continued */
        g = f                            ##/* fraction 'f' until two consecutive */
        n += 1                           ##/* values are small                   */
        if ((n % 2) > 0):
            t = (n - 1) / 2.0
            c = -(a + t) * (a + b + t) * x / ((a + n - 1.0) * (a + n))
        else:
            t = n / 2.0
            c = t * (b - t) * x / ((a + n - 1.0) * (a + n))
        p[2] = (p[1] + c * p[0])
        q[2] = (q[1] + c * q[0])
        if (q[2] != 0.0):                ##/* rescale to avoid overflow */
            p[0] = p[1] / q[2]
            q[0] = q[1] / q[2]
            p[1] = p[2] / q[2]
            q[1] = 1.0
            f = p[1]
        condition = ((fabs(f - g) >= TINY) or (q[1] != 1.0))
    #endWhile
    if (swap == 1):
        # undo the complement applied above
        return (1.0 - factor * f)
    else:
        return (factor * f)
# C output:
# IDFSTU(10,.8) is 0.879058 - PASS
# IDFStud(10,.975) is 2.228139 - PASS
# IDFStud(100,.975) is 1.983972 - PASS
# IDFchisq(10,.5) is 9.341818 - PASS
# IDFchisq(15,.8) is 19.310657 - PASS
# IDFerlang(16,4,.878) is 82.934761 - PASS
# IDFerlang(20,7,.113) is 103.476309 - PASS
# IDFpoisson(16,.878) is 21.000000 - PASS
# IDFpoisson(19,.231) is 16.000000 - PASS
# IDFNorm(9,2,.66) is 9.824926 - PASS
# IDFNorm(-19,3.4,.81) is -16.015153 - PASS
# idfPascal(23,.11,.90) is 5.000000 - PASS
# idfPascal(6,.5,.5) is 6.000000 - PASS
# idfBinomial(23,.11,.90) is 5.000000 - PASS
# idfBinomial(6,.5,.5) is 3.000000 - PASS | mit |
derekjamescurtis/veritranspay | tests/response_virtualaccount_charge_tests.py | 1 | 5544 | from unittest import TestCase
from veritranspay.response.response import VirtualAccountBniChargeResponse, VirtualAccountPermataChargeResponse, \
VirtualAccountBcaChargeResponse, VirtualAccountMandiriChargeResponse
class VirtualAccountPermataChargeResponseTests(TestCase):
    """
    Parsing tests for a Permata VA charge response.
    https://api-docs.midtrans.com/#permata-virtual-account
    """
    def setUp(self):
        # example response data from
        # https://api-docs.midtrans.com/#permata-virtual-account
        payload = {
            "status_code": "201",
            "status_message": "Success, PERMATA VA transaction is successful",
            "transaction_id": "6fd88567-62da-43ff-8fe6-5717e430ffc7",
            "order_id": "H17550",
            "gross_amount": "145000.00",
            "payment_type": "bank_transfer",
            "transaction_time": "2016-06-19 13:42:29",
            "transaction_status": "pending",
            "fraud_status": "accept",
            "permata_va_number": "8562000087926752"
        }
        self.response_json = payload
        self.parsed_response = VirtualAccountPermataChargeResponse(**payload)

    def test_status_code(self):
        self.assertEqual(201, self.parsed_response.status_code)

    def test_payment_type(self):
        self.assertEqual('bank_transfer', self.parsed_response.payment_type)

    def test_payment_code(self):
        self.assertEqual('8562000087926752', self.parsed_response.permata_va_number)
class VirtualAccountBcaChargeResponseTests(TestCase):
    """Parsing of a BCA virtual-account charge response.

    Sample payload copied verbatim from
    https://api-docs.midtrans.com/#bca-virtual-account
    """
    def setUp(self):
        # Example charge response from the Midtrans documentation.
        self.raw_response = {
            "status_code": "201",
            "status_message": "Success, Bank Transfer transaction is created",
            "transaction_id": "9aed5972-5b6a-401e-894b-a32c91ed1a3a",
            "order_id": "1466323342",
            "gross_amount": "20000.00",
            "payment_type": "bank_transfer",
            "transaction_time": "2016-06-19 15:02:22",
            "transaction_status": "pending",
            "va_numbers": [
                {
                    "bank": "bca",
                    "va_number": "91019021579"
                }
            ],
            "fraud_status": "accept"
        }
        self.parsed_response = VirtualAccountBcaChargeResponse(
            **self.raw_response)

    def test_status_code(self):
        # The API's string status code is exposed as an integer.
        self.assertEqual(201, self.parsed_response.status_code)

    def test_payment_type(self):
        self.assertEqual('bank_transfer', self.parsed_response.payment_type)

    def test_payment_bank(self):
        first_va = self.parsed_response.va_numbers[0]
        self.assertEqual('bca', first_va['bank'])

    def test_payment_vanumber(self):
        first_va = self.parsed_response.va_numbers[0]
        self.assertEqual('91019021579', first_va['va_number'])
class VirtualAccountBniChargeResponseTests(TestCase):
    """Parsing of a BNI virtual-account charge response.

    Sample payload from https://api-docs.midtrans.com/#bni-virtual-account
    """
    def setUp(self):
        # example response data from
        # https://api-docs.midtrans.com/#bni-virtual-account
        self.response_json = {
            "status_code": "201",
            "status_message": "Success, Bank Transfer transaction is created",
            "transaction_id": "9aed5972-5b6a-401e-894b-a32c91ed1a3a",
            "order_id": "1466323342",
            "gross_amount": "20000.00",
            "payment_type": "bank_transfer",
            "transaction_time": "2016-06-19 15:02:22",
            "transaction_status": "pending",
            "va_numbers": [
                {
                    "bank": "bni",
                    "va_number": "8578000000111111"
                }
            ],
            "fraud_status": "accept"
        }
        self.parsed_response = VirtualAccountBniChargeResponse(**self.response_json)

    def test_status_code(self):
        # String "201" in the payload is exposed as the integer 201.
        self.assertEqual(201, self.parsed_response.status_code)

    def test_payment_type(self):
        self.assertEqual('bank_transfer', self.parsed_response.payment_type)

    def test_payment_bank(self):
        # NOTE(review): asserts the title-cased 'Bni' from the response's
        # .bank attribute even though the payload says 'bni' -- presumably
        # the response class normalizes the name; confirm this is intended.
        self.assertEqual('Bni', self.parsed_response.bank)

    def test_payment_vabank(self):
        self.assertEqual('bni', self.parsed_response.va_numbers[0]['bank'])

    def test_payment_vanumber(self):
        self.assertEqual('8578000000111111', self.parsed_response.va_numbers[0]['va_number'])
class VirtualAccountMandiriChargeResponseTests(TestCase):
    """Parsing of a Mandiri bill-payment (echannel) charge response.

    Sample payload from https://api-docs.midtrans.com/#mandiri-bill-payment
    """
    def setUp(self):
        # example response data from
        # https://api-docs.midtrans.com/#mandiri-bill-payment
        self.response_json = {
            "status_code": "201",
            "status_message": "Success, Mandiri Bill transaction is successful",
            "transaction_id": "883af6a4-c1b4-4d39-9bd8-b148fcebe853",
            "order_id": "tes",
            "gross_amount": "1000.00",
            "payment_type": "echannel",
            "transaction_time": "2016-06-19 14:40:19",
            "transaction_status": "pending",
            "fraud_status": "accept",
            "bill_key": "990000000260",
            "biller_code": "70012"
        }
        self.parsed_response = VirtualAccountMandiriChargeResponse(**self.response_json)

    def test_status_code(self):
        # String "201" in the payload is exposed as the integer 201.
        self.assertEqual(201, self.parsed_response.status_code)

    def test_payment_type(self):
        self.assertEqual('echannel', self.parsed_response.payment_type)

    def test_bill_key(self):
        # Renamed from test_payment_bank: this asserts the Mandiri bill
        # key, not a bank name (copy-paste misnomer from the BCA tests).
        self.assertEqual('990000000260', self.parsed_response.bill_key)

    def test_biller_code(self):
        # Renamed from test_payment_vanumber: this asserts the biller
        # code; Mandiri responses carry no va_numbers list.
        self.assertEqual('70012', self.parsed_response.biller_code)
| bsd-3-clause |
NicholasColotouros/RaspiCorder | RaspiCorder/Menus.py | 1 | 2928 | #!/usr/bin/python
from time import sleep
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
class Instrument:
    """Enumeration of recordable instrument types.

    The numeric codes match the menu positions used by InstrumentMenu.
    """
    drums = 1
    guitar = 2
    bass = 3
    other = 4

    # Display names for the known codes; anything else falls back to "other".
    _NAMES = {1: "drums", 2: "guitar", 3: "bass"}

    @staticmethod
    def instrumentName(num):
        """Return the lowercase display name for instrument code *num*.

        Unknown codes (including 4) map to "other", matching the original
        if/elif chain; a dict lookup replaces the chain for clarity.
        """
        return Instrument._NAMES.get(num, "other")
class ConfirmationMenu:
    """Two-line LCD menu asking the user to confirm the chosen instrument.

    Row 0 starts recording; row 1 returns to instrument selection.
    """

    menuText = None
    selected = None
    lcd = None

    def __init__(self, plcd, instrument):
        self.menuText = " START REC " + instrument + "\n RESELECT instr"
        self.lcd = plcd
        self.selected = 0

    def InstrumentConfirm(self):
        """Block until the user presses SELECT; True means start recording."""
        display = self.lcd
        display.clear()
        display.message(self.menuText)
        display.blink()
        while True:
            display.setCursor(0, self.selected)
            if display.buttonPressed(display.UP):
                self.selected = 0
            elif display.buttonPressed(display.DOWN):
                self.selected = 1
            elif display.buttonPressed(display.SELECT):
                display.noBlink()
                # Row 0 ("START REC") confirms; row 1 ("RESELECT") declines.
                return self.selected != 1
class InstrumentMenu:
    """LCD menu that lets the user pick which instrument to record.

    Layout on the 16x2 character display:
        Drums      Bass
        Guitar     Other
    """

    # Two-line text rendered on the LCD.
    instrumentSelection = " Drums     Bass\n Guitar    Other"
    selected = 1
    delayTime = 0.5  # The time it takes to look for another button press

    def __init__(self):
        # BUG FIX: the original assigned to bare locals ("selected = ...",
        # "delayTime = 0.5"), which were silently discarded; it only worked
        # because the class attributes above hold the same values.
        self.selected = Instrument.drums
        self.delayTime = 0.5

    def updateCursor(self, lcd):
        """Move the blinking LCD cursor to the currently selected entry."""
        if self.selected == Instrument.drums:
            lcd.setCursor(0, 0)
        elif self.selected == Instrument.guitar:
            lcd.setCursor(0, 1)
        elif self.selected == Instrument.bass:
            lcd.setCursor(10, 0)
        else:
            lcd.setCursor(10, 1)

    def getInstrumentInput(self, lcd):
        """Poll the LCD buttons until SELECT is pressed.

        Returns the chosen Instrument code. Blocks while the user
        navigates; each recognized press is followed by a debounce sleep
        of self.delayTime seconds.
        """
        lcd.clear()
        lcd.message(self.instrumentSelection)
        lcd.blink()
        while True:
            self.updateCursor(lcd)
            # Move left
            if lcd.buttonPressed(lcd.LEFT):
                if self.selected == Instrument.bass:
                    self.selected = Instrument.drums
                    sleep(self.delayTime)
                elif self.selected == Instrument.other:
                    self.selected = Instrument.guitar
                    sleep(self.delayTime)
            # Move right
            elif lcd.buttonPressed(lcd.RIGHT):
                if self.selected == Instrument.drums:
                    self.selected = Instrument.bass
                    sleep(self.delayTime)
                elif self.selected == Instrument.guitar:
                    self.selected = Instrument.other
                    sleep(self.delayTime)
            # Move up
            elif lcd.buttonPressed(lcd.UP):
                if self.selected == Instrument.guitar:
                    self.selected = Instrument.drums
                    sleep(self.delayTime)
                elif self.selected == Instrument.other:
                    self.selected = Instrument.bass
                    sleep(self.delayTime)
            # Move down
            elif lcd.buttonPressed(lcd.DOWN):
                if self.selected == Instrument.drums:
                    self.selected = Instrument.guitar
                    sleep(self.delayTime)
                elif self.selected == Instrument.bass:
                    self.selected = Instrument.other
                    sleep(self.delayTime)
            # Select the current entry
            elif lcd.buttonPressed(lcd.SELECT):
                lcd.noBlink()
                return self.selected
jtopjian/st2 | st2api/tests/unit/controllers/v1/test_policies.py | 2 | 10587 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from six.moves import http_client
from st2common.models.api.policy import PolicyTypeAPI, PolicyAPI
from st2common.persistence.policy import PolicyType, Policy
from st2common.transport.publishers import PoolPublisher
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
# Fixture files (within the "generic" fixtures pack) loaded once for the
# whole test module.
TEST_FIXTURES = {
    'policytypes': [
        'policy_type_1.yaml',
        'policy_type_2.yaml'
    ],
    'policies': [
        'policy_1.yaml',
        'policy_2.yaml'
    ]
}

PACK = 'generic'  # name of the fixtures pack the files above live in

LOADER = FixturesLoader()
# Maps fixture kind -> {file name -> parsed fixture dict}.
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
class PolicyTypeControllerTest(FunctionalTest):
    """API tests for the read-only /v1/policytypes endpoint."""

    base_url = '/v1/policytypes'

    @classmethod
    def setUpClass(cls):
        super(PolicyTypeControllerTest, cls).setUpClass()
        # Register every policy-type fixture once for the whole class.
        for _, fixture in six.iteritems(FIXTURES['policytypes']):
            api_obj = PolicyTypeAPI(**fixture)
            PolicyType.add_or_update(PolicyTypeAPI.to_model(api_obj))

    def test_get_all(self):
        resp = self._get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)

    def test_filter(self):
        # Grab an arbitrary existing record to build the filters from.
        resp = self._get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)
        chosen = resp.json[0]

        # Filtering on both resource_type and name matches exactly one.
        resp = self._get_all('resource_type=%s&name=%s' %
                             (chosen['resource_type'], chosen['name']))
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 1)
        self.assertEqual(resp.json[0]['id'], chosen['id'])

        # Name alone is unique as well.
        resp = self._get_all('name=%s' % chosen['name'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 1)
        self.assertEqual(resp.json[0]['id'], chosen['id'])

        # Several fixtures share the same resource_type.
        resp = self._get_all('resource_type=%s' % chosen['resource_type'])
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 1)

    def test_filter_empty(self):
        resp = self._get_all('resource_type=yo&name=whatever')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 0)

    def test_get_one(self):
        resp = self._get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)
        chosen = resp.json[0]

        # Lookup works both by database id and by reference.
        resp = self._get_one(chosen['id'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.json['id'], chosen['id'])

        resp = self._get_one(chosen['ref'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.json['id'], chosen['id'])

    def test_get_one_fail(self):
        resp = self._get_one('1')
        self.assertEqual(resp.status_int, 404)

    def _get_all(self, query=None):
        # Helper: GET the collection, optionally with a raw query string.
        url = '%s?%s' % (self.base_url, query) if query else self.base_url
        return self.app.get(url, expect_errors=True)

    def _get_one(self, obj_id):
        # Helper: GET a single object by id or ref.
        return self.app.get('%s/%s' % (self.base_url, obj_id),
                            expect_errors=True)
class PolicyControllerTest(FunctionalTest):
    """CRUD API tests for the /v1/policies endpoint.

    Fixtures are seeded once per class; several tests pick "the first"
    record returned by GET, so they rely on the seeded DB state.
    """

    base_url = '/v1/policies'

    @classmethod
    def setUpClass(cls):
        super(PolicyControllerTest, cls).setUpClass()
        # Seed the database with the policy fixtures once for the class.
        for _, fixture in six.iteritems(FIXTURES['policies']):
            instance = PolicyAPI(**fixture)
            Policy.add_or_update(PolicyAPI.to_model(instance))

    def test_get_all(self):
        resp = self.__do_get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)

    def test_filter(self):
        # Pick an arbitrary existing record to build the filters from.
        resp = self.__do_get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)
        selected = resp.json[0]

        # pack + name together identify exactly one policy.
        resp = self.__do_get_all(filter='pack=%s&name=%s' % (selected['pack'], selected['name']))
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 1)
        self.assertEqual(self.__get_obj_id(resp, idx=0), selected['id'])

        # name alone is unique as well.
        resp = self.__do_get_all(filter='name=%s' % selected['name'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 1)
        self.assertEqual(self.__get_obj_id(resp, idx=0), selected['id'])

        # Multiple fixtures share the same pack.
        resp = self.__do_get_all(filter='pack=%s' % selected['pack'])
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 1)

    def test_filter_empty(self):
        resp = self.__do_get_all(filter='pack=yo&name=whatever')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 0)

    def test_get_one(self):
        resp = self.__do_get_all()
        self.assertEqual(resp.status_int, 200)
        self.assertGreater(len(resp.json), 0)
        selected = resp.json[0]

        # Lookup works both by database id and by reference.
        resp = self.__do_get_one(selected['id'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(self.__get_obj_id(resp), selected['id'])

        resp = self.__do_get_one(selected['ref'])
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(self.__get_obj_id(resp), selected['id'])

    def test_get_one_fail(self):
        resp = self.__do_get_one('1')
        self.assertEqual(resp.status_int, 404)

    def test_crud(self):
        # Full create -> read -> update -> delete round trip.
        instance = self.__create_instance()

        post_resp = self.__do_post(instance)
        self.assertEqual(post_resp.status_int, http_client.CREATED)

        get_resp = self.__do_get_one(self.__get_obj_id(post_resp))
        self.assertEqual(get_resp.status_int, http_client.OK)

        updated_input = get_resp.json
        updated_input['enabled'] = not updated_input['enabled']
        put_resp = self.__do_put(self.__get_obj_id(post_resp), updated_input)
        self.assertEqual(put_resp.status_int, http_client.OK)
        self.assertEqual(put_resp.json['enabled'], updated_input['enabled'])

        del_resp = self.__do_delete(self.__get_obj_id(post_resp))
        self.assertEqual(del_resp.status_int, http_client.NO_CONTENT)

    def test_post_duplicate(self):
        instance = self.__create_instance()

        post_resp = self.__do_post(instance)
        self.assertEqual(post_resp.status_int, http_client.CREATED)

        # Re-posting the same policy must be rejected as a conflict.
        post_dup_resp = self.__do_post(instance)
        self.assertEqual(post_dup_resp.status_int, http_client.CONFLICT)

        del_resp = self.__do_delete(self.__get_obj_id(post_resp))
        self.assertEqual(del_resp.status_int, http_client.NO_CONTENT)

    def test_put_not_found(self):
        updated_input = self.__create_instance()
        put_resp = self.__do_put('12345', updated_input)
        self.assertEqual(put_resp.status_int, http_client.NOT_FOUND)

    def test_put_sys_pack(self):
        # Policies in system packs (e.g. "core") must not be modifiable.
        instance = self.__create_instance()
        instance['pack'] = 'core'

        post_resp = self.__do_post(instance)
        self.assertEqual(post_resp.status_int, http_client.CREATED)

        updated_input = post_resp.json
        updated_input['enabled'] = not updated_input['enabled']
        put_resp = self.__do_put(self.__get_obj_id(post_resp), updated_input)
        self.assertEqual(put_resp.status_int, http_client.BAD_REQUEST)
        self.assertEqual(put_resp.json['faultstring'],
                         "Resources belonging to system level packs can't be manipulated")

        # Clean up manually since API won't delete object in sys pack.
        Policy.delete(Policy.get_by_id(self.__get_obj_id(post_resp)))

    def test_delete_not_found(self):
        del_resp = self.__do_delete('12345')
        self.assertEqual(del_resp.status_int, http_client.NOT_FOUND)

    def test_delete_sys_pack(self):
        # Deleting a policy in a system pack must also be rejected.
        instance = self.__create_instance()
        instance['pack'] = 'core'

        post_resp = self.__do_post(instance)
        self.assertEqual(post_resp.status_int, http_client.CREATED)

        del_resp = self.__do_delete(self.__get_obj_id(post_resp))
        self.assertEqual(del_resp.status_int, http_client.BAD_REQUEST)
        self.assertEqual(del_resp.json['faultstring'],
                         "Resources belonging to system level packs can't be manipulated")

        # Clean up manually since API won't delete object in sys pack.
        Policy.delete(Policy.get_by_id(self.__get_obj_id(post_resp)))

    @staticmethod
    def __create_instance():
        # Build a minimal valid policy payload referencing a seeded type.
        return {
            'name': 'myaction.mypolicy',
            'pack': 'mypack',
            'resource_ref': 'mypack.myaction',
            'policy_type': FIXTURES['policytypes'].values()[0]['name'],
            'parameters': {
                'k1': 'v1'
            }
        }

    @staticmethod
    def __get_obj_id(resp, idx=-1):
        # idx < 0: response body is a single object; otherwise a list.
        return resp.json['id'] if idx < 0 else resp.json[idx]['id']

    def __do_get_all(self, filter=None):
        url = '%s?%s' % (self.base_url, filter) if filter else self.base_url
        return self.app.get(url, expect_errors=True)

    def __do_get_one(self, id):
        return self.app.get('%s/%s' % (self.base_url, id), expect_errors=True)

    # publish is mocked out so writes don't hit the message bus.
    @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
    def __do_post(self, instance):
        return self.app.post_json(self.base_url, instance, expect_errors=True)

    @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
    def __do_put(self, id, instance):
        return self.app.put_json('%s/%s' % (self.base_url, id), instance, expect_errors=True)

    @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
    def __do_delete(self, id):
        return self.app.delete('%s/%s' % (self.base_url, id), expect_errors=True)
| apache-2.0 |
mat128/netman | tests/core/switch_session_test.py | 3 | 7133 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import time
from flexmock import flexmock
from hamcrest import assert_that, is_
import mock
from netman.core.objects.exceptions import UnknownResource, \
OperationNotCompleted, NetmanException, SessionAlreadyExists
from netman.core.switch_sessions import SwitchSessionManager
class SwitchSessionManagerTest(TestCase):
    """Unit tests for SwitchSessionManager's session lifecycle handling.

    Fix: the deprecated unittest alias assertEquals is replaced by
    assertEqual (two occurrences).
    """

    def setUp(self):
        self.switch_mock = flexmock()
        self.session_manager = SwitchSessionManager()

    def tearDown(self):
        # Cancel any pending inactivity timers so tests don't leak threads.
        for timer in self.session_manager.timers.values():
            timer.cancel()

    def test_open_session_generates_with_passed_session_id(self):
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered()
        self.switch_mock.should_receive('disconnect').never()

        assert_that(self.session_manager.open_session(self.switch_mock, 'patate'), is_('patate'))
        assert_that(self.session_manager.get_switch_for_session('patate'), is_(self.switch_mock))

    def test_open_session_with_session_that_already_exists_raises_an_exception(self):
        self.switch_mock.should_receive('connect').never()
        self.switch_mock.should_receive('start_transaction').never()
        self.switch_mock.should_receive('disconnect').never()

        self.session_manager.sessions['i_already_exist_buddy'] = 'stuff'

        with self.assertRaises(SessionAlreadyExists):
            self.session_manager.open_session(self.switch_mock, 'i_already_exist_buddy')

    def test_open_failing_session_closes_connection(self):
        # If start_transaction blows up, the connection must be torn down.
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered().and_raise(NetmanException())
        self.switch_mock.should_receive('disconnect').once().ordered()

        with self.assertRaises(NetmanException):
            self.session_manager.open_session(self.switch_mock, 'patate')

    def test_close_session(self):
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered()

        assert_that(self.session_manager.open_session(self.switch_mock, 'patate'), is_('patate'))
        assert_that(self.session_manager.get_switch_for_session('patate'), is_(self.switch_mock))

        self.switch_mock.should_receive('end_transaction').once().ordered()
        self.switch_mock.should_receive('disconnect').once().ordered()

        self.session_manager.close_session('patate')

        with self.assertRaises(UnknownResource):
            self.session_manager.get_switch_for_session('patate')

    def test_session_should_close_itself_after_timeout(self):
        self.session_manager.session_inactivity_timeout = 0.5

        switch_mock = mock.Mock()

        assert_that(self.session_manager.open_session(switch_mock, 'patate'), is_('patate'))
        assert_that(self.session_manager.get_switch_for_session('patate'), is_(switch_mock))

        switch_mock.connect.assert_called_with()
        switch_mock.start_transaction.assert_called_with()

        time.sleep(0.6)

        # After the inactivity timeout the session rolls back and closes.
        switch_mock.rollback_transaction.assert_called_with()
        switch_mock.end_transaction.assert_called_with()
        switch_mock.disconnect.assert_called_with()

        with self.assertRaises(UnknownResource):
            self.session_manager.get_switch_for_session('patate')

    def test_session_timeout_should_reset_on_activity(self):
        self.session_manager.session_inactivity_timeout = 1

        switch_mock = mock.Mock()

        assert_that(self.session_manager.open_session(switch_mock, 'patate'), is_('patate'))
        assert_that(self.session_manager.get_switch_for_session('patate'), is_(switch_mock))

        switch_mock.connect.assert_called_with()
        switch_mock.start_transaction.assert_called_with()

        time.sleep(0.5)

        self.session_manager.keep_alive('patate')

        time.sleep(0.5)

        # keep_alive pushed the deadline, so nothing was closed yet.
        assert_that(switch_mock.rollback_transaction.called, is_(False))
        assert_that(switch_mock.end_transaction.called, is_(False))
        assert_that(switch_mock.disconnect.called, is_(False))

        self.session_manager.keep_alive('patate')

        time.sleep(1.1)

        switch_mock.rollback_transaction.assert_called_with()
        switch_mock.end_transaction.assert_called_with()
        switch_mock.disconnect.assert_called_with()

        with self.assertRaises(UnknownResource):
            self.session_manager.get_switch_for_session('patate')

    def test_commit_session(self):
        self.session_manager.keep_alive = mock.Mock()
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered()

        session_id = self.session_manager.open_session(self.switch_mock, 'patate')

        self.switch_mock.should_receive('commit_transaction').once().ordered()

        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(session_id, 'patate')

        self.session_manager.commit_session(session_id)

        self.session_manager.keep_alive.assert_called_with(session_id)

    def test_rollback_session(self):
        self.session_manager.keep_alive = mock.Mock()
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered()

        session_id = self.session_manager.open_session(self.switch_mock, 'patate')

        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(session_id, 'patate')

        self.switch_mock.should_receive('rollback_transaction').once().ordered()

        self.session_manager.rollback_session(session_id)

        self.session_manager.keep_alive.assert_called_with(session_id)

    def test_unknown_session(self):
        with self.assertRaises(UnknownResource):
            self.session_manager.get_switch_for_session('patate')

    def test_close_session_with_error(self):
        self.switch_mock.should_receive('connect').once().ordered()
        self.switch_mock.should_receive('start_transaction').once().ordered()

        self.session_manager.open_session(self.switch_mock, 'patate')

        self.switch_mock.should_receive('end_transaction').and_raise(OperationNotCompleted()).once().ordered()
        self.switch_mock.should_receive('disconnect').once().ordered()

        # Even when end_transaction fails, the session must be removed.
        with self.assertRaises(OperationNotCompleted):
            self.session_manager.close_session('patate')

        with self.assertRaises(UnknownResource):
            self.session_manager.get_switch_for_session('patate')
| apache-2.0 |
chugle/myapp | applications/welcome/controllers/appadmin.py | 6 | 15339 | # -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
response.subtitle = 'Database Administration (appadmin)'

# ## critical --- make a copy of the environment
# The snapshot lets appadmin evaluate user-supplied query strings against
# the controller's globals (DAL objects, etc.) without mutating them.
global_env = copy.copy(globals())
global_env['datetime'] = datetime

http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    hosts = (http_host, )

# Only allow appadmin over HTTPS/proxied-secure channels or from the local
# machine; any other remote address is rejected outright.
if request.env.http_x_forwarded_for or request.is_https:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))

# Require admin credentials; unauthenticated users are bounced to the
# admin login with a redirect back to the requested page.
if (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
    redirect(URL('admin', 'default', 'index',
                 vars=dict(send=URL(args=request.args, vars=request.vars))))

ignore_rw = True  # appadmin ignores readable/writable field restrictions
response.view = 'appadmin.html'
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                 args=[request.application])], [T('db'), False,
                 URL('index')], [T('state'), False,
                 URL('state')], [T('cache'), False,
                 URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
def get_databases(request):
    """Collect every DAL connection found in the admin environment.

    Returns a mapping of variable name -> database object. GQLDB is
    presumably only defined on the App Engine runtime, so a failure there
    falls back to checking for a plain SQLDB.
    """
    found = {}
    for name, candidate in global_env.items():
        try:
            is_db = isinstance(candidate, GQLDB)
        except:
            is_db = isinstance(candidate, SQLDB)
        if is_db:
            found[name] = candidate
    return found
# Snapshot of all DAL databases defined in the running application.
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate *text* as a Python expression in the admin environment.

    WARNING: this exec()s arbitrary text supplied via the request; it is
    only acceptable because appadmin is restricted (above) to
    authenticated admins on a local/secure channel.
    """
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Resolve request.args[0] to a database object, or flash-and-redirect."""
    if request.args and request.args[0] in databases:
        return eval_in_global_env(request.args[0])
    # Unknown database name: bounce back to the index page.
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_table(request):
    """Return (db, tablename) for the first two request args, or redirect."""
    db = get_database(request)
    if len(request.args) > 1 and request.args[1] in db.tables:
        return (db, request.args[1])
    # Missing or unknown table name: bounce back to the index page.
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_query(request):
    """Evaluate request.vars.query into a DAL query object.

    Returns None when the expression is missing or fails to evaluate;
    callers treat None as "no query".
    """
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None
def query_by_table_type(tablename, db, request=request):
    """Build a default "select everything" query string for *tablename*.

    Keyed tables filter on their first primary-key field (non-empty for
    string/text keys, >0 otherwise); id-based tables use id>0.
    """
    table = db[tablename]
    if hasattr(table, '_primarykey'):
        firstkey = table[table._primarykey[0]]
        cond = '!=""' if firstkey.type in ['string', 'text'] else '>0'
        return '%s.%s.%s%s' % (
            request.args[0], request.args[1], firstkey.name, cond)
    return '%s.%s.id>0' % tuple(request.args[:2])
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List every database (and its tables) known to the application."""
    return {'databases': databases}
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Render an insert form for the requested table and process submissions."""
    (db, tablename) = get_table(request)
    tbl = db[tablename]
    form = SQLFORM(tbl, ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=tbl)
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file referenced by the request.

    Delegates to response.download(), which resolves the upload field,
    content type and authorization. (Removed an unused local `import os`.)
    """
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Export the rows matched by the current query as a CSV attachment."""
    import gluon.contenttype
    response.headers['Content-Type'] = gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    # Name the download after the "<db>.<table>" prefix of the query string.
    disposition = 'attachment; filename=%s_%s.csv' \
        % tuple(request.vars.query.split('.')[:2])
    response.headers['Content-disposition'] = disposition
    return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
    """Load CSV data from the open *file* object into *table* via the DAL."""
    table.import_from_csv_file(file)
def select():
    """Browse, update, delete and CSV-import records matching a query.

    The query string may be a shorthand "table.field=value" (rewritten to
    a full DAL expression) or a full expression evaluated in the admin
    environment. Results are paginated 100 rows at a time.
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    # Shorthand "table.field=value"; numeric values only for id-based
    # tables, any value for keyed tables.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            # Expand the shorthand into a full "db.table.field==value".
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                    match.group('table'), match.group('field'),
                    match.group('value'))
    else:
        # No query supplied: reuse the last one from the session.
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    stop = start + 100  # page size is 100 rows
    table = None
    rows = []
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        # Clicking the same column twice toggles ascending/descending.
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(
                    error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                      value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value=T('submit')))),
                _action=URL(r=request, args=request.args))
    tb = None
    if form.accepts(request.vars, formname=None):
        # Extract the table name from "db.table...." for the CSV importer.
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query).count()
            if form.vars.update_check and form.vars.update_fields:
                # Bulk update: fields are given as "f1=v1, f2=v2, ...".
                db(query).update(**eval_in_global_env('dict(%s)'
                                 % form.vars.update_fields))
                response.flash = T('%s %%{row} updated', nrows)
            elif form.vars.delete_check:
                db(query).delete()
                response.flash = T('%s %%{row} deleted', nrows)
                nrows = db(query).count()
            if orderby:
                rows = db(query, ignore_common_filters=True).select(limitby=(
                    start, stop), orderby=eval_in_global_env(orderby))
            else:
                rows = db(query, ignore_common_filters=True).select(
                    limitby=(start, stop))
        except Exception, e:
            import traceback
            tb = traceback.format_exc()
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    # begin handle upload csv
    csv_table = table or request.vars.table
    if csv_table:
        formcsv = FORM(str(T('or import from csv file')) + " ",
                       INPUT(_type='file', _name='csvfile'),
                       INPUT(_type='hidden', _value=csv_table, _name='table'),
                       INPUT(_type='submit', _value=T('import')))
    else:
        formcsv = None
    if formcsv and formcsv.process().accepted:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    # end handle upload csv
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        formcsv=formcsv,
        tb=tb,
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit or delete a single record of the requested table.

    Supports both keyed tables (record located via primary-key fields in
    request.vars) and id-based tables (id taken from request.args(2)).
    Redirects back to select() when the record is missing or after a
    successful save/delete.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    if keyed:
        # Locate the record by whichever primary-key field is present in
        # the request vars.
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[
                0]], ignore_common_filters=True).select().first()
    else:
        record = db(db[table].id == request.args(
            2), ignore_common_filters=True).select().first()

    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))

    if keyed:
        # Primary-key fields must not be editable through the form.
        for k in db[table]._primarykey:
            db[table][k].writable = False

    form = SQLFORM(
        db[table], record, deletable=True, delete_label=T('Check to delete'),
        ignore_rw=ignore_rw and not keyed,
        linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                                                      f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Render the current request/session/response state (view-driven page)."""
    return {}
def ccache():
    """Inspect and clear the application's RAM and disk caches.

    Renders clear buttons, then gathers per-cache statistics (entry
    counts, sizes via guppy when available, hit ratios, oldest entry age)
    for the RAM cache, the shelve-backed disk cache, and their totals.
    """
    form = FORM(
        P(TAG.BUTTON(
            T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
        P(TAG.BUTTON(
            T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
        P(TAG.BUTTON(
            T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
    )

    if form.accepts(request.vars, session):
        clear_ram = False
        clear_disk = False
        session.flash = ""
        # "yes" clears both caches; "ram"/"disk" clear individually.
        if request.vars.yes:
            clear_ram = clear_disk = True
        if request.vars.ram:
            clear_ram = True
        if request.vars.disk:
            clear_disk = True

        if clear_ram:
            cache.ram.clear()
            session.flash += T("Ram Cleared")
        if clear_disk:
            cache.disk.clear()
            session.flash += T("Disk Cleared")

        redirect(URL(r=request))

    try:
        # guppy is optional; without it byte/object counts stay at 0.
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False

    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker

    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    # copy.copy is shallow: give disk/total their own key lists.
    disk['keys'] = []
    total['keys'] = []

    def GetInHMS(seconds):
        # Convert a duration in seconds to an (hours, minutes, seconds) tuple.
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)

        return (hours, minutes, seconds)

    # The special dict entry in cache storage carries hit/miss counters;
    # every other entry is a (timestamp, value) pair.
    for key, value in cache.ram.storage.items():
        if isinstance(value, dict):
            ram['hits'] = value['hit_total'] - value['misses']
            ram['misses'] = value['misses']
            try:
                ram['ratio'] = ram['hits'] * 100 / value['hit_total']
            except (KeyError, ZeroDivisionError):
                ram['ratio'] = 0
        else:
            if hp:
                ram['bytes'] += hp.iso(value[1]).size
                ram['objects'] += hp.iso(value[1]).count
            ram['entries'] += 1
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
            ram['keys'].append((key, GetInHMS(time.time() - value[0])))

    folder = os.path.join(request.folder,'cache')
    if not os.path.exists(folder):
        os.mkdir(folder)
    # Hold the cache lock while reading the shelve so a concurrent writer
    # cannot corrupt it.
    locker = open(os.path.join(folder, 'cache.lock'), 'a')
    portalocker.lock(locker, portalocker.LOCK_EX)
    disk_storage = shelve.open(
        os.path.join(folder, 'cache.shelve'))
    try:
        for key, value in disk_storage.items():
            if isinstance(value, dict):
                disk['hits'] = value['hit_total'] - value['misses']
                disk['misses'] = value['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if hp:
                    disk['bytes'] += hp.iso(value[1]).size
                    disk['objects'] += hp.iso(value[1]).count
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))
    finally:
        portalocker.unlock(locker)
        locker.close()
        disk_storage.close()

    total['entries'] = ram['entries'] + disk['entries']
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    total['keys'] = ram['keys'] + disk['keys']

    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0

    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']

    # Convert the oldest-entry timestamps into age tuples for display.
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])

    def key_table(keys):
        # Render (key, (h, m, s)) pairs as an HTML table.
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))

    ram['keys'] = key_table(ram['keys'])
    disk['keys'] = key_table(disk['keys'])
    total['keys'] = key_table(total['keys'])

    return dict(form=form, total=total,
                ram=ram, disk=disk, object_stats=hp != False)
| gpl-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/multiprocessing/popen_spawn_win32.py | 102 | 2998 | import os
import msvcrt
import signal
import sys
import _winapi
from . import context
from . import spawn
from . import reduction
from . import util
__all__ = ['Popen']
#
#
#
# Exit code passed to TerminateProcess(); wait() maps it back to
# -signal.SIGTERM so a forced termination reports like a POSIX SIGTERM death.
TERMINATE = 0x10000
# True when running as a frozen Windows executable (sys.frozen is set).
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
# True when running inside a pythonservice.exe Windows service host.
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    # Identifies the start method this Popen implements (see context module).
    method = 'spawn'
    def __init__(self, process_obj):
        """Spawn a fresh interpreter and pickle *process_obj* to it over a pipe."""
        prep_data = spawn.get_preparation_data(process_obj._name)
        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        # The parent pid and the raw read-handle value are passed to the
        # child on its command line (consumed by spawn_main()).
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)
        with open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(
                    spawn.get_executable(), cmd,
                    None, None, False, 0, None, None, None)
                # The thread handle is never used; release it immediately.
                _winapi.CloseHandle(ht)
            except:
                # CreateProcess failed: no child will ever take ownership of
                # the read end, so close it here before re-raising.
                _winapi.CloseHandle(rhandle)
                raise
            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            # The process handle doubles as a waitable sentinel.
            self.sentinel = int(hp)
            # Guarantee the handle is closed when this object is collected.
            util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
            # send information to child
            context.set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                context.set_spawning_popen(None)
    def duplicate_for_child(self, handle):
        """Duplicate *handle* into the child; only valid while spawning."""
        assert self is context.get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)
    def wait(self, timeout=None):
        """Wait up to *timeout* seconds (forever if None); return exit code or None."""
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                # Seconds -> whole milliseconds, rounded to nearest, floored at 0.
                msecs = max(0, int(timeout * 1000 + 0.5))
            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = _winapi.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # Our own forced termination: report it like SIGTERM.
                    code = -signal.SIGTERM
                self.returncode = code
        return self.returncode
    def poll(self):
        """Non-blocking check: return the exit code, or None if still running."""
        return self.wait(timeout=0)
    def terminate(self):
        """Forcibly end the child using the distinguished TERMINATE exit code."""
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                # The process may have already exited; only swallow the error
                # if it is really gone within a short grace period.
                if self.wait(timeout=1.0) is None:
                    raise
| lgpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/openstack/os_recordset.py | 29 | 7460 | #!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_recordset
short_description: Manage OpenStack DNS recordsets
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
updated. Only the I(records), I(description), and I(ttl) values
can be updated.
options:
zone:
description:
- Zone managing the recordset
required: true
name:
description:
- Name of the recordset
required: true
recordset_type:
description:
- Recordset type
required: true
records:
description:
- List of recordset definitions
required: true
description:
description:
- Description of the recordset
ttl:
description:
- TTL (Time To Live) value in seconds
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a recordset named "www.example.net."
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
recordset_type: primary
records: ['10.1.1.1']
description: test recordset
ttl: 3600
# Update the TTL on existing "www.example.net." recordset
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
ttl: 7200
# Delete recorset named "www.example.net."
- os_recordset:
cloud: mycloud
state: absent
zone: example.net.
name: www
'''
RETURN = '''
recordset:
description: Dictionary describing the recordset.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique recordset ID
type: string
sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
name:
description: Recordset name
type: string
sample: "www.example.net."
zone_id:
description: Zone id
type: string
sample: 9508e177-41d8-434e-962c-6fe6ca880af7
type:
description: Recordset type
type: string
sample: "A"
description:
description: Recordset description
type: string
sample: "Test description"
ttl:
description: Zone TTL value
type: int
sample: 3600
records:
description: Recordset records
type: list
sample: ['10.0.0.1']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, records, description, ttl, zone, recordset):
if state == 'present':
if recordset is None:
return True
if records is not None and recordset.records != records:
return True
if description is not None and recordset.description != description:
return True
if ttl is not None and recordset.ttl != ttl:
return True
if state == 'absent' and recordset:
return True
return False
def main():
    """Module entry point: reconcile the requested recordset with OpenStack."""
    argument_spec = openstack_full_argument_spec(
        zone=dict(required=True),
        name=dict(required=True),
        recordset_type=dict(required=False),
        records=dict(required=False, type='list'),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    # recordset_type and records are only mandatory when state is 'present'.
    module = AnsibleModule(argument_spec,
                           required_if=[
                               ('state', 'present',
                                ['recordset_type', 'records'])],
                           supports_check_mode=True,
                           **module_kwargs)
    zone = module.params.get('zone')
    name = module.params.get('name')
    state = module.params.get('state')
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        recordset_type = module.params.get('recordset_type')
        recordset_filter = {'type': recordset_type}
        # Recordset names are fully qualified, hence "<name>.<zone>".
        recordsets = cloud.search_recordsets(zone, name_or_id=name + '.' + zone, filters=recordset_filter)
        if len(recordsets) == 1:
            recordset = recordsets[0]
            try:
                recordset_id = recordset['id']
            except KeyError as e:
                module.fail_json(msg=str(e))
        else:
            # recordsets is filtered by type and should never be more than 1 return
            # NOTE: recordset_id is only bound in the branch above; all later
            # uses are guarded by "recordset is None" checks.
            recordset = None
        if state == 'present':
            records = module.params.get('records')
            description = module.params.get('description')
            ttl = module.params.get('ttl')
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              records, description,
                                                              ttl, zone,
                                                              recordset))
            if recordset is None:
                # No recordset of this name/type exists yet: create one.
                recordset = cloud.create_recordset(
                    zone=zone, name=name, recordset_type=recordset_type,
                    records=records, description=description, ttl=ttl)
                changed = True
            else:
                if records is None:
                    records = []
                pre_update_recordset = recordset
                changed = _system_state_change(state, records,
                                               description, ttl,
                                               zone, pre_update_recordset)
                if changed:
                    # NOTE(review): this rebinds the local ``zone`` variable to
                    # the API result, and exit_json below still reports the
                    # pre-update ``recordset`` — confirm this is intentional.
                    zone = cloud.update_recordset(
                        zone, recordset_id,
                        records=records,
                        description=description,
                        ttl=ttl)
            module.exit_json(changed=changed, recordset=recordset)
        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              None, None,
                                                              None,
                                                              None, recordset))
            if recordset is None:
                changed = False
            else:
                cloud.delete_recordset(zone, recordset_id)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| gpl-3.0 |
ppwwyyxx/tensorpack | examples/DoReFa-Net/resnet-dorefa.py | 1 | 6148 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: resnet-dorefa.py
import argparse
import numpy as np
import os
import cv2
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils.varreplace import remap_variables
from dorefa import get_dorefa
from imagenet_utils import ImageNetModel, eval_classification, fbresnet_augmentor
"""
This script loads the pre-trained ResNet-18 model with (W,A,G) = (1,4,32)
It has 59.2% top-1 and 81.5% top-5 validation error on ILSVRC12 validation set.
To run on images:
./resnet-dorefa.py --load ResNet-18-14f.npz --run a.jpg b.jpg
To eval on ILSVRC validation set:
./resnet-dorefa.py --load ResNet-18-14f.npz --eval --data /path/to/ILSVRC
"""
# Default DoReFa quantization bit widths, overridable via --dorefa:
# 1-bit (binarized) weights, 4-bit activations, full-precision gradients,
# matching the pretrained "(W,A,G) = (1,4,32)" model in the module docstring.
BITW = 1
BITA = 4
BITG = 32
class Model(ModelDesc):
    """ResNet-18 graph with DoReFa quantization of weights/activations/gradients."""
    def inputs(self):
        # 224x224 RGB images plus integer class labels.
        return [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'input'),
                tf.TensorSpec([None], tf.int32, 'label')]
    def build_graph(self, image, label):
        # Scale 8-bit pixel values to roughly [0, 1).
        image = image / 256.0
        # Quantizers for weights (fw), activations (fa) and gradients (fg).
        fw, fa, fg = get_dorefa(BITW, BITA, BITG)
        def new_get_variable(v):
            name = v.op.name
            # don't binarize first and last layer
            if not name.endswith('W') or 'conv1' in name or 'fct' in name:
                return v
            else:
                logger.info("Binarizing weight {}".format(v.op.name))
                return fw(v)
        def nonlin(x):
            # Clip activations into [0, 1] before quantizing them.
            return tf.clip_by_value(x, 0.0, 1.0)
        def activate(x):
            return fa(nonlin(x))
        def resblock(x, channel, stride):
            # Pre-activation residual block whose stem uses quantized activations.
            def get_stem_full(x):
                return (LinearWrap(x)
                        .Conv2D('c3x3a', channel, 3)
                        .BatchNorm('stembn')
                        .apply(activate)
                        .Conv2D('c3x3b', channel, 3)())
            channel_mismatch = channel != x.get_shape().as_list()[3]
            if stride != 1 or channel_mismatch or 'pool1' in x.name:
                # handling pool1 is to work around an architecture bug in our model
                if stride != 1 or 'pool1' in x.name:
                    x = AvgPooling('pool', x, stride, stride)
                x = BatchNorm('bn', x)
                x = activate(x)
                # 1x1 convolution projects the shortcut to the new channel count.
                shortcut = Conv2D('shortcut', x, channel, 1)
                stem = get_stem_full(x)
            else:
                shortcut = x
                x = BatchNorm('bn', x)
                x = activate(x)
                stem = get_stem_full(x)
            return shortcut + stem
        def group(x, name, channel, nr_block, stride):
            # The first block of a group may downsample; the rest use stride 1.
            with tf.variable_scope(name + 'blk1'):
                x = resblock(x, channel, stride)
            for i in range(2, nr_block + 1):
                with tf.variable_scope(name + 'blk{}'.format(i)):
                    x = resblock(x, channel, 1)
            return x
        with remap_variables(new_get_variable), \
                argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
                argscope(Conv2D, use_bias=False, nl=tf.identity):
            logits = (LinearWrap(image)
                      # use explicit padding here, because our private training framework has
                      # different padding mechanisms from TensorFlow
                      .tf.pad([[0, 0], [3, 2], [3, 2], [0, 0]])
                      .Conv2D('conv1', 64, 7, stride=2, padding='VALID', use_bias=True)
                      .tf.pad([[0, 0], [1, 1], [1, 1], [0, 0]], 'SYMMETRIC')
                      .MaxPooling('pool1', 3, 2, padding='VALID')
                      .apply(group, 'conv2', 64, 2, 1)
                      .apply(group, 'conv3', 128, 2, 2)
                      .apply(group, 'conv4', 256, 2, 2)
                      .apply(group, 'conv5', 512, 2, 2)
                      .BatchNorm('lastbn')
                      .apply(nonlin)
                      .GlobalAvgPooling('gap')
                      .tf.multiply(49)  # this is due to a bug in our model design
                      .FullyConnected('fct', 1000)())
            tf.nn.softmax(logits, name='output')
            ImageNetModel.compute_loss_and_error(logits, label)
def get_inference_augmentor():
    """Return the standard fbresnet augmentor in validation mode (False = no training-time augmentation)."""
    return fbresnet_augmentor(False)
def run_image(model, sess_init, inputs):
    """Classify each image path in *inputs* and print its top-10 predictions."""
    pred_config = PredictConfig(
        model=model,
        session_init=sess_init,
        input_names=['input'],
        output_names=['output']
    )
    predict_func = OfflinePredictor(pred_config)
    meta = dataset.ILSVRCMeta()
    # Human-readable names for the 1000 ILSVRC12 classes.
    words = meta.get_synset_words_1000()
    transformers = get_inference_augmentor()
    for f in inputs:
        assert os.path.isfile(f)
        img = cv2.imread(f).astype('float32')
        # NOTE(review): this assert can never fire — if cv2.imread() returned
        # None, the .astype() call above would already have raised.
        assert img is not None
        # Add a batch dimension of 1 for the predictor.
        img = transformers.augment(img)[np.newaxis, :, :, :]
        o = predict_func(img)
        prob = o[0][0]
        # Indices of the ten highest-probability classes, best first.
        ret = prob.argsort()[-10:][::-1]
        names = [words[i] for i in ret]
        print(f + ":")
        print(list(zip(names, prob[ret])))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='the physical ids of GPUs to use')
    parser.add_argument('--load', help='load a npz pretrained model')
    parser.add_argument('--data', help='ILSVRC dataset dir')
    parser.add_argument('--dorefa',
                        help='number of bits for W,A,G, separated by comma. Defaults to \'1,4,32\'',
                        default='1,4,32')
    parser.add_argument(
        '--run', help='run on a list of images with the pretrained model', nargs='*')
    parser.add_argument('--eval', action='store_true')
    args = parser.parse_args()
    # Override the module-level quantization settings from the CLI.
    BITW, BITA, BITG = map(int, args.dorefa.split(','))
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.eval:
        # Evaluate classification error on the ILSVRC12 validation split.
        ds = dataset.ILSVRC12(args.data, 'val', shuffle=False)
        ds = AugmentImageComponent(ds, get_inference_augmentor())
        ds = BatchData(ds, 192, remainder=True)
        eval_classification(Model(), SmartInit(args.load), ds)
    elif args.run:
        # Ad-hoc inference on a list of image files; requires a .npz checkpoint.
        assert args.load.endswith('.npz')
        run_image(Model(), SmartInit(args.load), args.run)
| apache-2.0 |
udaciouspeople/site | themes/tranquilpeak/.node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
  'CONFIGURATION_NAME',
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'LIB_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
  'SHARED_LIB_DIR',
  'SHARED_LIB_PREFIX',
  'SHARED_LIB_SUFFIX',
  'STATIC_LIB_PREFIX',
  'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
# Seed the defaults so each identity variable expands to a literal reference
# to itself, i.e. it survives gyp's expansion step verbatim.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file per .gyp input referenced by targets."""
  # Map each output path to the .gyp input it is generated from.  Several
  # qualified targets can live in the same .gyp file, so de-duplicate here.
  gypd_for_input = {}
  for qualified_target in target_list:
    input_file, _target = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
    if input_file[-4:] != '.gyp':
      continue
    gypd_file = input_file[:-4] + params['options'].suffix + '.gypd'
    if gypd_file not in gypd_for_input:
      gypd_for_input[gypd_file] = input_file
  # Dump the fully merged/expanded dict for each input next to it on disk.
  for gypd_file, input_file in gypd_for_input.iteritems():
    out = open(gypd_file, 'w')
    pprint.pprint(data[input_file], out)
    out.close()
| mit |
JudoWill/glue | glue/core/tests/test_subset_group.py | 2 | 6779 | from mock import MagicMock, patch
import numpy as np
from .. import DataCollection, Data, SubsetGroup
from .. import subset
from ..subset import SubsetState
from ..subset_group import coerce_subset_groups
from .test_state import clone
class TestSubsetGroup(object):
    """Behavioral tests for SubsetGroup <-> DataCollection synchronization."""
    def setup_method(self, method):
        # Fresh two-dataset collection and an unregistered group per test.
        x = Data(label='x', x=[1, 2, 3])
        y = Data(label='y', y=[2, 4, 8])
        self.dc = DataCollection([x, y])
        self.sg = SubsetGroup()
    def test_creation(self):
        self.sg.register(self.dc)
        sg = self.sg
        for subset, data in zip(sg.subsets, self.dc):
            assert subset is data.subsets[0]
    def test_attributes_matched_to_group(self):
        self.sg.register(self.dc)
        sg = self.sg
        for subset in sg.subsets:
            assert subset.subset_state is sg.subset_state
            assert subset.label is sg.label
    def test_attributes_synced_to_group(self):
        # Editing one member subset must propagate to the group and siblings.
        self.sg.register(self.dc)
        sg = self.sg
        sg.subsets[0].subset_state = SubsetState()
        sg.subsets[0].label = 'testing'
        for subset in sg.subsets:
            assert subset.subset_state is sg.subset_state
            assert subset.label is sg.label
    def test_set_style_overrides(self):
        # Style, unlike state/label, can be overridden per-subset.
        self.sg.register(self.dc)
        sg = self.sg
        sg.subsets[0].style.color = 'blue'
        for s in sg.subsets[1:]:
            assert s.style.color != 'blue'
        assert sg.subsets[0].style.color == 'blue'
    def test_new_subset_group_syncs_style(self):
        sg = self.dc.new_subset_group()
        for s in sg.subsets:
            assert s.style == sg.style
    def test_set_group_style_clears_override(self):
        sg = self.dc.new_subset_group()
        style = sg.style.copy()
        style.parent = sg.subsets[0]
        sg.subsets[0].style = style
        style.color = 'blue'
        sg.style.color = 'red'
        assert sg.subsets[0].style.color == 'red'
    def test_new_data_creates_subset(self):
        # Data appended after group creation gets a member subset automatically.
        sg = self.dc.new_subset_group()
        d = Data(label='z', z=[10, 20, 30])
        self.dc.append(d)
        assert d.subsets[0] in sg.subsets
    def test_remove_data_deletes_subset(self):
        sg = self.dc.new_subset_group()
        sub = self.dc[0].subsets[0]
        self.dc.remove(self.dc[0])
        assert sub not in sg.subsets
    def test_subsets_given_data_reference(self):
        sg = self.dc.new_subset_group()
        assert sg.subsets[0].data is self.dc[0]
    def test_data_collection_subset(self):
        sg = self.dc.new_subset_group()
        assert tuple(self.dc.subset_groups) == (sg,)
        sg2 = self.dc.new_subset_group()
        assert tuple(self.dc.subset_groups) == (sg, sg2)
    def test_remove_subset(self):
        sg = self.dc.new_subset_group()
        n = len(self.dc[0].subsets)
        self.dc.remove_subset_group(sg)
        assert len(self.dc[0].subsets) == n - 1
    def test_edit_broadcasts(self):
        # A per-subset style edit broadcasts exactly once.
        sg = self.dc.new_subset_group()
        bcast = MagicMock()
        sg.subsets[0].broadcast = bcast
        bcast.reset_mock()
        sg.subsets[0].style.color = 'red'
        assert bcast.call_count == 1
    def test_braodcast(self):
        # NOTE(review): method name misspells "broadcast"; pytest still
        # collects it, but consider renaming for discoverability.
        sg = self.dc.new_subset_group()
        bcast = MagicMock()
        sg.subsets[0].broadcast = bcast
        bcast.reset_mock()
        sg.subset_state = SubsetState()
        assert bcast.call_count == 1
        sg.style.color = '#123456'
        assert bcast.call_count == 2
        sg.label = 'new label'
        assert bcast.call_count == 3
    def test_auto_labeled(self):
        sg = self.dc.new_subset_group()
        assert sg.label is not None
    def test_label_color_cycle(self):
        # Successive groups get distinct auto-generated labels and colors.
        sg1 = self.dc.new_subset_group()
        sg2 = self.dc.new_subset_group()
        assert sg1.label != sg2.label
        assert sg1.style.color != sg2.style.color
    def test_new_label(self):
        sg = self.dc.new_subset_group(label='test')
        assert sg.label == 'test'
    def test_new_state(self):
        state = SubsetState()
        sg = self.dc.new_subset_group(subset_state=state)
        assert sg.subset_state is state
    def test_deleted_subsets_dont_respawn(self):
        # regression test
        sg1 = self.dc.new_subset_group()
        self.dc.remove_subset_group(sg1)
        d = Data(label='z', z=[1, 2, 3])
        self.dc.append(d)
        assert len(d.subsets) == 0
class TestSerialze(TestSubsetGroup):
    """Serialization round-trip tests (via clone).

    NOTE(review): the class name misspells "Serialize"; pytest still
    collects it because it starts with "Test".
    """
    def test_save_group(self):
        sg = self.dc.new_subset_group()
        sg2 = clone(sg)
        assert sg.style == sg2.style
        assert sg.label == sg2.label
    def test_save_subset(self):
        # The subset state ('x' > 1) must survive cloning the whole collection.
        sg = self.dc.new_subset_group()
        sg.subset_state = self.dc[0].id['x'] > 1
        sub = sg.subsets[0]
        dc = clone(self.dc)
        sub2 = dc[0].subsets[0]
        np.testing.assert_array_equal(sub2.to_mask(), [False, True, True])
        assert sub2.style == sg.style
        assert sub2.label == sg.label
    def test_save_override(self):
        # Per-subset style overrides must survive cloning as well.
        sg = self.dc.new_subset_group()
        sg.subsets[0].style.color = 'blue'
        dc = clone(self.dc)
        assert dc.subset_groups[0].style == sg.style
        assert dc.subset_groups[0].subsets[0].style.color == 'blue'
class TestCombination(object):
    """Logical operators on SubsetGroup produce the matching SubsetState types."""
    def check_type_and_children(self, s1, s2, s3, statetype):
        # Helper asserting that s3 combines the states of s1 and s2.
        # NOTE(review): not called by any test in this class — possibly a
        # leftover from an earlier version of these tests.
        assert isinstance(s3, statetype)
        assert s3.state1 is s1.subset_state
        assert s3.state2 is s2.subset_state
    def test_and(self):
        s1, s2 = SubsetGroup(), SubsetGroup()
        assert isinstance(s1 & s2, subset.AndState)
    def test_or(self):
        s1, s2 = SubsetGroup(), SubsetGroup()
        assert isinstance(s1 | s2, subset.OrState)
    def test_xor(self):
        s1, s2 = SubsetGroup(), SubsetGroup()
        assert isinstance(s1 ^ s2, subset.XorState)
    def test_invert(self):
        s1 = SubsetGroup()
        assert isinstance(~s1, subset.InvertState)
class TestCoerce(object):
    """Tests for coerce_subset_groups, which adopts legacy non-grouped subsets."""
    def setup_method(self, method):
        self.x = Data(label='x', x=[1, 2, 3])
        self.y = Data(label='y', y=[1, 2, 3])
        self.dc = DataCollection([self.x, self.y])
    def test_noop_on_good_setup(self):
        # A collection with no stray subsets should coerce silently.
        with patch('glue.core.subset_group.warn') as warn:
            coerce_subset_groups(self.dc)
        assert warn.call_count == 0
    def test_reassign_non_grouped_subsets(self):
        # A subset created directly on a Data object gets wrapped into a new
        # group (state/style/label preserved) and a warning is issued once.
        s = self.x.new_subset()
        dc = self.dc
        with patch('glue.core.subset_group.warn') as warn:
            coerce_subset_groups(dc)
        assert len(dc.subset_groups) == 1
        assert dc.subset_groups[0].subset_state is s.subset_state
        assert dc.subset_groups[0].style == s.style
        assert dc.subset_groups[0].label == s.label
        assert warn.call_count == 1
| bsd-3-clause |
flavour/RedHat | modules/ClimateDataPortal/Cache.py | 41 | 2217 |
import errno
import os
from os.path import join, exists
from os import stat, makedirs
# create folder for cache:
# mkdir -p /tmp/climate_data_portal/images/recent/
# mkdir -p /tmp/climate_data_portal/images/older/
MAX_CACHE_FOLDER_SIZE = 2**24 # 16 MiB
class TwoStageCache(object):
    """Skeleton for a two-stage (recent/older) file cache.

    Only the constructor stores state; purge() and retrieve() are
    placeholders — the working logic currently lives in the module-level
    get_cached_or_generated_file() function.
    """
    def __init__(self, folder, max_size):
        self.folder = folder
        # BUG FIX: the original read ``self.max_size`` (a bare attribute
        # lookup that raised AttributeError) instead of assigning the
        # constructor argument.
        self.max_size = max_size
    def purge(self):
        """Placeholder: evict entries until the cache fits max_size."""
        pass
    def retrieve(self, file_name, generate_if_not_found):
        """Placeholder: return a cached file, generating it on a miss."""
        pass
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    An already-existing directory is not an error; an existing
    non-directory at *path* still raises OSError.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # EEXIST alone is not enough: the path may exist as a regular
        # file, in which case the error must propagate.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def get_cached_or_generated_file(cache_file_name, generate):
    """Return the path of a cached file, generating it on a cache miss.

    Files live in a two-stage cache under /tmp: a "recent" folder and an
    "older" folder.  A hit in "older" promotes the file to "recent"; on a
    miss, ``generate`` is called with the destination path and must create
    the file there.  A running byte total is kept in a "size" file; once it
    exceeds MAX_CACHE_FOLDER_SIZE the "older" generation is discarded.
    """
    # this needs to become a setting
    climate_data_image_cache_path = join(
        "/tmp","climate_data_portal","images"
    )
    recent_cache = join(climate_data_image_cache_path, "recent")
    mkdir_p(recent_cache)
    older_cache = join(climate_data_image_cache_path, "older")
    mkdir_p(older_cache)
    recent_cache_path = join(recent_cache, cache_file_name)
    if not exists(recent_cache_path):
        older_cache_path = join(older_cache, cache_file_name)
        if exists(older_cache_path):
            # Promote the older copy into the recent folder.
            # BUG FIX: bare rename() was a NameError — only ``stat`` and
            # ``makedirs`` are imported from os in this module.
            os.rename(older_cache_path, recent_cache_path)
        else:
            generate(recent_cache_path)
        file_path = recent_cache_path
        # update the folder size file (race condition?)
        folder_size_file_path = join(climate_data_image_cache_path, "size")
        # BUG FIX: mode "w+" truncated the size file on open, so the stored
        # total was lost on every call; open read/write without truncating
        # and only create the file when it does not exist yet.
        try:
            folder_size_file = open(folder_size_file_path, "r+")
        except IOError:
            folder_size_file = open(folder_size_file_path, "w+")
        folder_size_file_contents = folder_size_file.read()
        try:
            folder_size = int(folder_size_file_contents)
        except ValueError:
            folder_size = 0
        folder_size_file.seek(0)
        folder_size_file.truncate()
        folder_size += stat(file_path).st_size
        if folder_size > MAX_CACHE_FOLDER_SIZE:
            # BUG FIX: bare rmdir() was a NameError, and os.rmdir() cannot
            # remove a non-empty directory anyway.  Drop the whole "older"
            # generation; mkdir_p recreates the folder on the next call.
            import shutil
            shutil.rmtree(older_cache, ignore_errors=True)
        folder_size_file.write(str(folder_size))
        folder_size_file.close()
    else:
        # use the existing cached image
        file_path = recent_cache_path
    return file_path
| mit |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/sanfrancisco/zone/aggregate_SSS_DDD_from_building.py | 2 | 2259 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class aggregate_SSS_DDD_from_building(Variable):
    """aggregate SSS_DDD variable from building"""
    # Aggregated totals are reported as 32-bit integers.
    _return_type="Int32"
    def __init__(self, var_part1, var_part2):
        # The two wildcard parts (SSS, DDD) are joined into the concrete
        # building variable name, e.g. "employment_of_sector_4".
        self.variable_name = "%s_%s"% (var_part1,var_part2)
        Variable.__init__(self)
    def dependencies(self):
        # NOTE(review): the trailing ", )" inside the aggregate expression
        # looks accidental — confirm the expression parser tolerates it.
        return ["sanfrancisco.building.zone_id",
                "_aggregate_%s=zone.aggregate(sanfrancisco.building.%s, )" % (self.variable_name, self.variable_name)]
    def compute(self, dataset_pool):
        # The aggregation itself is performed by the "_aggregate_..."
        # dependency above; here we just read its result off the zone dataset.
        return self.get_dataset().get_attribute("_aggregate_%s" % self.variable_name)
    def post_check(self, values, dataset_pool=None):
        # Sanity bound: a per-zone aggregate cannot exceed the overall total.
        size = dataset_pool.get_dataset("building").get_attribute(self.variable_name).sum()
        self.do_check("x >= 0 and x <= " + str(size), values)
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from numpy import array
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
    """Unit test: aggregate a building attribute up to the zone level."""
    def test_my_inputs(self):
        tester = VariableTester(
            __file__,
            package_order=['sanfrancisco','urbansim'],
            test_data={
            'building':
                {"building_id": array([1,2,3,4,5,6]),
                 "parcel_id": array([1,1,2,2,3,3]),
                 "employment_of_sector_4":array([0,1,4,0,2,5]),
                 },
            'parcel':
                {
                 "parcel_id":array([1,2,3]),
                 "zone_id": array([1,1,2]),
                 },
            'zone':
                {
                 "zone_id": array([1,2]),
                 },
            }
        )
        # Zone 1 holds parcels 1-2 (buildings 1-4): 0+1+4+0 = 5.
        # Zone 2 holds parcel 3 (buildings 5-6): 2+5 = 7.
        should_be = array([5, 7])
        instance_name = 'sanfrancisco.zone.aggregate_employment_of_sector_4_from_building'
        tester.test_is_equal_for_family_variable(self, should_be, instance_name)
if __name__=='__main__':
    opus_unittest.main()
| gpl-2.0 |
duramato/SickRage | sickbeard/providers/t411.py | 2 | 8994 | # -*- coding: latin-1 -*-
# Author: djoole <bobby.djoole@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import traceback
import re
import datetime
import time
from requests.auth import AuthBase
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
class T411Provider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "T411")
self.supportsBacklog = True
self.public = False
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.token = None
self.tokenLastUpdate = None
self.cache = T411Cache(self)
self.urls = {'base_url': 'http://www.t411.in/',
'search': 'https://api.t411.in/torrents/search/%s?cid=%s&limit=100',
'rss': 'https://api.t411.in/torrents/top/today',
'login_page': 'https://api.t411.in/auth',
'download': 'https://api.t411.in/torrents/download/%s',
}
self.url = self.urls['base_url']
self.subcategories = [433, 637, 455, 639]
self.minseed = 0
self.minleech = 0
self.confirmed = False
def isEnabled(self):
return self.enabled
def _doLogin(self):
if self.token is not None:
if time.time() < (self.tokenLastUpdate + 30 * 60):
return True
login_params = {'username': self.username,
'password': self.password}
response = self.getURL(self.urls['login_page'], post_data=login_params, timeout=30, json=True)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if response and 'token' in response:
self.token = response['token']
self.tokenLastUpdate = time.time()
self.uid = response['uid'].encode('ascii', 'ignore')
self.session.auth = T411Auth(self.token)
return True
else:
logger.log(u"Token not found in authentication response", logger.WARNING)
return False
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
searchURLS = ([self.urls['search'] % (search_string, u) for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']
for searchURL in searchURLS:
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.getURL(searchURL, json=True)
if not data:
continue
try:
if 'torrents' not in data and mode != 'RSS':
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrents = data['torrents'] if mode != 'RSS' else data
if not torrents:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for torrent in torrents:
if mode == 'RSS' and int(torrent['category']) not in self.subcategories:
continue
try:
title = torrent['name']
torrent_id = torrent['id']
download_url = (self.urls['download'] % torrent_id).encode('utf8')
if not all([title, download_url]):
continue
size = int(torrent['size'])
seeders = int(torrent['seeders'])
leechers = int(torrent['leechers'])
verified = bool(torrent['isVerified'])
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if self.confirmed and not verified and mode != 'RSS':
logger.log(u"Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception as e:
logger.log(u"Invalid torrent data, skipping result: %s" % torrent, logger.DEBUG)
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.DEBUG)
continue
except Exception, e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
#For each search mode sort all the items by seeders if available if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
searchResults = self._doSearch(searchString[0])
for item in searchResults:
title, url = self._get_title_and_url(item)
if title and url:
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
    def seedRatio(self):
        # Seed ratio configured for this provider (consumed by the torrent
        # client integration when snatching results).
        return self.ratio
class T411Auth(AuthBase):
    """requests auth hook that injects the T411 API token.

    Passing an instance as ``auth=`` makes requests call ``__call__`` for
    each outgoing request, stamping the Authorization header with the token.
    """

    def __init__(self, token):
        # Token previously obtained from the T411 authentication endpoint.
        self.token = token

    def __call__(self, request):
        request.headers['Authorization'] = self.token
        return request
class T411Cache(tvcache.TVCache):
    """RSS result cache for the T411 provider with a 10-minute polling floor."""

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # Only poll T411 every 10 minutes max
        self.minTime = 10

    def _getRSSData(self):
        # An empty RSS search asks the provider for its most recent torrents.
        entries = self.provider._doSearch({'RSS': ['']})
        return {'entries': entries}
provider = T411Provider()
| gpl-3.0 |
gitprouser/appengine-bottle-skeleton | lib/bs4/diagnose.py | 63 | 6747 | """Diagnostic functions, mainly for use when doing tech support."""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import cProfile
from StringIO import StringIO
from HTMLParser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
import cProfile
def diagnose(data):
"""Diagnostic suite for isolating common problems."""
print "Diagnostic running on Beautiful Soup %s" % __version__
print "Python version %s" % sys.version
basic_parsers = ["html.parser", "html5lib", "lxml"]
for name in basic_parsers:
for builder in builder_registry.builders:
if name in builder.features:
break
else:
basic_parsers.remove(name)
print (
"I noticed that %s is not installed. Installing it may help." %
name)
if 'lxml' in basic_parsers:
basic_parsers.append(["lxml", "xml"])
try:
from lxml import etree
print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
except ImportError, e:
print (
"lxml is not installed or couldn't be imported.")
if 'html5lib' in basic_parsers:
try:
import html5lib
print "Found html5lib version %s" % html5lib.__version__
except ImportError, e:
print (
"html5lib is not installed or couldn't be imported.")
if hasattr(data, 'read'):
data = data.read()
elif os.path.exists(data):
print '"%s" looks like a filename. Reading data from the file.' % data
with open(data) as fp:
data = fp.read()
elif data.startswith("http:") or data.startswith("https:"):
print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
return
print
for parser in basic_parsers:
print "Trying to parse your markup with %s" % parser
success = False
try:
soup = BeautifulSoup(data, parser)
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "Here's what %s did with the markup:" % parser
print soup.prettify()
print "-" * 80
def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running.
    """
    from lxml import etree
    # iterparse yields (event, element) pairs as parsing progresses;
    # extra keyword arguments are forwarded to it unchanged.
    for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
        print("%s, %4s, %s" % (event, element.tag, element.text))
class AnnouncingParser(HTMLParser):
    """Print every HTMLParser parse event without building any tree.

    Feed it markup to inspect the raw event stream HTMLParser produces,
    independent of any Beautiful Soup machinery.
    """

    def _announce(self, message):
        print(message)

    def handle_starttag(self, tag, attrs):
        self._announce("%s START" % tag)

    def handle_endtag(self, tag):
        self._announce("%s END" % tag)

    def handle_data(self, data):
        self._announce("%s DATA" % data)

    def handle_charref(self, ref):
        self._announce("%s CHARREF" % ref)

    def handle_entityref(self, ref):
        self._announce("%s ENTITYREF" % ref)

    def handle_comment(self, data):
        self._announce("%s COMMENT" % data)

    def handle_decl(self, data):
        self._announce("%s DECL" % data)

    def unknown_decl(self, data):
        self._announce("%s UNKNOWN-DECL" % data)

    def handle_pi(self, data):
        self._announce("%s PI" % data)
def htmlparser_trace(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.
    """
    AnnouncingParser().feed(data)
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
    """Generate a random word-like string of alternating consonants/vowels."""
    letters = []
    for position in range(length):
        # Even positions draw a consonant, odd positions a vowel.
        pool = _consonants if position % 2 == 0 else _vowels
        letters.append(random.choice(pool))
    return ''.join(letters)
def rsentence(length=4):
    """Generate a random sentence-like string of *length* random words."""
    return " ".join(rword(random.randint(4, 9)) for _ in range(length))
def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    pieces = []
    for _ in range(num_elements):
        action = random.randint(0, 3)
        if action == 0:
            # Open a random tag.
            pieces.append("<%s>" % random.choice(tag_names))
        elif action == 1:
            # Emit some text content.
            pieces.append(rsentence(random.randint(1, 4)))
        elif action == 2:
            # Close a random tag (not necessarily one that is open).
            pieces.append("</%s>" % random.choice(tag_names))
        # action == 3: emit nothing for this iteration.
    return "<html>" + "\n".join(pieces) + "</html>"
def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark."""
    print "Comparative parser benchmark on Beautiful Soup %s" % __version__
    data = rdoc(num_elements)
    print "Generated a large invalid HTML document (%d bytes)." % len(data)

    # Time Beautiful Soup with each available tree builder.
    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
        try:
            a = time.time()
            soup = BeautifulSoup(data, parser)
            b = time.time()
            success = True
        except Exception, e:
            print "%s could not parse the markup." % parser
            traceback.print_exc()
        if success:
            print "BS4+%s parsed the markup in %.2fs." % (parser, b-a)

    # Baseline: raw lxml without Beautiful Soup overhead.
    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print "Raw lxml parsed the markup in %.2fs." % (b-a)

    # Baseline: raw html5lib without Beautiful Soup overhead.
    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print "Raw html5lib parsed the markup in %.2fs." % (b-a)
def profile(num_elements=100000, parser="lxml"):
    """Profile Beautiful Soup parsing of a random document with cProfile."""
    # Keep a reference to the NamedTemporaryFile so it is not deleted
    # before pstats reads it.
    handle = tempfile.NamedTemporaryFile()
    stats_path = handle.name
    markup = rdoc(num_elements)
    namespace = dict(bs4=bs4, data=markup, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)', namespace, namespace, stats_path)

    stats = pstats.Stats(stats_path)
    # stats.strip_dirs()
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)
if __name__ == '__main__':
    # Run as a script: diagnose markup piped in on standard input.
    diagnose(sys.stdin.read())
| apache-2.0 |
libscie/liberator | liberator/lib/python3.6/site-packages/django/utils/text.py | 46 | 14941 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
@keep_lazy_text
def capfirst(x):
    """Return *x* with its first character upper-cased.

    Falsy inputs ('' or None) are returned unchanged.
    """
    if not x:
        return x
    text = force_text(x)
    return text[0].upper() + text[1:]
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.

    All white space is preserved except added line breaks consume the space on
    which they break the line.

    Long words are not wrapped, so the output text may have lines longer than
    ``width``.
    """
    text = force_text(text)

    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # NOTE(review): this min(...) expression always evaluates to
            # ``width`` (the outer min caps width+1 back down); kept as-is.
            max_width = min((line.endswith('\n') and width + 1 or width), width)
            while len(line) > max_width:
                # Prefer breaking at the last space that fits in max_width.
                space = line[:max_width + 1].rfind(' ') + 1
                if space == 0:
                    # No space fits: break at the first space anywhere.
                    space = line.find(' ') + 1
                    if space == 0:
                        # No spaces at all: emit the long line unwrapped.
                        yield line
                        line = ''
                        break
                # The space the line broke on is consumed by the newline.
                yield '%s\n' % line[:space - 1]
                line = line[space:]
                max_width = min((line.endswith('\n') and width + 1 or width), width)
            if line:
                yield line
    return ''.join(_generator())
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    The wrapped text is resolved lazily (via SimpleLazyObject) the first
    time one of the truncation methods runs.
    """
    def __init__(self, text):
        super(Truncator, self).__init__(lambda: force_text(text))

    def add_truncation_text(self, text, truncate=None):
        # Append the truncation marker (default: a translatable "...") to
        # *text*, honouring an optional "%(truncated_text)s" placeholder.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        truncate = force_text(truncate)
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Returns the text truncated to be no longer than the specified number
        of characters.

        Takes an optional argument of what should be used to notify that the
        string has been truncated, defaulting to a translatable string of an
        ellipsis (...).
        """
        self._setup()
        length = int(num)
        text = unicodedata.normalize('NFC', self._wrapped)

        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            # Combining marks don't occupy a display column.
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)

    def _text_chars(self, length, truncate, text, truncate_len):
        """
        Truncates a string after a certain number of chars.
        """
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                # Remember where the truncated text would end.
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)

        # Return the original string since no truncation was necessary
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncates a string after a certain number of words. Takes an optional
        argument of what should be used to notify that the string has been
        truncated, defaulting to ellipsis (...).
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)

    def _text_words(self, length, truncate):
        """
        Truncates a string after a certain number of words.

        Newlines in the string will be stripped.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncates HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Closes opened tags if they were correctly closed in the given HTML.

        Newlines in the HTML are preserved.
        """
        if words and length <= 0:
            return ''

        # Void elements that never take a closing tag.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )

        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []

        regex = re_words if words else re_chars

        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)

        if current_len <= length:
            # Everything fit; nothing was cut.
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
@keep_lazy_text
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and anything that is not a unicode
    alphanumeric, dash, underscore, or dot, is removed.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = force_text(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
@keep_lazy_text
def get_text_list(list_, last_word=ugettext_lazy('or')):
    """
    Join a list into natural-language prose, e.g. 'a, b, c or d'.
    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if not list_:
        return ''
    if len(list_) == 1:
        return force_text(list_[0])
    # Translators: This string is used as a separator between list elements
    head = _(', ').join(force_text(item) for item in list_[:-1])
    return '%s %s %s' % (head, force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
    """Convert CRLF and bare CR line endings to LF."""
    return re_newlines.sub('\n', force_text(text))
@keep_lazy_text
def phone2numeric(phone):
    """Converts a phone number with letters into its numeric equivalent.

    Characters with no keypad mapping (digits, punctuation) pass through.
    """
    # Standard ITU telephone keypad letter-to-digit mapping.
    char2number = {
        'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
        'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
        'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
        'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    digits = [char2number.get(char, char) for char in phone.lower()]
    return ''.join(digits)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring *s* and return the compressed bytes.

    mtime=0 pins the gzip header timestamp so output is deterministic for
    identical input (useful for HTTP caching/ETags).
    """
    buffer = BytesIO()
    gzip_file = GzipFile(mode='wb', compresslevel=6, fileobj=buffer, mtime=0)
    try:
        gzip_file.write(s)
    finally:
        gzip_file.close()
    return buffer.getvalue()
class StreamingBuffer(object):
    """Minimal file-like object: buffers writes, hands them back via read().

    read() drains everything written since the previous read(). flush() and
    close() are no-ops, present so GzipFile can treat this as a file.
    """

    def __init__(self):
        self.vals = []

    def write(self, val):
        self.vals.append(val)

    def read(self):
        pending, self.vals = self.vals, []
        if not pending:
            return b''
        return b''.join(pending)

    def flush(self):
        return

    def close(self):
        return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    # Streaming counterpart of compress_string: gzip-compress an iterable of
    # bytestrings, yielding compressed chunks as they become available.
    buf = StreamingBuffer()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
        # Output headers...
        yield buf.read()
        for item in sequence:
            zfile.write(item)
            # Yield only when the compressor actually emitted bytes.
            data = buf.read()
            if data:
                yield data
    # Closing the GzipFile flushes the remaining data and the gzip trailer.
    yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.

    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    text = force_text(text)
    for match in smart_split_re.finditer(text):
        yield match.group(0)
def _replace_entity(match):
    # re.sub callback for _entity_re: convert a single HTML entity match to
    # its unicode character, or return the match unchanged if unresolvable.
    text = match.group(1)
    if text[0] == '#':
        # Numeric character reference: &#65; (decimal) or &#x41; (hex).
        text = text[1:]
        try:
            if text[0] in 'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return six.unichr(c)
        except ValueError:
            # Not a valid number (or out of unichr range): leave as-is.
            return match.group(0)
    else:
        # Named entity (&amp;, &lt;, ...) via the HTML entity table.
        try:
            return six.unichr(html_entities.name2codepoint[text])
        except (ValueError, KeyError):
            # Unknown name or invalid codepoint: leave as-is.
            return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
    """Replace HTML entity references in *text* with their characters."""
    converted = force_text(text)
    return _entity_re.sub(_replace_entity, converted)
@keep_lazy_text
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

    >>> unescape_string_literal('"abc"')
    'abc'
    >>> unescape_string_literal("'abc'")
    'abc'
    >>> unescape_string_literal('"a \"bc\""')
    'a "bc"'
    >>> unescape_string_literal("'\'ab\' c'")
    "'ab' c"
    """
    if s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    inner = s[1:-1]
    return inner.replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(six.text_type, SafeText)
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    value = force_text(value)
    if allow_unicode:
        # Keep unicode word characters; NFKC gives a canonical composed form.
        value = unicodedata.normalize('NFKC', value)
        value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
        return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
    # ASCII path: decompose accents (NFKD), then drop all non-ASCII bytes.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    # Collapse runs of whitespace/hyphens into a single hyphen.
    return mark_safe(re.sub(r'[-\s]+', '-', value))
def camel_case_to_spaces(value):
    """
    Splits CamelCase and converts to lower case. Also strips leading and
    trailing whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, six.text_type)
| cc0-1.0 |
nilmini20s/gem5-2016-08-13 | src/mem/cache/prefetch/Prefetcher.py | 39 | 4355 | # Copyright (c) 2012, 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Mitch Hayenga
from ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class BasePrefetcher(ClockedObject):
    """Abstract base SimObject for all cache prefetchers.

    The Param declarations below become configurable simulation parameters;
    the on_* flags select which cache accesses notify the prefetcher.
    """
    type = 'BasePrefetcher'
    abstract = True
    cxx_header = "mem/cache/prefetch/base.hh"
    sys = Param.System(Parent.any, "System this prefetcher belongs to")
    on_miss = Param.Bool(False, "Only notify prefetcher on misses")
    on_read = Param.Bool(True, "Notify prefetcher on reads")
    on_write = Param.Bool(True, "Notify prefetcher on writes")
    on_data = Param.Bool(True, "Notify prefetcher on data accesses")
    on_inst = Param.Bool(True, "Notify prefetcher on instruction accesses")
class QueuedPrefetcher(BasePrefetcher):
    """Abstract base for prefetchers that queue generated prefetch requests
    and issue them after a configurable latency."""
    type = "QueuedPrefetcher"
    abstract = True
    cxx_class = "QueuedPrefetcher"
    cxx_header = "mem/cache/prefetch/queued.hh"
    latency = Param.Int(1, "Latency for generated prefetches")
    queue_size = Param.Int(32, "Maximum number of queued prefetches")
    queue_squash = Param.Bool(True, "Squash queued prefetch on demand access")
    queue_filter = Param.Bool(True, "Don't queue redundant prefetches")
    cache_snoop = Param.Bool(False, "Snoop cache to eliminate redundant request")
    tag_prefetch = Param.Bool(True, "Tag prefetch with PC of generating access")
class StridePrefetcher(QueuedPrefetcher):
    """Stride prefetcher: detects constant-stride access patterns per PC
    using a set-associative lookup table with confidence counters."""
    type = 'StridePrefetcher'
    cxx_class = 'StridePrefetcher'
    cxx_header = "mem/cache/prefetch/stride.hh"
    max_conf = Param.Int(7, "Maximum confidence level")
    thresh_conf = Param.Int(4, "Threshold confidence level")
    min_conf = Param.Int(0, "Minimum confidence level")
    start_conf = Param.Int(4, "Starting confidence for new entries")
    table_sets = Param.Int(16, "Number of sets in PC lookup table")
    table_assoc = Param.Int(4, "Associativity of PC lookup table")
    use_master_id = Param.Bool(True, "Use master id based history")
    degree = Param.Int(4, "Number of prefetches to generate")
class TaggedPrefetcher(QueuedPrefetcher):
    """Tagged (next-line) prefetcher: prefetches the following blocks on
    each notified access."""
    type = 'TaggedPrefetcher'
    cxx_class = 'TaggedPrefetcher'
    cxx_header = "mem/cache/prefetch/tagged.hh"
    degree = Param.Int(2, "Number of prefetches to generate")
| bsd-3-clause |
tupolev/plugin.video.mitele | lib/youtube_dl/extractor/newstube.py | 16 | 4603 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)
class NewstubeIE(InfoExtractor):
    # Extractor for newstube.ru media pages.
    _VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
    _TEST = {
        'url': 'http://www.newstube.ru/media/telekanal-cnn-peremestil-gorod-slavyansk-v-krym',
        'md5': '801eef0c2a9f4089fa04e4fe3533abdc',
        'info_dict': {
            'id': '728e0ef2-e187-4012-bac0-5a081fdcb1f6',
            'ext': 'mp4',
            'title': 'Телеканал CNN переместил город Славянск в Крым',
            'description': 'md5:419a8c9f03442bc0b0a794d689360335',
            'duration': 31.05,
        },
    }

    def _real_extract(self, url):
        """Extract video metadata and formats from a newstube.ru page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        page = self._download_webpage(url, video_id, 'Downloading page')

        # The page embeds a flash player whose GUID identifies the media.
        video_guid = self._html_search_regex(
            r'<meta property="og:video:url" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
            page, 'video GUID')

        player = self._download_xml(
            'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
            video_guid, 'Downloading player XML')

        def ns(s):
            # Qualify each path component with the player XML namespace.
            return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}

        error_message = player.find(ns('./ErrorMessage'))
        if error_message is not None:
            raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)

        session_id = player.find(ns('./SessionId')).text
        media_info = player.find(ns('./Medias/MediaInfo'))

        title = media_info.find(ns('./Name')).text
        description = self._og_search_description(page)
        thumbnail = media_info.find(ns('./KeyFrame')).text
        # Duration is reported by the service in milliseconds.
        duration = int(media_info.find(ns('./Duration')).text) / 1000.0

        formats = []

        # RTMP streams advertised in the player XML.
        for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
            media_location = stream_info.find(ns('./MediaLocation'))
            if media_location is None:
                continue

            server = media_location.find(ns('./Server')).text
            app = media_location.find(ns('./App')).text
            media_id = stream_info.find(ns('./Id')).text
            name = stream_info.find(ns('./Name')).text
            width = int(stream_info.find(ns('./Width')).text)
            height = int(stream_info.find(ns('./Height')).text)

            formats.append({
                'url': 'rtmp://%s/%s' % (server, app),
                'app': app,
                'play_path': '01/%s' % video_guid.upper(),
                'rtmp_conn': ['S:%s' % session_id, 'S:%s' % media_id, 'S:n2'],
                'page_url': url,
                'ext': 'flv',
                'format_id': 'rtmp' + ('-%s' % name if name else ''),
                'width': width,
                'height': height,
            })

        # Plain HTTP sources from the JSON endpoint (optional; non-fatal).
        sources_data = self._download_json(
            'http://www.newstube.ru/player2/getsources?guid=%s' % video_guid,
            video_guid, fatal=False)
        if sources_data:
            for source in sources_data.get('Sources', []):
                source_url = source.get('Src')
                if not source_url:
                    continue
                height = int_or_none(source.get('Height'))
                f = {
                    'format_id': 'http' + ('-%dp' % height if height else ''),
                    'url': source_url,
                    'width': int_or_none(source.get('Width')),
                    'height': height,
                }
                source_type = source.get('Type')
                if source_type:
                    # Type looks like: video/mp4; codecs="<vcodec>, <acodec>"
                    mobj = re.search(r'codecs="([^,]+),\s*([^"]+)"', source_type)
                    if mobj:
                        vcodec, acodec = mobj.groups()
                        f.update({
                            'vcodec': vcodec,
                            'acodec': acodec,
                        })
                formats.append(f)

        self._check_formats(formats, video_guid)
        self._sort_formats(formats)

        return {
            'id': video_guid,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| gpl-3.0 |
gfreed/android_external_chromium-org | tools/perf/benchmarks/kraken.py | 24 | 1580 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Mozilla's Kraken JavaScript benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
def _Mean(l):
return float(sum(l)) / len(l) if len(l) > 0 else 0.0
class KrakenMeasurement(page_measurement.PageMeasurement):
    """Drives the Kraken page and scrapes the per-test timings it reports."""

    def MeasurePage(self, _, tab, results):
        # The benchmark is finished once the page title contains "Results".
        js_is_done = """
document.title.indexOf("Results") != -1 && document.readyState == "complete"
"""
        def _IsDone():
            return bool(tab.EvaluateJavaScript(js_is_done))
        util.WaitFor(_IsDone, 500, poll_interval=5)

        # Kraken encodes the result dict in the query string of a form input.
        js_get_results = """
var formElement = document.getElementsByTagName("input")[0];
decodeURIComponent(formElement.value.split("?")[1]);
"""
        # SECURITY NOTE(review): eval() of a string scraped from the page
        # executes arbitrary Python if the page is compromised. The page is
        # replayed from a local archive here, but ast.literal_eval would be
        # safer. Kept as-is to avoid a behavior change.
        result_dict = eval(tab.EvaluateJavaScript(js_get_results))
        total = 0
        for key in result_dict:
            if key == 'v':
                # 'v' is the benchmark version marker, not a timing list.
                continue
            results.Add(key, 'ms', result_dict[key], data_type='unimportant')
            total += _Mean(result_dict[key])
        results.Add('Total', 'ms', total)
class Kraken(test.Test):
    """Mozilla's Kraken JavaScript benchmark."""
    test = KrakenMeasurement

    def CreatePageSet(self, options):
        # Single-page set, replayed from the recorded archive in ../data.
        return page_set.PageSet.FromDict({
            'archive_data_file': '../data/kraken.json',
            'pages': [
                { 'url': 'http://krakenbenchmark.mozilla.org/kraken-1.1/driver.html' }
            ]
        }, os.path.abspath(__file__))
| bsd-3-clause |
nvoron23/arangodb | 3rdParty/V8-4.3.61/buildtools/clang_format/script/clang-format-diff.py | 50 | 3891 | #!/usr/bin/python
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def main():
    """Read a unified diff on stdin and clang-format the changed lines.

    With -i the touched files are reformatted in place; otherwise the diff
    that formatting would introduce is written to stdout.
    """
    parser = argparse.ArgumentParser(description=
                                     'Reformat changed lines in diff. Without -i '
                                     'option just output the diff that would be '
                                     'introduced.')
    parser.add_argument('-i', action='store_true', default=False,
                        help='apply edits to files instead of displaying a diff')
    parser.add_argument('-p', metavar='NUM', default=0,
                        help='strip the smallest prefix containing P slashes')
    parser.add_argument('-regex', metavar='PATTERN', default=None,
                        help='custom pattern selecting file paths to reformat '
                        '(case sensitive, overrides -iregex)')
    parser.add_argument('-iregex', metavar='PATTERN', default=
                        r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|proto'
                        r'|protodevel)',
                        help='custom pattern selecting file paths to reformat '
                        '(case insensitive, overridden by -regex)')
    parser.add_argument(
        '-style',
        help=
        'formatting style to apply (LLVM, Google, Chromium, Mozilla, WebKit)')
    args = parser.parse_args()

    # Extract changed lines for each file.
    filename = None
    lines_by_file = {}
    for line in sys.stdin:
        # "+++ b/path" header lines name the file the hunks below belong to.
        match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
        if match:
            filename = match.group(2)
        # BUG FIX: compare to None with "is", not "==" (PEP 8; "==" invokes
        # __eq__ and is the wrong identity test for the None singleton).
        if filename is None:
            continue

        if args.regex is not None:
            if not re.match('^%s$' % args.regex, filename):
                continue
        else:
            if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
                continue

        # "@@ -a,b +start,count @@" hunk headers give the changed line range.
        match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
        if match:
            start_line = int(match.group(1))
            line_count = 1
            if match.group(3):
                line_count = int(match.group(3))
            if line_count == 0:
                continue
            end_line = start_line + line_count - 1
            lines_by_file.setdefault(filename, []).extend(
                ['-lines', str(start_line) + ':' + str(end_line)])

    # Reformat files containing changes in place.
    for filename, lines in lines_by_file.iteritems():
        command = [binary, filename]
        if args.i:
            command.append('-i')
        command.extend(lines)
        if args.style:
            command.extend(['-style', args.style])
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=None, stdin=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            sys.exit(p.returncode)
        if not args.i:
            # Show what clang-format would change without touching the file.
            with open(filename) as f:
                code = f.readlines()
            formatted_code = StringIO.StringIO(stdout).readlines()
            diff = difflib.unified_diff(code, formatted_code,
                                        filename, filename,
                                        '(before formatting)', '(after formatting)')
            diff_string = string.join(diff, '')
            if len(diff_string) > 0:
                sys.stdout.write(diff_string)
if __name__ == '__main__':
    # Script entry point: diff is read from stdin by main().
    main()
| apache-2.0 |
piotr1212/carbon | lib/carbon/tests/test_rewrite.py | 14 | 7790 | from mock import Mock, mock_open, patch
from unittest import TestCase
from carbon.pipeline import Processor
from carbon.rewrite import PRE, RewriteProcessor, RewriteRule, RewriteRuleManager
class RewriteProcessorTest(TestCase):
    """Tests for the 'rewrite' pipeline Processor plugin."""

    def tearDown(self):
        # Rulesets are module-global state; reset them between tests.
        RewriteRuleManager.clear()

    def test_registers_plugin(self):
        self.assertTrue('rewrite' in Processor.plugins)

    def test_applies_rule(self):
        mock_rule = Mock(spec=RewriteRule)
        RewriteRuleManager.rulesets[PRE] = [mock_rule]
        # process() is a generator; consume it so the rule actually runs.
        list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        mock_rule.apply.assert_called_once_with('carbon.foo')

    def test_applies_rule_and_returns_metric(self):
        mock_rule = Mock(spec=RewriteRule)
        mock_rule.apply.return_value = 'carbon.foo.bar'
        RewriteRuleManager.rulesets[PRE] = [mock_rule]
        result = list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        # The rewritten metric name is paired with the untouched datapoint.
        self.assertEqual(('carbon.foo.bar', (0, 0)), result[0])

    def test_passes_through_with_no_rules(self):
        result = list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        self.assertEqual(('carbon.foo', (0, 0)), result[0])
class TestRewriteRule(TestCase):
    """Behaviour of a single RewriteRule: regex compilation and substitution."""

    def setUp(self):
        # Rule that rewrites a leading "carbon.foo." into "carbon_foo.".
        self.rule = RewriteRule('^carbon[.]foo[.]', 'carbon_foo.')

    def test_instantiation_compiles_pattern(self):
        # A compiled regex object exposes sub(); a raw pattern string does not.
        self.assertTrue(hasattr(self.rule.regex, 'sub'))

    def test_apply_substitutes(self):
        self.assertEqual('carbon_foo.bar', self.rule.apply('carbon.foo.bar'))
class TestRewriteRuleManager(TestCase):
    """Tests for RewriteRuleManager: reading rule config files, caching by
    file mtime, and parsing sections into rulesets (skipping comments and
    rules whose regex fails to compile)."""

    def setUp(self):
        # One [pre] section containing two valid rules.
        self.sample_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.bar = carbon.bar.baz
"""
        # Both a [pre] and a [post] section.
        self.sample_multi_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.bar = carbon.bar.baz
[post]
^carbon.baz = carbon.foo.bar
"""
        # Second rule's pattern has an unbalanced paren and must be skipped.
        self.broken_pattern_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.(bar = carbon.bar.baz
"""
        # Lines starting with '#' are comments and must be ignored.
        self.commented_config = """
[pre]
^carbon.foo = carbon.foo.bar
#^carbon.bar = carbon.bar.baz
"""

    def tearDown(self):
        # Reset the manager's class-level state so tests stay independent.
        RewriteRuleManager.rules_file = None
        RewriteRuleManager.rules_last_read = 0.0
        RewriteRuleManager.clear()

    def test_looping_call_reads_rules(self):
        # The periodic task must be bound to read_rules.
        self.assertEqual(RewriteRuleManager.read_rules, RewriteRuleManager.read_task.f)

    def test_request_for_nonexistent_rules_returns_iterable(self):
        # Asking for an unknown ruleset yields something iterable, not None.
        try:
            iter(RewriteRuleManager.rules('foo'))
        except TypeError:
            self.fail("RewriteRuleManager.rules() returned a non-iterable type")

    def test_read_from_starts_task(self):
        # read_from() should kick off the periodic re-read task exactly once.
        with patch.object(RewriteRuleManager, 'read_rules'):
            with patch.object(RewriteRuleManager.read_task, 'start') as task_start_mock:
                RewriteRuleManager.read_from('foo.conf')
                self.assertEqual(1, task_start_mock.call_count)

    def test_read_records_mtime(self):
        import carbon.rewrite
        RewriteRuleManager.rules_file = 'foo.conf'
        # Patch open/exists/getmtime in the carbon.rewrite module namespace.
        with patch.object(carbon.rewrite, 'open', mock_open(), create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertEqual(1234, RewriteRuleManager.rules_last_read)

    def test_read_clears_if_no_file(self):
        import carbon.rewrite
        RewriteRuleManager.rules_file = 'foo.conf'
        # A missing rules file wipes any previously loaded rules.
        with patch.object(carbon.rewrite, 'exists', Mock(return_value=False)):
            with patch.object(RewriteRuleManager, 'clear') as clear_mock:
                RewriteRuleManager.read_rules()
        clear_mock.assert_called_once_with()

    def test_rules_unchanged_if_mtime_unchanged(self):
        import carbon.rewrite
        mtime = 1234
        rulesets = {'pre': [Mock(RewriteRule)]}
        # Pretend the file was already read at this mtime.
        RewriteRuleManager.rules_last_read = mtime
        RewriteRuleManager.rulesets.update(rulesets)
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
            with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=mtime)):
                RewriteRuleManager.read_rules()
        self.assertEqual(rulesets, RewriteRuleManager.rulesets)

    def test_read_doesnt_open_file_if_mtime_unchanged(self):
        import carbon.rewrite
        mtime = 1234
        RewriteRuleManager.rules_last_read = mtime
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', mock_open(), create=True) as open_mock:
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertFalse(open_mock.called)

    def test_read_opens_file_if_mtime_newer(self):
        import carbon.rewrite
        RewriteRuleManager.rules_last_read = 1234
        RewriteRuleManager.rules_file = 'foo.conf'
        # 5678 > 1234, so the file is considered stale and re-read.
        with patch.object(carbon.rewrite, 'open', mock_open(), create=True) as open_mock:
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=5678)):
                    RewriteRuleManager.read_rules()
        self.assertTrue(open_mock.called)

    def test_section_parsed_into_ruleset(self):
        import carbon.rewrite
        # 'open' returns an iterator of config lines, mimicking a file object.
        open_mock = Mock(return_value=iter(self.sample_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertTrue('pre' in RewriteRuleManager.rulesets)

    def test_multiple_section_parsed_into_ruleset(self):
        import carbon.rewrite
        open_mock = Mock(return_value=iter(self.sample_multi_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertTrue('pre' in RewriteRuleManager.rulesets)
        self.assertTrue('post' in RewriteRuleManager.rulesets)

    def test_rules_parsed(self):
        import carbon.rewrite
        open_mock = Mock(return_value=iter(self.sample_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertEqual(2, len(RewriteRuleManager.rules('pre')))

    def test_broken_patterns_ignored(self):
        import carbon.rewrite
        open_mock = Mock(return_value=iter(self.broken_pattern_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        # Only the one valid rule survives parsing.
        self.assertEqual(1, len(RewriteRuleManager.rules('pre')))

    def test_comments_ignored(self):
        import carbon.rewrite
        open_mock = Mock(return_value=iter(self.commented_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'
        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        # The commented-out rule is not loaded.
        self.assertEqual(1, len(RewriteRuleManager.rules('pre')))
| apache-2.0 |
WatanabeYasumasa/edx-platform | common/djangoapps/student/tests/test_create_account.py | 33 | 3344 | "Tests for account creation"
import ddt
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.transaction import rollback
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_settings
import mock
from user_api.models import UserPreference
from lang_pref import LANGUAGE_KEY
import student
TEST_CS_URL = 'https://comments.service.test:123/'
@ddt.ddt
class TestCreateAccount(TestCase):
    """Tests that account creation records the new user's language preference."""

    def setUp(self):
        self.username = "test_user"
        self.url = reverse("create_account")
        # Minimal POST payload accepted by the create_account endpoint.
        self.params = {
            "username": self.username,
            "email": "test@example.org",
            "password": "testpass",
            "name": "Test User",
            "honor_code": "true",
            "terms_of_service": "true",
        }

    @ddt.data("en", "eo")
    def test_default_lang_pref_saved(self, lang):
        """The platform default language becomes the new user's preference."""
        with mock.patch("django.conf.settings.LANGUAGE_CODE", lang):
            resp = self.client.post(self.url, self.params)
            self.assertEqual(200, resp.status_code)
            account = User.objects.get(username=self.username)
            self.assertEqual(
                lang, UserPreference.get_preference(account, LANGUAGE_KEY))

    @ddt.data("en", "eo")
    def test_header_lang_pref_saved(self, lang):
        """The Accept-Language request header becomes the user's preference."""
        resp = self.client.post(self.url, self.params, HTTP_ACCEPT_LANGUAGE=lang)
        self.assertEqual(200, resp.status_code)
        account = User.objects.get(username=self.username)
        self.assertEqual(
            lang, UserPreference.get_preference(account, LANGUAGE_KEY))
@mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@mock.patch("lms.lib.comment_client.User.base_url", TEST_CS_URL)
@mock.patch("lms.lib.comment_client.utils.requests.request", return_value=mock.Mock(status_code=200, text='{}'))
class TestCreateCommentsServiceUser(TransactionTestCase):
    """A comments-service user is created iff account creation succeeds."""

    def setUp(self):
        self.username = "test_user"
        self.url = reverse("create_account")
        # Minimal POST payload accepted by the create_account endpoint.
        self.params = {
            "username": self.username,
            "email": "test@example.org",
            "password": "testpass",
            "name": "Test User",
            "honor_code": "true",
            "terms_of_service": "true",
        }

    def test_cs_user_created(self, request):
        "If user account creation succeeds, we should create a comments service user"
        response = self.client.post(self.url, self.params)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(request.called)
        args, kwargs = request.call_args
        # The comments service is called with a PUT against its base URL,
        # carrying the new account's username.
        self.assertEqual(args[0], 'put')
        self.assertTrue(args[1].startswith(TEST_CS_URL))
        self.assertEqual(kwargs['data']['username'], self.params['username'])

    @mock.patch("student.models.Registration.register", side_effect=Exception)
    def test_cs_user_not_created(self, register, request):
        "If user account creation fails, we should not create a comments service user"
        try:
            # The view is expected to blow up when registration raises; the
            # response is irrelevant, so don't bind it.
            self.client.post(self.url, self.params)
        except Exception:  # pylint: disable=broad-except
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed (PEP 8 / flake8 E722).
            pass
        # The transaction must have rolled back: no user, no comments call.
        with self.assertRaises(User.DoesNotExist):
            User.objects.get(username=self.username)
        self.assertTrue(register.called)
        self.assertFalse(request.called)
| agpl-3.0 |
basicthinker/THNVM | src/mem/slicc/ast/FuncCallExprAST.py | 31 | 7898 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
from slicc.symbols import Func, Type
class FuncCallExprAST(ExprAST):
    """AST node for a SLICC function call.

    generate() emits the C++ for the call into the output code object:
    several names (DPRINTF, APPEND_TRANSITION_COMMENT, trigger, error,
    assert, set/unset_cache_entry, set/unset_tbe) are handled as built-ins
    with hand-written templates; anything else is looked up in the symbol
    table and emitted as an ordinary function call.
    """

    def __init__(self, slicc, proc_name, exprs):
        # proc_name: name of the called function; exprs: argument expression ASTs.
        super(FuncCallExprAST, self).__init__(slicc)
        self.proc_name = proc_name
        self.exprs = exprs

    def __repr__(self):
        return "[FuncCallExpr: %s %s]" % (self.proc_name, self.exprs)

    def generate(self, code):
        """Emit C++ for this call into 'code'; return the call's result Type."""
        machine = self.state_machine

        if self.proc_name == "DPRINTF":
            # Code for inserting the location of the DPRINTF()
            # statement in the .sm file in the statement it self.
            # 'self.exprs[0].location' represents the location.
            # 'format' represents the second argument of the
            # original DPRINTF() call. It is left unmodified.
            # str_list is used for concatenating the argument
            # list following the format specifier. A DPRINTF()
            # call may or may not contain any arguments following
            # the format specifier. These two cases need to be
            # handled differently. Hence the check whether or not
            # the str_list is empty.

            format = "%s" % (self.exprs[1].inline())
            format_length = len(format)
            str_list = []

            for i in range(2, len(self.exprs)):
                str_list.append("%s" % self.exprs[i].inline())

            # format[2:format_length-2] strips the surrounding quote pair
            # from the inlined format-string literal.
            if len(str_list) == 0:
                code('DPRINTF(RubySlicc, "$0: $1")',
                     self.exprs[0].location, format[2:format_length-2])
            else:
                code('DPRINTF(RubySlicc, "$0: $1", $2)',
                     self.exprs[0].location, format[2:format_length-2],
                     ', '.join(str_list))

            return self.symtab.find("void", Type)

        # hack for adding comments to profileTransition
        if self.proc_name == "APPEND_TRANSITION_COMMENT":
            # FIXME - check for number of parameters
            code("APPEND_TRANSITION_COMMENT($0)", self.exprs[0].inline())
            return self.symtab.find("void", Type)

        # Look up the function in the symbol table
        func = self.symtab.find(self.proc_name, Func)

        # Check the types and get the code for the parameters
        if func is None:
            self.error("Unrecognized function name: '%s'", self.proc_name)

        if len(self.exprs) != len(func.param_types):
            self.error("Wrong number of arguments passed to function : '%s'" +\
                       " Expected %d, got %d", self.proc_name,
                       len(func.param_types), len(self.exprs))

        cvec = []
        type_vec = []
        for expr,expected_type in zip(self.exprs, func.param_types):
            # Check the types of the parameter
            # 'OOD' (out-of-domain) arguments bypass the type check.
            actual_type,param_code = expr.inline(True)
            if str(actual_type) != 'OOD' and \
               str(actual_type) != str(expected_type):
                expr.error("Type mismatch: expected: %s actual: %s" % \
                           (expected_type, actual_type))
            cvec.append(param_code)
            type_vec.append(expected_type)

        # OK, the semantics of "trigger" here is that, ports in the
        # machine have different priorities. We always check the first
        # port for doable transitions. If nothing/stalled, we pick one
        # from the next port.
        #
        # One thing we have to be careful as the SLICC protocol
        # writter is : If a port have two or more transitions can be
        # picked from in one cycle, they must be independent.
        # Otherwise, if transition A and B mean to be executed in
        # sequential, and A get stalled, transition B can be issued
        # erroneously. In practice, in most case, there is only one
        # transition should be executed in one cycle for a given
        # port. So as most of current protocols.

        if self.proc_name == "trigger":
            code('''
{
''')
            # The doTransition() signature varies with whether the machine
            # declares a TBE type and/or a cache-entry type.
            if machine.TBEType != None and machine.EntryType != None:
                code('''
    TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[3]}}, ${{cvec[1]}});
''')
            elif machine.TBEType != None:
                code('''
    TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[1]}});
''')
            elif machine.EntryType != None:
                code('''
    TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[1]}});
''')
            else:
                code('''
    TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[1]}});
''')

            code('''
    if (result == TransitionResult_Valid) {
        counter++;
        continue; // Check the first port again
    }

    if (result == TransitionResult_ResourceStall) {
        scheduleEvent(Cycles(1));

        // Cannot do anything with this transition, go check next doable transition (mostly likely of next port)
    }
}
''')
        elif self.proc_name == "error":
            code("$0", self.exprs[0].embedError(cvec[0]))
        elif self.proc_name == "assert":
            error = self.exprs[0].embedError('"assert failure"')
            code('''
#ifndef NDEBUG
if (!(${{cvec[0]}})) {
    $error
}
#endif
''')
        elif self.proc_name == "set_cache_entry":
            code("set_cache_entry(m_cache_entry_ptr, %s);" %(cvec[0]));
        elif self.proc_name == "unset_cache_entry":
            code("unset_cache_entry(m_cache_entry_ptr);");
        elif self.proc_name == "set_tbe":
            code("set_tbe(m_tbe_ptr, %s);" %(cvec[0]));
        elif self.proc_name == "unset_tbe":
            code("unset_tbe(m_tbe_ptr);");
        else:
            # Normal function
            # Only externally-declared or machine-internal functions may be
            # called from SLICC source.
            if "external" not in func and not func.isInternalMachineFunc:
                self.error("Invalid function")

            params = ""
            first_param = True

            for (param_code, type) in zip(cvec, type_vec):
                if first_param:
                    params = str(param_code)
                    first_param  = False
                else:
                    params += ', '
                    params += str(param_code);

            fix = code.nofix()
            code('(${{func.c_ident}}($params))')
            code.fix(fix)

        return func.return_type
| bsd-3-clause |
ani2404/ee6761cloud | inference.py | 1 | 1332 | # Build the model, restore the variables and run the inference
# Need to use SavedModel builder and loader instead - future work
import sys
sys.path.append('/home/ani2404/Desktop/ee6761cloud/')
import numpy as np
#Need to replace with the actual model
from code_ref.model import Model
class infer(object):
    """Builds the super-resolution model, restores its checkpoint, and runs
    inference on input images."""

    def __init__(self, session, checkpoint_dir, image_size_x, image_size_y,
                 resolution_factor=4, batch_size=1):
        """Construct the model graph and restore variables from checkpoint_dir."""
        self.session = session
        self.resolution_factor = resolution_factor
        # Build the model graph sized for the requested upscaling factor.
        self.model = Model(
            session,
            checkpoint_dir=checkpoint_dir,
            batch_size=batch_size,
            image_size_x=image_size_x,
            image_size_y=image_size_y,
            resolution_factor=resolution_factor,
        )
        # Restore trained weights and report whether the checkpoint loaded.
        loaded = self.model.load(checkpoint_dir)
        print(" [*] Load SUCCESS" if loaded else " [*] Load Failed")

    def super_resolute(self, input_image):
        """Super-resolute input_image; return (network outputs, upscaled input)."""
        fetches = [self.model.ESCNN, self.model.interpolation]
        output_images, up_input = self.session.run(
            fetches, feed_dict={self.model.inputs: input_image})
        output_images = np.array(output_images).astype(np.float32)
        return output_images, up_input
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.