"""Utilities to support packages."""
import os
import sys
import imp
import os.path
from types import ModuleType
from org.python.core import imp as _imp, BytecodeLoader
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def read_jython_code(fullname, file, filename):
data = _imp.readCode(filename, file, False)
return BytecodeLoader.makeCode(fullname + "$py", data, filename)
def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
return wrapper
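# A minimal usage sketch for simplegeneric (illustrative only; the names
# below are hypothetical). Register a type-specific implementation, then let
# the wrapper dispatch on the first argument's class:
#
#   @simplegeneric
#   def describe(ob):
#       return "object: %r" % (ob,)
#
#   @describe.register(list)
#   def describe_list(ob):
#       return "list of %d items" % len(ob)
#
#   describe(42)      # -> "object: 42" (default implementation)
#   describe([1, 2])  # -> "list of 2 items" (dispatched via the registry)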
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
yield item
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
filenames = os.listdir(self.path)
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
for fn in os.listdir(path):
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
f = open(pathname, "rb")
try:
return f.read()
finally:
f.close()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'rU')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_jython_code(fullname, self.file, self.filename)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
f = open(self.filename[:-1], 'rU')
try:
self.source = f.read()
finally:
f.close()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
return ImpImporter(self.filename).find_module('__init__')
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if self.etc[2]==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = zipimport._zip_directory_cache[importer.archive].keys()
dirlist.sort()
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
If there is no importer, a wrapper around the basic import
machinery is returned. This wrapper is never inserted into
the importer cache (None is inserted instead).
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
break
except ImportError:
pass
else:
importer = None
sys.path_importer_cache.setdefault(path_item, importer)
if importer is None:
try:
importer = ImpImporter(path_item)
except ImportError:
importer = None
return importer
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
yield get_importer(item)
if '.' not in fullname:
yield ImpImporter()
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
If the module or package is accessible via the normal import
mechanism, a wrapper around the relevant part of that machinery
is returned. Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
This function uses iter_importers(), and is thus subject to the same
limitations regarding platform-specific special import locations such
as the Windows registry.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
fullname = module.__name__
elif module_or_name == sys:
# Jython sys is not a real module; fake it here for now since
# making it a module requires a fair amount of decoupling from
# PySystemState
fullname = "sys"
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
return None
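# A minimal sketch of the lookup helpers above (illustrative only):
#
#   loader = get_loader('os')
#   if loader is not None:
#       print loader.get_filename()     # e.g. the path to os.py
#       print loader.is_package('os')   # False for a plain module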
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
try:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
finally:
f.close()
return path
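# A minimal sketch of the *.pkg mechanism described above (the paths are
# hypothetical). Given a package "mypkg" and a file "mypkg.pkg" on sys.path
# containing:
#
#   /opt/extra/mypkg-addons
#   # comments and blank lines are skipped
#
# extend_path(__path__, 'mypkg') appends '/opt/extra/mypkg-addons' to the
# returned copy of __path__ without checking that the directory exists.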
|
from . import foo
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ProfileConfig(AppConfig):
name = "profiles"
verbose_name = 'User Profiles'
def ready(self):
from . import signals # noqa
|
records = [select.query.decode(r) for r in records]
|
class С:
def __init__(self, x=None):
if x is None:
self.foo = {
'A': {
'x': 0,
'y': 0,
},
}
else: # init was given the previous state
assert isinstance(x, С)
self.foo = {
'A': {
                    'x': x.foo['A']['x'],
'y': x.foo['A']['y'],
},
}
|
from __future__ import absolute_import
import sys
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils import six
from django.utils.module_loading import import_string
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class Jinja2(BaseEngine):
app_dirname = 'jinja2'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(Jinja2, self).__init__(params)
environment = options.pop('environment', 'jinja2.Environment')
environment_cls = import_string(environment)
options.setdefault('autoescape', True)
options.setdefault('loader', jinja2.FileSystemLoader(self.template_dirs))
options.setdefault('auto_reload', settings.DEBUG)
options.setdefault('undefined',
jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined)
self.env = environment_cls(**options)
def from_string(self, template_code):
return Template(self.env.from_string(template_code))
def get_template(self, template_name):
try:
return Template(self.env.get_template(template_name))
except jinja2.TemplateNotFound as exc:
six.reraise(
TemplateDoesNotExist,
TemplateDoesNotExist(exc.name, backend=self),
sys.exc_info()[2],
)
except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
new.template_debug = get_exception_info(exc)
six.reraise(TemplateSyntaxError, new, sys.exc_info()[2])
class Template(object):
def __init__(self, template):
self.template = template
self.origin = Origin(
name=template.filename, template_name=template.name,
)
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template.render(context)
class Origin(object):
"""
A container to hold debug information as described in the template API
documentation.
"""
def __init__(self, name, template_name):
self.name = name
self.template_name = template_name
def get_exception_info(exception):
"""
Formats exception information for display on the debug page using the
structure described in the template API documentation.
"""
context_lines = 10
lineno = exception.lineno
lines = list(enumerate(exception.source.strip().split("\n"), start=1))
during = lines[lineno - 1][1]
total = len(lines)
top = max(0, lineno - context_lines - 1)
bottom = min(total, lineno + context_lines)
return {
'name': exception.filename,
'message': exception.message,
'source_lines': lines[top:bottom],
'line': lineno,
'before': '',
'during': during,
'after': '',
'total': total,
'top': top,
'bottom': bottom,
}
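# A minimal settings sketch for enabling this backend (the dotted BACKEND
# path is hypothetical; adjust it to wherever this module lives):
#
#   TEMPLATES = [{
#       'BACKEND': 'myproject.template.backends.jinja2.Jinja2',
#       'DIRS': [],
#       'APP_DIRS': True,  # searches each app's "jinja2" subdirectory
#       'OPTIONS': {'environment': 'jinja2.Environment'},
#   }]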
|
doctests = """
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
set([3])
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax errors are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
set([None])
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
set([4])
Another way to test that the iteration variable is local to the list comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
set([4])
And confirm that a closure can jump over the list comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
set([2])
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
set([2])
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import test_support
from test import test_setcomps
test_support.run_doctest(test_setcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
test_support.run_doctest(test_setcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
def main(request, response):
headers = []
if 'Content-Type' in request.GET:
headers += [('Content-Type', request.GET['Content-Type'])]
    with open('./resources/ahem/AHEM____.TTF', 'rb') as f:
        return 200, headers, f.read()
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
- This is the default used if no other plugin is specified.
- There are no options to configure.
version_added: historical
author: core team (@ansible-core)
'''
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
def contains(self, key):
return key in self._cache
def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data
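# A minimal usage sketch (illustrative only; Ansible normally instantiates
# cache plugins itself via the plugin loader):
#
#   cache = CacheModule()
#   cache.set('host1', {'ansible_facts': {}})
#   assert cache.contains('host1')
#   cache.flush()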
|
buildSettings = {
# local: use this build if you're not modifying external resources
# no external resources allowed - they're not needed any more
'local': {
'resourceUrlBase': 'http://localhost:8100',
'distUrlBase': 'http://localhost:8100',
},
# local8000: if you need to modify external resources, this build will load them from
# the web server at http://0.0.0.0:8000/dist
    # (This shouldn't be required any more - all resources are embedded, but it remains just in case some new feature
# needs external resources)
'local8000': {
'resourceUrlBase': 'http://0.0.0.0:8000/dist',
'distUrlBase': None,
},
# mobile: default entry that also builds the mobile .apk
# you will need to have the android-sdk installed, and the file mobile/local.properties created as required
'mobile': {
'resourceUrlBase': None,
'distUrlBase': None,
'buildMobile': 'debug',
},
# if you want to publish your own fork of the project, and host it on your own web site
# create a localbuildsettings.py file containing something similar to this
# note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on regular "http" URLs
#'example': {
# 'resourceBaseUrl': 'http://www.example.com/iitc/dist',
# 'distUrlBase': 'https://secure.example.com/iitc/dist',
#},
}
|
from __future__ import absolute_import, unicode_literals
import os
import shutil
import sys
import dirtyjson as json
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
name="coala",
install=[
["pipx", "install", "--spec", "coala-bears", "coala"],
[sys.executable, "-m", "pip", "install", "-U", "coala-bears"],
],
help_cmd=["coala", "-h"],
run=["coala", "-C", "--json", "--log-json", "--limit-files", "5000"],
rundefault=["coala", "-C", "--json", "--log-json", "--limit-files", "5000"],
dotfiles=[".coafile"],
language="all",
autorun=True,
run_per_file=False,
concurrency=1,
)
class CoalaParser(ParserBase):
"""Parse json coala output."""
def install(self):
if not any(
dotfile.strip() in os.listdir(os.getcwd())
for dotfile in self.config.get("dotfiles")
):
config_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "config")
)
dotfile_name = self.config.get("dotfiles")[0]
shutil.copyfile(
os.path.join(config_dir, dotfile_name),
os.path.join(os.getcwd(), dotfile_name),
)
def parse(self, output):
messages = set()
lint_data = [
msg
for category in json.loads(output).get("results", {}).values()
for msg in category
]
for msgdata in lint_data:
try:
msgbody = msgdata["message"]
for line in msgdata.get("affected_code", []):
path = line.get("file")
line = line.get("start", {}).get("line")
messages.add((path, line, msgbody))
except (ValueError, KeyError):
print("Invalid message: {0}".format(msgdata))
return messages
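# A minimal sketch of the JSON shape parse() consumes (field values are
# hypothetical, and this assumes the decorated class can be instantiated
# directly):
#
#   output = ('{"results": {"default": [{"message": "Line is too long",'
#             ' "affected_code": [{"file": "foo.py", "start": {"line": 3}}]}]}}')
#   CoalaParser().parse(output)  # -> {("foo.py", 3, "Line is too long")}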
|
""" This file defines the RoutingTree class which can be used for constructing
routing trees for route segments from the fpga_interchange.physical_netlist
class PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip.
Use of the RoutingTree requires having the DeviceResources class loaded for
the relevant part for the design. Use
interchange_capnp.Interchange.read_device_resources to load a device resource
file.
"""
def create_id_map(id_to_segment, segments):
""" Create or update dict from object ids of segments to segments. """
for segment in segments:
segment_id = id(segment)
assert segment_id not in id_to_segment
id_to_segment[segment_id] = segment
create_id_map(id_to_segment, segment.branches)
def check_tree(routing_tree, segment):
""" Recursively checks a routing tree.
Checks for:
- Circular routing trees
- Child segments are connected to their parents.
"""
# Check for circular routing tree
for _ in yield_branches(segment):
pass
# Ensure children are connected to parent.
root_resource = routing_tree.get_device_resource(segment)
for child in segment.branches:
child_resource = routing_tree.get_device_resource(child)
assert root_resource.is_connected(child_resource), (str(segment),
str(child),
root_resource,
child_resource)
check_tree(routing_tree, child)
def yield_branches(routing_branch):
""" Yield all routing branches starting from the given route segment.
This will yield the input route branch in addition to its children.
    An AssertionError will be raised if a circular route is detected.
"""
objs = set()
def descend(obj):
obj_id = id(obj)
assert obj_id not in objs
objs.add(obj_id)
yield obj
for seg in obj.branches:
for s in descend(seg):
yield s
for s in descend(routing_branch):
yield s
def sort_branches(branches):
""" Sort branches by the branch tuple.
The branch tuple is:
('bel_pin'/'site_pin'/'site_pip'/'pip', <site>/<tile>, ...)
so sorting in this way ensures that BEL pins are grouped, etc.
    This also canonicalizes the branch order, which makes comparing trees easy:
    just normalize both trees and compare the results.
"""
branches.sort(key=lambda item: item.to_tuple())
def get_tuple_tree(root_branch):
""" Convert a rout branch in a two tuple. """
return root_branch.to_tuple(), tuple(
get_tuple_tree(branch) for branch in root_branch.branches)
class RoutingTree():
""" Utility class for managing stitching of a routing tree. """
def __init__(self, device_resources, site_types, stubs, sources):
# Check that no duplicate routing resources are present.
tuple_to_id = {}
for stub in stubs:
for branch in yield_branches(stub):
tup = branch.to_tuple()
assert tup not in tuple_to_id, tup
tuple_to_id[tup] = id(branch)
for source in sources:
for branch in yield_branches(source):
tup = branch.to_tuple()
assert tup not in tuple_to_id, tup
tuple_to_id[tup] = id(branch)
self.id_to_segment = {}
self.id_to_device_resource = {}
self.stubs = stubs
self.sources = sources
self.connections = None
# Populate id_to_segment and id_to_device_resource maps.
create_id_map(self.id_to_segment, self.stubs)
create_id_map(self.id_to_segment, self.sources)
for segment_id, segment in self.id_to_segment.items():
self.id_to_device_resource[
segment_id] = segment.get_device_resource(
site_types, device_resources)
# Verify initial input makes sense.
self.check_trees()
def segment_for_id(self, segment_id):
""" Get routing segment based on the object id of the routing segment. """
return self.id_to_segment[segment_id]
def normalize_tree(self):
""" Normalize the routing tree by sorted element. """
sort_branches(self.stubs)
sort_branches(self.sources)
for stub in self.stubs:
for branch in yield_branches(stub):
sort_branches(branch.branches)
for source in self.sources:
for branch in yield_branches(source):
sort_branches(branch.branches)
def get_tuple_tree(self):
""" Get tuple tree representation of the current routing tree.
This is suitable for equality checking if normalized with
normalize_tree.
"""
return (tuple(get_tuple_tree(stub) for stub in self.stubs),
tuple(get_tuple_tree(source) for source in self.sources))
def get_device_resource_for_id(self, segment_id):
""" Get the device resource that corresponds to the segment id given. """
return self.id_to_device_resource[segment_id]
def get_device_resource(self, segment):
""" Get the device resource that corresponds to the segment given. """
return self.id_to_device_resource[id(segment)]
def check_trees(self):
""" Check that the routing tree at and below obj is valid.
This method should be called after all route segments have been added
to the node cache.
"""
for stub in self.stubs:
check_tree(self, stub)
for source in self.sources:
assert self.get_device_resource(source).is_root(), source
check_tree(self, source)
def connections_for_segment_id(self, segment_id):
""" Yield all connection resources connected to segment id given. """
resource = self.id_to_device_resource[segment_id]
for site_wire in resource.site_wires():
yield site_wire
for node in resource.nodes():
yield node
def build_connections(self):
""" Create a dictionary of connection resources to segment ids. """
self.connections = {}
for segment_id in self.id_to_segment.keys():
for connection in self.connections_for_segment_id(segment_id):
if connection not in self.connections:
self.connections[connection] = set()
self.connections[connection].add(segment_id)
def get_connection(self, connection_resource):
""" Get list of segment ids connected to connection_resource. """
if self.connections is None:
self.build_connections()
return self.connections[connection_resource]
def reroot(self):
""" Determine which routing segments are roots and non-roots.
Repopulates stubs and sources list with new roots and non-root
segments.
"""
if self.connections is None:
self.build_connections()
segments = self.stubs + self.sources
self.stubs.clear()
self.sources.clear()
source_segment_ids = set()
        # Examine each connection and find the best root.
for segment_ids in self.connections.values():
root_priority = None
root = None
root_count = 0
for segment_id in segment_ids:
resource = self.get_device_resource_for_id(segment_id)
if resource.is_root():
possible_root_priority = resource.root_priority()
if root is None:
root_priority = possible_root_priority
root = segment_id
root_count = 1
elif possible_root_priority < root_priority:
root_priority = possible_root_priority
root = segment_id
root_count = 1
elif possible_root_priority == root_priority:
root_count += 1
if root is not None:
# Generate an error if multiple segments could be a root.
# This should only occur near IO pads. In most cases, the
# root should be the only Direction.Output BEL pin on the site
# wire.
assert root_count == 1
source_segment_ids.add(root)
for segment in segments:
if id(segment) in source_segment_ids:
self.sources.append(segment)
else:
self.stubs.append(segment)
def attach(self, parent_id, child_id):
""" Attach a child routing tree to the routing tree for parent. """
assert self.id_to_device_resource[parent_id].is_connected(
self.id_to_device_resource[child_id])
self.id_to_segment[parent_id].branches.append(
self.id_to_segment[child_id])
def check_count(self):
""" Verify that every segment is reachable from stubs and sources list.
This check ensures no routing segment is orphaned during processing.
"""
count = 0
for stub in self.stubs:
for _ in yield_branches(stub):
count += 1
for source in self.sources:
for _ in yield_branches(source):
count += 1
assert len(self.id_to_segment) == count
def attach_candidates(routing_tree, id_to_idx, stitched_stubs, objs_to_attach,
route_branch, visited):
""" Attach children of branches in the routing tree route_branch.
routing_tree : RoutingTree
A node cache that contains all routing branches in the net.
id_to_idx : dict object id to int
Map of object id to idx in a list of unstitched routing branches.
stitched_stubs : set of int
        Set of indices of stubs that have been stitched. Used to track which
stubs have been stitched into the tree, and verify stubs are not
stitched twice into the tree.
    objs_to_attach : dict of child object id to parent object id
        When attach_candidates finds a stub that should be stitched into the
        routing tree, rather than stitching it immediately, it records the pair
        (id(parent), id(child)) in objs_to_attach. This deferral enables the
traversal of the input routing tree without modification.
After attach_candidates returns, elements of objs_to_attach should be
passed to routing_tree.attach to join the trees.
    route_branch : PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip
        Root of the routing tree to iterate over to identify candidates to
        attach to the routing tree.
visited : set of ids to routing branches.
"""
root_obj_id = id(route_branch)
assert root_obj_id not in id_to_idx
for branch in yield_branches(route_branch):
# Make sure each route branch is only visited once.
assert id(branch) not in visited
visited.add(id(branch))
for connection in routing_tree.connections_for_segment_id(id(branch)):
for segment_id in routing_tree.get_connection(connection):
if id(branch) == segment_id:
continue
if segment_id not in id_to_idx:
continue
# There should never be a loop because root_obj_id should not
# be in the id_to_idx map once it is stitched into another tree.
assert root_obj_id != segment_id
if not routing_tree.get_device_resource(branch).is_connected(
routing_tree.get_device_resource_for_id(segment_id)):
continue
idx = id_to_idx[segment_id]
if idx in stitched_stubs:
assert segment_id in objs_to_attach
proposed_parent = id(branch)
old_parent = objs_to_attach[segment_id]
assert old_parent == proposed_parent, (
str(routing_tree.segment_for_id(proposed_parent)),
str(routing_tree.segment_for_id(old_parent)),
str(routing_tree.segment_for_id(segment_id)))
else:
stitched_stubs.add(idx)
objs_to_attach[segment_id] = id(branch)
def attach_from_parents(routing_tree, id_to_idx, parents, visited):
""" Attach children routing tree starting from list of parent routing trees.
routing_tree : RoutingTree
A node cache that contains all routing branches in the net.
id_to_idx : dict object id to int
Map of object id to idx in a list of unstitched routing branches.
parents : list of PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip
Roots of routing tree to search for children trees.
visited : set of ids to routing branches.
    Returns set of indices to stitched stubs.
"""
objs_to_attach = {}
stitched_stubs = set()
for parent in parents:
attach_candidates(
routing_tree=routing_tree,
id_to_idx=id_to_idx,
stitched_stubs=stitched_stubs,
objs_to_attach=objs_to_attach,
route_branch=parent,
visited=visited)
for child_id, branch_id in objs_to_attach.items():
        # The branch_id should not be in the id_to_idx map, because it is
        # already part of a stitched tree rather than an outstanding stub.
assert branch_id not in id_to_idx
# The child_id should be in the id_to_idx map, because it should be an
# outstanding stub.
assert child_id in id_to_idx
routing_tree.attach(branch_id, child_id)
stitched_stubs.add(id_to_idx[child_id])
del id_to_idx[child_id]
# Return the newly stitched stubs, so that they form the new parent list.
return stitched_stubs
def stitch_segments(device_resources, site_types, segments):
""" Stitch segments of the routing tree into trees rooted from net sources. """
routing_tree = RoutingTree(
device_resources, site_types, stubs=segments, sources=[])
routing_tree.reroot()
    # Create an id to idx map so that stitching can be deferred when walking
    # trees.
id_to_idx = {}
for idx, stub in enumerate(routing_tree.stubs):
        assert id(stub) not in id_to_idx
id_to_idx[id(stub)] = idx
# Initial set of tree parents are just the sources
parents = routing_tree.sources
stitched_stubs = set()
# Track visited nodes, as it is expected to never visit a route branch
# more than once.
visited = set()
# Continue iterating until no more stubs are stitched.
while len(parents) > 0:
        # Starting from the parents of the current tree, add stubs that
        # descend from this set, and create a new set of parents from those
        # stubs.
newly_stitched_stubs = attach_from_parents(routing_tree, id_to_idx,
parents, visited)
# Mark the newly stitched stubs to be removed.
stitched_stubs |= newly_stitched_stubs
        # New set of parents comes from the newly stitched stubs.
parents = [routing_tree.stubs[idx] for idx in newly_stitched_stubs]
# Remove stitched stubs from stub list
for idx in sorted(stitched_stubs, reverse=True):
del routing_tree.stubs[idx]
# Make sure new trees are sensible.
routing_tree.check_trees()
routing_tree.check_count()
return routing_tree.sources, routing_tree.stubs
def flatten_segments(segments):
""" Take a list of routing segments and flatten out any children. """
output = []
for segment in segments:
for branch in yield_branches(segment):
output.append(branch)
for segment in output:
segment.branches.clear()
return output
|
import sys, os.path, re
from distutils.core import setup
from distutils.extension import Extension
if not os.path.isfile('config.mak'):
print "please run ./configure && make first"
print "Note: setup.py is supposed to be run from Makefile"
sys.exit(1)
buf = open("configure.ac","r").read(256)
m = re.search("AC_INIT[(][^,]*,\s+([^)]*)[)]", buf)
ac_ver = m.group(1)
def getvar(name):
cf = open('config.mak').read()
m = re.search(r'^%s\s*=\s*(.*)' % name, cf, re.M)
return m.group(1).strip()
sfx = getvar('SUFFIX')
share_dup_files = [
'sql/pgq/pgq.sql',
'sql/londiste/londiste.sql',
'sql/pgq_ext/pgq_ext.sql',
'sql/pgq_node/pgq_node.sql',
]
if os.path.isfile('sql/txid/txid.sql'):
share_dup_files.append('sql/txid/txid.sql')
setup(
name = "skytools",
license = "BSD",
version = ac_ver,
maintainer = "Marko Kreen",
maintainer_email = "markokr@gmail.com",
url = "http://pgfoundry.org/projects/skytools/",
package_dir = {'': 'python'},
packages = ['skytools', 'londiste', 'pgq', 'pgq.cascade'],
data_files = [
('share/doc/skytools%s/conf' % sfx, [
'python/conf/wal-master.ini',
'python/conf/wal-slave.ini',
]),
('share/skytools' + sfx, share_dup_files)],
ext_modules=[Extension("skytools._cquoting", ['python/modules/cquoting.c'])],
)
|
"""
Based on :mod:`django.contrib.auth.tokens`. Supports the following settings:
:setting:`WALDO_REGISTRATION_TIMEOUT_DAYS`
The number of days a registration link will be valid before expiring. Default: 1.
:setting:`WALDO_EMAIL_TIMEOUT_DAYS`
The number of days an email change link will be valid before expiring. Default: 1.
"""
from hashlib import sha1
from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.contrib.auth.tokens import PasswordResetTokenGenerator
REGISTRATION_TIMEOUT_DAYS = getattr(settings, 'WALDO_REGISTRATION_TIMEOUT_DAYS', 1)
EMAIL_TIMEOUT_DAYS = getattr(settings, 'WALDO_EMAIL_TIMEOUT_DAYS', 1)
class RegistrationTokenGenerator(PasswordResetTokenGenerator):
"""Strategy object used to generate and check tokens for the user registration mechanism."""
def check_token(self, user, token):
"""Check that a registration token is correct for a given user."""
# If the user is active, the hash can't be valid.
if user.is_active:
return False
# Parse the token
try:
ts_b36, hash = token.split('-')
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp and uid have not been tampered with.
if self._make_token_with_timestamp(user, ts) != token:
return False
# Check that the timestamp is within limit
if (self._num_days(self._today()) - ts) > REGISTRATION_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state that is
# sure to change, we produce a hash that will be invalid as soon as it
# is used.
hash = sha1(settings.SECRET_KEY + unicode(user.id) + unicode(user.is_active) + user.last_login.strftime('%Y-%m-%d %H:%M:%S') + unicode(timestamp)).hexdigest()[::2]
return '%s-%s' % (ts_b36, hash)
registration_token_generator = RegistrationTokenGenerator()
class EmailTokenGenerator(PasswordResetTokenGenerator):
"""Strategy object used to generate and check tokens for a user email change mechanism."""
def make_token(self, user, email):
"""Returns a token that can be used once to do an email change for the given user and email."""
return self._make_token_with_timestamp(user, email, self._num_days(self._today()))
def check_token(self, user, email, token):
if email == user.email:
return False
# Parse the token
try:
ts_b36, hash = token.split('-')
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp and uid have not been tampered with.
if self._make_token_with_timestamp(user, email, ts) != token:
return False
# Check that the timestamp is within limit
if (self._num_days(self._today()) - ts) > EMAIL_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, email, timestamp):
ts_b36 = int_to_base36(timestamp)
hash = sha1(settings.SECRET_KEY + unicode(user.id) + user.email + email + unicode(timestamp)).hexdigest()[::2]
return '%s-%s' % (ts_b36, hash)
email_token_generator = EmailTokenGenerator()
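# A minimal usage sketch (illustrative only): make_token() for registration
# tokens is inherited from PasswordResetTokenGenerator.
#
#   token = registration_token_generator.make_token(user)
#   registration_token_generator.check_token(user, token)
#   # -> True while the user is inactive and the link has not expired
#
#   token = email_token_generator.make_token(user, 'new@example.com')
#   email_token_generator.check_token(user, 'new@example.com', token)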
|
import urllib.request
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("url", help="the URL whose HTML you want to extract telephone numbers from", type=str)
args = parser.parse_args()
with urllib.request.urlopen(args.url) as response:
html = response.read().decode('utf-8')
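# Match an optional country code ("1", "01" or "001", optionally followed by
# "-"), a three-digit area code with optional parentheses, then 3+4 digits
# with optional space/hyphen separators.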
regex = re.compile(r'0?0?1?-?\(?[0-9]{3}\)?\s?-?[0-9]{3}-?[0-9]{4}')
print(regex.findall(html))
|
import difflib
import shutil
__author__ = 'Adam'
import unittest
import os
import useful
class TestFileContentProcedures(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_dir = "./tests/data/"
cls.text_file_name = "TextFile_UTF16_CRLF.txt"
cls.text_file_path = os.path.join(cls.test_dir, cls.text_file_name)
cls.text_file_encoding = "UTF-16BE"
cls.text_file_eol = "CRLF"
import codecs
with codecs.open(cls.text_file_path, 'rb',
encoding=cls.text_file_encoding) as f:
cls.text_file_contents = f.read()
cls.script_file_name = "ScriptFile_UTF8_LF.py"
cls.script_file_path = os.path.join(cls.test_dir, cls.script_file_name)
cls.script_file_encoding = "UTF-8"
cls.script_file_eol = "LF"
with codecs.open(cls.script_file_path, 'rb',
encoding=cls.script_file_encoding) as f:
cls.script_file_contents = f.read()
cls.set_contents = cls.text_file_contents
cls.set_name = "TestSetContents.txt"
cls.set_path = os.path.join(cls.test_dir, cls.set_name)
# diff testing
cls.diff_target_path = os.path.join(cls.test_dir, "ScriptFile_Copy.py")
shutil.copyfile(cls.script_file_path, cls.diff_target_path)
cls.diff_new_path = os.path.join(cls.test_dir,
"ScriptFile_Diff_Test.py")
with open(cls.diff_target_path, "rb") as f:
target_data = f.read().split("\n")
with open(cls.diff_new_path, "rb") as f:
new_data = f.read().split("\n")
diff_data = difflib.ndiff(target_data, new_data)
diff_data = list(diff_data)
cls.comp_diff_data = useful.make_comp_diff(diff_data)
@classmethod
def tearDownClass(cls):
# os.remove(cls.set_path)
# os.remove(cls.diff_target_path)
pass
'''
The system is required to be able to obtain the content of a file.
This test is successful if the content is matched as is with expected data.
'''
def test_get_file_contents(self):
from fsentity import FileSystemFile
script_file = FileSystemFile(self.script_file_path)
self.assertEquals(script_file.get_contents()[0],
self.script_file_contents)
text_file = FileSystemFile(self.text_file_path)
self.assertEquals(text_file.get_contents()[0], self.text_file_contents)
'''
The system must be able to set the contents of a file.
Test is successful if changes are made that match the expected outcome.
'''
def test_set_file_contents(self):
from fsentity import FileSystemDirectory
d = FileSystemDirectory(self.test_dir)
d.create_file(self.set_name, self.set_contents)
import codecs
with codecs.open(self.set_path, 'rb', encoding="utf-8") as f:
file_data = f.read()
# print file_data
self.assertEquals(file_data, self.set_contents)
'''
The system will need to update a file's contents from a differential
format.
The test is successful if the resulting file contents matches the result
of the original content with
a supplied delta.
'''
def test_set_file_from_diff(self):
from fsentity import FileSystemFile
target_file = FileSystemFile(self.diff_target_path)
diff_crc = FileSystemFile(self.diff_new_path).get_crc32()
self.assertTrue(target_file.set_from_comp_diff(self.comp_diff_data,
original_crc=diff_crc))
''' Identify byte encoding '''
def test_identify_encoding(self):
from fsentity import FileSystemFile
text_file = FileSystemFile(self.text_file_path)
self.assertEqual(
text_file.get_encoding().upper(),
self.text_file_encoding
)
script_file = FileSystemFile(self.script_file_path)
self.assertEqual(self.script_file_encoding,
script_file.get_encoding().upper())
''' Identify EOL format '''
def test_identify_line_ending(self):
from fsentity import FileSystemFile
f = FileSystemFile(self.text_file_path)
self.assertEqual(self.text_file_eol, f.get_line_ending()[0])
f = FileSystemFile(self.script_file_path)
self.assertEqual(self.script_file_eol, f.get_line_ending()[0])
''' ... code style? '''
def test_identify_format(self):
from fsentity import FileSystemFile
lang = FileSystemFile(self.script_file_path).get_programming_language()
self.assertEqual("Python", lang)
|
from axelrod import Player
import random
class Random(Player):
"""A player who randomly chooses between cooperating and defecting."""
name = 'Random'
def strategy(self, opponent):
return random.choice(['C','D'])
|
import unittest
import copy
from scrapy.http import Headers
class HeadersTest(unittest.TestCase):
def test_basics(self):
h = Headers({'Content-Type': 'text/html', 'Content-Length': 1234})
assert h['Content-Type']
assert h['Content-Length']
self.assertRaises(KeyError, h.__getitem__, 'Accept')
self.assertEqual(h.get('Accept'), None)
self.assertEqual(h.getlist('Accept'), [])
self.assertEqual(h.get('Accept', '*/*'), '*/*')
self.assertEqual(h.getlist('Accept', '*/*'), ['*/*'])
self.assertEqual(h.getlist('Accept', ['text/html', 'images/jpeg']), ['text/html','images/jpeg'])
def test_single_value(self):
h = Headers()
h['Content-Type'] = 'text/html'
self.assertEqual(h['Content-Type'], 'text/html')
self.assertEqual(h.get('Content-Type'), 'text/html')
self.assertEqual(h.getlist('Content-Type'), ['text/html'])
def test_multivalue(self):
h = Headers()
h['X-Forwarded-For'] = hlist = ['ip1', 'ip2']
self.assertEqual(h['X-Forwarded-For'], 'ip2')
self.assertEqual(h.get('X-Forwarded-For'), 'ip2')
self.assertEqual(h.getlist('X-Forwarded-For'), hlist)
assert h.getlist('X-Forwarded-For') is not hlist
def test_encode_utf8(self):
h = Headers({u'key': u'\xa3'}, encoding='utf-8')
key, val = dict(h).items()[0]
assert isinstance(key, str), key
assert isinstance(val[0], str), val[0]
self.assertEqual(val[0], '\xc2\xa3')
def test_encode_latin1(self):
h = Headers({u'key': u'\xa3'}, encoding='latin1')
key, val = dict(h).items()[0]
self.assertEqual(val[0], '\xa3')
def test_encode_multiple(self):
h = Headers({u'key': [u'\xa3']}, encoding='utf-8')
key, val = dict(h).items()[0]
self.assertEqual(val[0], '\xc2\xa3')
def test_delete_and_contains(self):
h = Headers()
h['Content-Type'] = 'text/html'
assert 'Content-Type' in h
del h['Content-Type']
assert 'Content-Type' not in h
def test_setdefault(self):
h = Headers()
hlist = ['ip1', 'ip2']
olist = h.setdefault('X-Forwarded-For', hlist)
assert h.getlist('X-Forwarded-For') is not hlist
assert h.getlist('X-Forwarded-For') is olist
h = Headers()
olist = h.setdefault('X-Forwarded-For', 'ip1')
self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1'])
assert h.getlist('X-Forwarded-For') is olist
def test_iterables(self):
idict = {'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']}
h = Headers(idict)
self.assertEqual(dict(h), {'Content-Type': ['text/html'], 'X-Forwarded-For': ['ip1', 'ip2']})
self.assertEqual(h.keys(), ['X-Forwarded-For', 'Content-Type'])
self.assertEqual(h.items(), [('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])])
self.assertEqual(list(h.iteritems()),
[('X-Forwarded-For', ['ip1', 'ip2']), ('Content-Type', ['text/html'])])
self.assertEqual(h.values(), ['ip2', 'text/html'])
def test_update(self):
h = Headers()
h.update({'Content-Type': 'text/html', 'X-Forwarded-For': ['ip1', 'ip2']})
self.assertEqual(h.getlist('Content-Type'), ['text/html'])
self.assertEqual(h.getlist('X-Forwarded-For'), ['ip1', 'ip2'])
def test_copy(self):
h1 = Headers({'header1': ['value1', 'value2']})
h2 = copy.copy(h1)
self.assertEqual(h1, h2)
self.assertEqual(h1.getlist('header1'), h2.getlist('header1'))
assert h1.getlist('header1') is not h2.getlist('header1')
assert isinstance(h2, Headers)
def test_appendlist(self):
h1 = Headers({'header1': 'value1'})
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), ['value1', 'value3'])
h1 = Headers()
h1.appendlist('header1', 'value1')
h1.appendlist('header1', 'value3')
self.assertEqual(h1.getlist('header1'), ['value1', 'value3'])
def test_setlist(self):
h1 = Headers({'header1': 'value1'})
self.assertEqual(h1.getlist('header1'), ['value1'])
h1.setlist('header1', ['value2', 'value3'])
self.assertEqual(h1.getlist('header1'), ['value2', 'value3'])
def test_setlistdefault(self):
h1 = Headers({'header1': 'value1'})
h1.setlistdefault('header1', ['value2', 'value3'])
h1.setlistdefault('header2', ['value2', 'value3'])
self.assertEqual(h1.getlist('header1'), ['value1'])
self.assertEqual(h1.getlist('header2'), ['value2', 'value3'])
def test_none_value(self):
h1 = Headers()
h1['foo'] = 'bar'
h1['foo'] = None
h1.setdefault('foo', 'bar')
self.assertEqual(h1.get('foo'), None)
self.assertEqual(h1.getlist('foo'), [])
|
'''
mode | desc
r or rt | read in text mode
w or wt | write in text mode
a or at | append to the end of the file in text mode
rb | read in binary mode
wb | write in binary mode
ab | append to the end of the file in binary mode
'''
f = open("./py200_sample.txt", "w")
f.write("abcd")
f.close()
r = open("./py200_sample.txt", "r")
print("-" * 60)
print(r.readline())
r.close()
|
from otp.ai.AIBaseGlobal import *
from pandac.PandaModules import *
from DistributedNPCToonBaseAI import *
import ToonDNA
from direct.task.Task import Task
from toontown.ai import DatabaseObject
from toontown.estate import ClosetGlobals
class DistributedNPCTailorAI(DistributedNPCToonBaseAI):
freeClothes = simbase.config.GetBool('free-clothes', 0)
housingEnabled = simbase.config.GetBool('want-housing', 1)
def __init__(self, air, npcId):
DistributedNPCToonBaseAI.__init__(self, air, npcId)
self.timedOut = 0
self.givesQuests = 0
self.customerDNA = None
self.customerId = None
return
def getTailor(self):
return 1
def delete(self):
taskMgr.remove(self.uniqueName('clearMovie'))
self.ignoreAll()
self.customerDNA = None
self.customerId = None
DistributedNPCToonBaseAI.delete(self)
return
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
if not self.air.doId2do.has_key(avId):
self.notify.warning('Avatar: %s not found' % avId)
return
if self.isBusy():
self.freeAvatar(avId)
return
av = self.air.doId2do[avId]
self.customerDNA = ToonDNA.ToonDNA()
self.customerDNA.makeFromNetString(av.getDNAString())
self.customerId = avId
av.b_setDNAString(self.customerDNA.makeNetString())
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
flag = NPCToons.PURCHASE_MOVIE_START_BROWSE
if self.freeClothes:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
elif self.air.questManager.hasTailorClothingTicket(av, self) == 1:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
elif self.air.questManager.hasTailorClothingTicket(av, self) == 2:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
self.sendShoppingMovie(avId, flag)
DistributedNPCToonBaseAI.avatarEnter(self)
def isClosetAlmostFull(self, av):
numClothes = len(av.clothesTopsList) / 4 + len(av.clothesBottomsList) / 2
if numClothes >= av.maxClothes - 1:
return 1
return 0
def sendShoppingMovie(self, avId, flag):
self.busy = avId
self.sendUpdate('setMovie', [flag,
self.npcId,
avId,
ClockDelta.globalClockDelta.getRealNetworkTime()])
taskMgr.doMethodLater(NPCToons.TAILOR_COUNTDOWN_TIME, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def rejectAvatar(self, avId):
self.notify.warning('rejectAvatar: should not be called by a Tailor!')
def sendTimeoutMovie(self, task):
toon = self.air.doId2do.get(self.customerId)
if toon != None and self.customerDNA:
toon.b_setDNAString(self.customerDNA.makeNetString())
self.timedOut = 1
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_TIMEOUT,
self.npcId,
self.busy,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
return Task.done
def sendClearMovie(self, task):
self.ignore(self.air.getAvatarExitEvent(self.busy))
self.customerDNA = None
self.customerId = None
self.busy = 0
self.timedOut = 0
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_CLEAR,
self.npcId,
0,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendUpdate('setCustomerDNA', [0, ''])
return Task.done
def completePurchase(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_COMPLETE,
self.npcId,
avId,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
return
def setDNA(self, blob, finished, which):
avId = self.air.getAvatarIdFromSender()
if avId != self.customerId:
if self.customerId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA customer is %s' % self.customerId)
self.notify.warning('customerId: %s, but got setDNA for: %s' % (self.customerId, avId))
return
testDNA = ToonDNA.ToonDNA()
if not testDNA.isValidNetString(blob):
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA: invalid dna: %s' % blob)
return
if self.air.doId2do.has_key(avId):
av = self.air.doId2do[avId]
if finished == 2 and which > 0:
if self.air.questManager.removeClothingTicket(av, self) == 1 or self.freeClothes:
av.b_setDNAString(blob)
if which & ClosetGlobals.SHIRT:
if av.addToClothesTopsList(self.customerDNA.topTex, self.customerDNA.topTexColor, self.customerDNA.sleeveTex, self.customerDNA.sleeveTexColor) == 1:
av.b_setClothesTopsList(av.getClothesTopsList())
else:
self.notify.warning('NPCTailor: setDNA() - unable to save old tops - we exceeded the tops list length')
if which & ClosetGlobals.SHORTS:
if av.addToClothesBottomsList(self.customerDNA.botTex, self.customerDNA.botTexColor) == 1:
av.b_setClothesBottomsList(av.getClothesBottomsList())
else:
self.notify.warning('NPCTailor: setDNA() - unable to save old bottoms - we exceeded the bottoms list length')
self.air.writeServerEvent('boughtTailorClothes', avId, '%s|%s|%s' % (self.doId, which, self.customerDNA.asTuple()))
else:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA bogus clothing ticket')
self.notify.warning('NPCTailor: setDNA() - client tried to purchase with bogus clothing ticket!')
if self.customerDNA:
av.b_setDNAString(self.customerDNA.makeNetString())
elif finished == 1:
if self.customerDNA:
av.b_setDNAString(self.customerDNA.makeNetString())
else:
self.sendUpdate('setCustomerDNA', [avId, blob])
else:
self.notify.warning('no av for avId: %d' % avId)
if self.timedOut == 1 or finished == 0:
return
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.completePurchase(avId)
elif self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA busy with %s' % self.busy)
self.notify.warning('setDNA from unknown avId: %s busy: %s' % (avId, self.busy))
def __handleUnexpectedExit(self, avId):
self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
if self.customerId == avId:
toon = self.air.doId2do.get(avId)
if toon is None:
toon = DistributedToonAI.DistributedToonAI(self.air)
toon.doId = avId
if self.customerDNA:
toon.b_setDNAString(self.customerDNA.makeNetString())
db = DatabaseObject.DatabaseObject(self.air, avId)
db.storeObject(toon, ['setDNAString'])
else:
self.notify.warning('invalid customer avId: %s, customerId: %s ' % (avId, self.customerId))
if self.busy == avId:
self.sendClearMovie(None)
else:
self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
return
|
import os
import time
import logging
import string
import requests
import unicodedata
import base64
try: import cPickle as pickle
except ImportError: import pickle
import datetime
from django.utils import timezone
import json
from pprint import pprint
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.http import HttpResponseForbidden
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
logger = logging.getLogger(__name__)
import boto.ec2
import boto.ec2.cloudwatch
from django.contrib.auth.models import User
from userprofile.models import Profile as userprofile
from userprofile.views import _log_user_activity
from amazon import s3_funcs
from amazon import s3_funcs_shortcuts
from django.contrib.auth.decorators import login_required
from django.template.defaultfilters import filesizeformat, upper
from django.contrib.humanize.templatetags.humanize import naturalday
from cloudly.templatetags.cloud_extras import clean_ps_command
from operator import itemgetter, attrgetter, methodcaller
from cloudly.templatetags.cloud_extras import clear_filename, get_file_extension
from vms.models import Cache
import decimal
from django.db.models.base import ModelState
import pymongo
from pymongo import MongoClient
from pymongo import ASCENDING, DESCENDING
client = MongoClient('mongo', 27017)
mongo = client.cloudly
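# NOTE: assumes a MongoDB service reachable at host 'mongo' (e.g. a Docker
# service name); adjust the host/port for other deployments.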
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
@login_required()
def update_session(request):
for value in request.POST:
if(value != 'secret'):
request.session[value] = request.POST[value]
request.session.modified = True
return render_to_response('ajax_null.html', locals())
@login_required()
def aws_vm_view(request,vm_name):
print '-- aws_vm_view'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = timezone.now() # timezone-aware; avoids naive-datetime warnings when USE_TZ is enabled
user.save()
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name,"aws_vm_view",ip=ip)
vms_cache = Cache.objects.get(user=user)
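# vms_response stores the VM dict as a base64-encoded pickle blob
# (written by ajax_vms_refresh below); decode and unpickle before use.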
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
try:
vm_cache = pickle.loads(vm_cache)[vm_name]
except:
return HttpResponse("XXX " + vm_name)
ec2_region = vm_cache['instance']['region']['name']
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
if(vms_cache.vms_console_output_cache):
console_output = vms_cache.vms_console_output_cache
else:
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
reservations = ec2conn.get_all_instances(instance_ids=[vm_name,])
instance = reservations[0].instances[0]
console_output = instance.get_console_output()
console_output = console_output.output
if(not console_output):
console_output = ""
vms_cache.vms_console_output_cache = console_output
vms_cache.save()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=60)
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkIn")[0]
networkin_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkOut")[0]
networkout_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadOps")[0]
disk_readops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteOps")[0]
disk_writeops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadBytes")[0]
disk_readbytes_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteBytes")[0]
disk_writebytes_datapoints = metric.query(start, end, 'Average', '')
networkin_datapoints = json.dumps(networkin_datapoints,default=date_handler)
networkout_datapoints = json.dumps(networkout_datapoints,default=date_handler)
disk_readops_datapoints = json.dumps(disk_readops_datapoints,default=date_handler)
disk_writeops_datapoints = json.dumps(disk_writeops_datapoints,default=date_handler)
disk_readbytes_datapoints = json.dumps(disk_readbytes_datapoints,default=date_handler)
disk_writebytes_datapoints = json.dumps(disk_writebytes_datapoints,default=date_handler)
return render_to_response('aws_vm.html', {'vm_name':vm_name,'vm_cache':vm_cache,'console_output':console_output,'networkin_datapoints':networkin_datapoints,'networkout_datapoints':networkout_datapoints,'disk_readops_datapoints':disk_readops_datapoints,'disk_writeops_datapoints':disk_writeops_datapoints,'disk_readbytes_datapoints':disk_readbytes_datapoints,'disk_writebytes_datapoints':disk_writebytes_datapoints,}, context_instance=RequestContext(request))
@login_required()
def control_aws_vm(request, vm_name, action):
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = timezone.now() # timezone-aware; avoids naive-datetime warnings when USE_TZ is enabled
user.save()
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name+"/"+action+"/","control_aws_vm",ip=ip)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
vm_cache = pickle.loads(vm_cache)[vm_name]
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2_region = vm_cache['instance']['region']['name']
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
if(action=="reboot"):
ec2conn.reboot_instances([vm_name,])
if(action=="start"):
ec2conn.start_instances([vm_name,])
if(action=="stop"):
ec2conn.stop_instances([vm_name,])
if(action=="terminate"):
ec2conn.terminate_instances([vm_name,])
return HttpResponseRedirect("/")
@login_required()
def server_view(request, hwaddr):
print '-- server_view'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/server/"+hwaddr,"server_view",ip=ip)
hwaddr_orig = hwaddr
hwaddr = hwaddr.replace('-',':')
server = mongo.servers.find_one({'secret':profile.secret,'uuid':hwaddr,})
# check access before touching server fields: find_one() returns None on no match
try:
uuid = server['uuid']
except:
return HttpResponse("access denied")
server_status = "Running"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
server_status = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
server_status = "Offline"
disks_usage_ = []
#disks_usage = mongo.disks_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in disks_usage: disks_usage_.append(i)
disks_usage = disks_usage_
networking_ = []
#networking = mongo.networking.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in networking: networking_.append(i)
networking = networking_
mem_usage_ = []
#mem_usage = mongo.memory_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in mem_usage: mem_usage_.append(i)
mem_usage = mem_usage_
loadavg_ = []
#loadavg = mongo.loadavg.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in loadavg: loadavg_.append(i)
loadavg = loadavg_
activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(3)
disks = []
disks_ = server[u'disks_usage']
for disk in disks_:
if not disk[5] in disks:
disks.append(disk[5])
return render_to_response('server_detail.html', {'secret':profile.secret,'hwaddr':hwaddr,'hwaddr_orig':hwaddr_orig,'server':server,'server_status':server_status,'disks_usage':disks_usage,'disks':disks,'mem_usage':mem_usage,'loadavg':loadavg,'networking':networking,'activity':activity,}, context_instance=RequestContext(request))
@login_required()
def ajax_update_server_name(request):
response = {}
response["success"] = "true"
response = str(response).replace('u"','"')
response = response.replace("'",'"')
server_ = request.POST['server']
secret = request.POST['secret']
server_ = server_.replace('-', ':')
server = mongo.servers.find_one({'secret':secret,'uuid':server_,})
if request.POST["servername"] == "":
server['name'] = request.POST['server'].replace("-", ":")
else:
server['name'] = request.POST["servername"]
server = mongo.servers.update({'secret':secret, 'uuid':server_}, server)
vms_cache = Cache.objects.get(user=request.user)
vms_cache.delete()
return HttpResponse(response, content_type="application/json")
@login_required()
def ajax_vms_refresh(request):
user = request.user
profile = userprofile.objects.get(user=request.user)
print 'Refreshing', user, 'VMs cache..'
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
virtual_machines = {}
servers = mongo.servers.find({'secret':profile.secret,}).sort('_id',-1)
vms_cache = Cache.objects.get_or_create(user=user)
vms_cache = vms_cache[0]
vms_cache.is_updating = True
vms_cache.save()
if(servers.count()):
print 'servers count', servers.count()
for server in servers:
instance_metrics = {}
instance_metrics['id'] = server['uuid']
instance_metrics['user_id'] = request.user.id
instance_metrics['provider'] = 'agent'
instance_metrics['instance'] = {}
instance_metrics['instance']['user_id'] = request.user.id
instance_metrics['instance']['state'] = {}
instance_metrics['instance']['tags'] = {}
try:
instance_metrics["instance"]['tags']['Name'] = server['name']
#instance_metrics["instance"]['tags']['Name'] = ''.join(x for x in unicodedata.normalize('NFKD', server['hostname']) if x in string.ascii_letters).lower()
except:
instance_metrics["instance"]['tags']['Name'] = server['hostname'].replace('.','-').lower()
uuid = server['uuid']
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
instance_metrics['instance']['state']['state'] = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
instance_metrics['instance']['state']['state'] = "Offline"
else:
instance_metrics['instance']['state']['state'] = "Running"
cpu_usage_ = ""
params = {'start':'2m-ago','m':'sum:' + uuid.replace(':','-') + '.sys.cpu'}
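# OpenTSDB-style query: 'start' is a relative time ('2m-ago') and 'm' is
# '<aggregator>:<metric>', with the metric namespaced by the agent uuid.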
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
try:
tsdb_response = tsdb_response[0]['dps']
except:
tsdb_response = []
c=0
for i in tsdb_response:
cpu_usage_ += str(round(tsdb_response[i],2))
cpu_usage_ += ","
if(c==60): break
c+=1
cpu_usage = cpu_usage_[:-1]
cpu_usage_reversed = ""
cpu_usage_array_reversed = []
for i in cpu_usage.split(','): cpu_usage_array_reversed.insert(0,i)
for i in cpu_usage_array_reversed: cpu_usage_reversed += str(i)+","
cpu_usage_reversed = cpu_usage_reversed[:-1]
instance_metrics['cpu_utilization_datapoints'] = cpu_usage_reversed
virtual_machines[server['uuid'].replace(':','-')] = instance_metrics
#print 'virtual_machines', virtual_machines
if aws_ec2_verified:
aws_regions = profile.aws_enabled_regions.split(',')
print 'AWS regions', aws_regions
for ec2_region in aws_regions:
if(ec2_region):
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
try:
reservations = ec2conn.get_all_instances()
except:
vms_cache.is_updating = False
vms_cache.vms_response = ""
vms_cache.save()
print vms_cache.is_updating
print vms_cache.vms_response
#return HttpResponse("access denied")
continue # 'reservations' is undefined when the API call fails, so skip this region
instances = [i for r in reservations for i in r.instances]
for instance in instances:
if not instance: continue
instance_metrics = {}
instance_metrics['instance'] = {}
print '** instance', instance.id, instance.private_ip_address
volumes = []
for volume in ec2conn.get_all_volumes(filters={'attachment.instance-id': instance.id}):
volumes.append([volume.id, volume.iops, volume.size,])
groups = []
for group in instance.__dict__['groups']:
groups.append([group.id, group.name,])
instance_metrics['id'] = instance.id
instance_metrics['user_id'] = request.user.id
instance_metrics['provider'] = "aws-ec2"
instance_metrics['instance']['placement'] = instance.placement
instance_metrics['instance']['user_id'] = request.user.id
instance_metrics['instance']['groups'] = groups
instance_metrics['instance']['block_device_mapping'] = volumes
instance_metrics['instance']['architecture'] = instance.architecture
instance_metrics['instance']['client_token'] = instance.client_token
instance_metrics['instance']['dns_name'] = instance.dns_name
instance_metrics['instance']['private_ip_address'] = instance.private_ip_address
instance_metrics['instance']['hypervisor'] = instance.hypervisor
instance_metrics['instance']['id'] = instance.id
instance_metrics['instance']['image_id'] = instance.image_id
instance_metrics['instance']['instance_type'] = instance.instance_type
instance_metrics['instance']['ip_address'] = instance.ip_address
instance_metrics['instance']['key_name'] = instance.key_name
instance_metrics['instance']['launch_time'] = instance.launch_time
instance_metrics['instance']['monitored'] = instance.monitored
instance_metrics['instance']['persistent'] = instance.persistent
instance_metrics['instance']['ramdisk'] = instance.ramdisk
instance_metrics['instance']['root_device_name'] = instance.root_device_name
instance_metrics['instance']['root_device_type'] = instance.root_device_type
instance_metrics['instance']['tags'] = instance.tags
instance_metrics['instance']['virtualization_type'] = instance.virtualization_type
instance_metrics['instance']['vpc_id'] = instance.vpc_id
instance_metrics['instance']['region'] = {"endpoint":instance.region.endpoint,"name":instance.region.name,}
instance_metrics['instance']['state'] = {"state":instance.state,"code":instance.state_code,"state_reason":instance.state_reason,}
virtual_machines[instance.id] = instance_metrics
print 'Updating', request.user, 'cache..'
print instance.platform, instance.product_codes
try:
ec2conn.monitor_instance(str(instance.id))
except:
print instance.id, 'instance not in a monitorable state!!'.upper()
#pprint(instance_metrics)
continue
# Here is where you define start - end for the Logs...............
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=60)
# This is how you list all possible values on the response....
# print ec2conn.list_metrics()
try:
metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="CPUUtilization")[0]
except: continue
cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent')
instance_metrics['cpu_utilization_datapoints'] = json.dumps(cpu_utilization_datapoints,default=date_handler)
virtual_machines[instance.id] = instance_metrics
vms_cache.vms_response = base64.b64encode(pickle.dumps(virtual_machines, pickle.HIGHEST_PROTOCOL))
vms_cache.last_seen = timezone.now()
vms_cache.is_updating = False
vms_cache.save()
print 'VMs cache was successfully updated.'
return HttpResponse("ALLDONE")
@login_required()
def ajax_virtual_machines(request):
print '-- ajax virtual machines'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
try:
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
except: vm_cache = {}
try:
vm_cache = pickle.loads(vm_cache)
except: vm_cache = {}
c=0
ajax_vms_response = "{"
for vm in vm_cache:
if(vm_cache[vm]["instance"]["state"]["state"].lower()!="terminated"):
data_median = 0
isotope_filter_classes = " offline linux "
try:
data = ""
cpu_utilization_datapoints = vm_cache[vm]["cpu_utilization_datapoints"]
cpu_utilization_datapoints = json.loads(cpu_utilization_datapoints)
z=0
for i in cpu_utilization_datapoints:
data += str(i["Average"])
try:
data_median += float(i["Average"])
except: pass
if(len(cpu_utilization_datapoints)-1>z):
data += ","
#print data
z+=1
try:
data_median = data_median/z
except: data_median = 0
except:
try:
data = vm_cache[vm]["cpu_utilization_datapoints"]
z = 0
data_median = 0
for i in data.split(','):
z+=1
data_median += float(i)
data_median = data_median/z
except: data = ""
try:
instance_name = vm_cache[vm]["instance"]["tags"]["Name"]
except:
instance_name = vm
print 'instance_name', instance_name
color = "silver "
vm_state = vm_cache[vm]["instance"]["state"]["state"].title()
server_mac_address = vm_cache[vm]['id']
server_mac_address = str(server_mac_address).replace(':','-')
if(vm_state=="Running"):
isotope_filter_classes = " linux "
if(data_median<17):
color = "lightBlue "
if(data_median>=17 and data_median<=35):
color = "green "
isotope_filter_classes += " busy"
if(data_median>35 and data_median<=50):
color = "darkGreen "
isotope_filter_classes += " busy"
if(data_median>50 and data_median<=70):
color = "lightOrange "
isotope_filter_classes += " busy"
if(data_median>70):
isotope_filter_classes += " busy critical"
color = "red "
if data_median>85:
vm_state = "Hot hot hot!"
if(vm_state=="Stopping"):
color = "pink "
if(vm_state=="Pending"):
color = "pink "
if(vm_state=="Shutting-Down"):
color = "pink "
if(vm_state=="Stopped"):
isotope_filter_classes += " offline"
if(vm_cache[vm]['provider']!='agent'):
isotope_filter_classes += " cloud"
ajax_vms_response += "\""
ajax_vms_response += server_mac_address
ajax_vms_response += "\": {"
ajax_vms_response += "\"vmcolor\":\""
ajax_vms_response += color
ajax_vms_response += "\","
ajax_vms_response += "\"vmname\":\""
ajax_vms_response += instance_name
ajax_vms_response += "\","
ajax_vms_response += "\"vmtitle\":\""
ajax_vms_response += isotope_filter_classes
ajax_vms_response += "\","
ajax_vms_response += "\"averge\":\""
ajax_vms_response += data
ajax_vms_response += "\","
ajax_vms_response += "\"state\":\""
ajax_vms_response += vm_state
ajax_vms_response += "\","
ajax_vms_response += "\"link\":\""
if(vm_cache[vm]['provider']=='agent'):
ajax_vms_response += "/server/"+vm+"/"
else:
ajax_vms_response += "/aws/"+vm+"/"
ajax_vms_response += "\""
ajax_vms_response += "},"
if(c==len(vm_cache)-1):
ajax_vms_response += "}"
c+=1
#print '-_'*80
#print vm_cache[vm]["instance"]["state"]["state"].title(), vm
ajax_vms_response = ajax_vms_response.replace(",}","}")
if(not vm_cache): ajax_vms_response = "{}" # keep the response a string, matching the populated case
return render_to_response('ajax_virtual_machines.html', {'user':user,'ajax_vms_response':ajax_vms_response,'vms_cached_response':vm_cache,}, context_instance=RequestContext(request))
@login_required()
def ajax_aws_graphs(request, instance_id, graph_type="all"):
print '-- ajax_aws_graphs', request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
try:
vm_cache = pickle.loads(vm_cache)[instance_id]
except:
return HttpResponse("XXX " + instance_id)
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2_region = vm_cache['instance']['region']['name']
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
reservations = ec2conn.get_all_instances(instance_ids=[instance_id,])
instance = reservations[0].instances[0]
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(days=10)
metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance_id}, metric_name="CPUUtilization")[0]
cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent',period=3600)
return HttpResponse("data " + instance_id + "=" + str(instance) + " ** " + graph_type.upper())
@login_required()
def ajax_server_graphs(request, hwaddr, graph_type=""):
print '-- ajax_server_graphs, type', graph_type
print request.user
graphs_mixed_respose = []
secret = request.POST['secret']
uuid = request.POST['server']
uuid = uuid.replace('-',':')
server = mongo.servers.find_one({'secret':secret,'uuid':uuid,})
print 'debug', secret, uuid
try:
uuid = server['uuid']
except:
return HttpResponse("access denied")
server_status = "Running"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
server_status = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
server_status = "Offline"
#activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(3)
if(graph_type=="server_info"):
graphs_mixed_respose = {}
graphs_mixed_respose['name'] = server['name']
graphs_mixed_respose['server_info_hostname'] = server['hostname']
graphs_mixed_respose['cpu_used'] = server['cpu_usage']['cpu_used']
graphs_mixed_respose['memory_used'] = server['memory_usage']['memory_used_percentage']
graphs_mixed_respose['swap_used'] = server['memory_usage']['swap_used_percentage']
graphs_mixed_respose['loadavg_used'] = server['loadavg'][1]
graphs_mixed_respose['server_info_uptime'] = server['uptime']
graphs_mixed_respose['server_info_loadavg'] = server['loadavg']
graphs_mixed_respose['server_info_status'] = server_status
# repr() emits single quotes and u'' prefixes; swap quotes first, then strip the u prefix
graphs_mixed_respose = str(graphs_mixed_respose).replace("'", '"')
graphs_mixed_respose = graphs_mixed_respose.replace('u"', '"')
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="processes"):
processes_ = []
processes = server['processes']
c=0
for line in processes:
if(c>0):
if not line:break
line = line.split(' ')
line_ = []
for i in line:
if i: line_.append(i)
line = line_
process_user = line[0]
process_pid = line[1]
process_cpu = line[2]
process_mem = line[3]
process_vsz = line[4]
process_rss = line[5]
process_tty = line[6]
process_stat = line[7]
process_start_time = line[8]+'-'+line[9]
process_command = line[10:]
process_name = clean_ps_command(process_command[0])
process = {
'pid': process_pid,
'cpu': process_cpu+'%',
'mem': process_mem+'%',
# 'vsz': process_vsz,
# 'rss': process_rss,
# 'tty': process_tty,
# 'stat': process_stat,
# 'start_time': process_start_time,
'process': process_name,
'command': ' '.join(str(x) for x in process_command).replace("[", "").replace("]","")
}
process['user'] = '<span class=\\"label label-success\\">'
if int(float(process_cpu)) > 50:
process['user'] = '<span class=\\"label label-warning\\">'
if int(float(process_cpu)) > 75:
process['user'] = '<span class=\\"label label-danger\\">'
process['user'] += process_user
process['user'] += '</span>'
processes_.append(process)
c+=1
processes = {}
processes['data'] = processes_
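# the dict's repr() is massaged into JSON by swapping quote styles below;
# fragile, but it matches what the client-side table expects here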
processes = str(processes).replace(" u'"," '").replace("[u'","['").replace("'",'"').replace("\\\\", "\\")
return HttpResponse(processes, content_type="application/json")
if(graph_type=="network_connections"):
network_connections_ = []
network_connections = server['network_connections']['listen']
for conn in network_connections:
connection = {}
connection['proto'] = conn[1]
connection['recv-q'] = conn[2]
connection['send-q'] = conn[3]
connection['address'] = conn[4]
if conn[6]:
connection['port'] = conn[5] + "/" + conn[6]
else:
connection['port'] = conn[5]
network_connections_.append(connection)
network_connections = {}
network_connections['data'] = network_connections_
network_connections = str(network_connections).replace(" u'"," '")
network_connections = str(network_connections).replace("'",'"')
return HttpResponse(network_connections, content_type="application/json")
if(graph_type=="active_network_connections"):
active_network_connections_ = []
active_network_connections = server['network_connections']['established']
for conn in active_network_connections:
connection = {}
connection['proto'] = conn[1]
connection['recv-q'] = conn[2]
connection['send-q'] = conn[3]
connection['local-address'] = conn[7]
connection['foreign-address'] = conn[4]
connection['foreign-port'] = conn[5]
active_network_connections_.append(connection)
active_network_connections = {}
active_network_connections['data'] = active_network_connections_
active_network_connections = str(active_network_connections).replace(" u'"," '")
active_network_connections = str(active_network_connections).replace("'",'"')
return HttpResponse(active_network_connections, content_type="application/json")
if(graph_type=="loadavg"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = [[],[],[]]
loadavg_specific_queries = ['1-min','5-mins','15-mins']
count = 0
for i in loadavg_specific_queries:
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.loadavg'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.loadavg'}
params_ = params
params_['m'] = params['m'] + "{avg="+i+"}"
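# OpenTSDB tag filter: '{avg=1-min}' etc. selects one loadavg series per
# iteration; params_ aliases params, so 'm' is rebuilt fresh every pass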
tsdb = requests.get('http://hbase:4242/api/query', params=params_)
params = params_
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose[count].append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose[count] = sorted(graphs_mixed_respose[count], key=itemgetter(0))
graphs_mixed_respose[count] = graphs_mixed_respose[count][::-1]
count += 1
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="disks"):
print request.POST
mount_point = request.POST['mountPoint']
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.disks'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.disks'}
params['m'] += "{mm=disk_used,mount_point="+mount_ponit+"}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="cpu_usage"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.cpu'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.cpu'}
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="mem_usage" or graph_type=="swap_usage"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.memory'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.memory'}
if(graph_type=="mem_usage"):
params['m'] += "{mm=memory_used}"
if(graph_type=="swap_usage"):
params['m'] += "{mm=swap_used}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
if(graph_type=="network_input_packets" or graph_type=="inbound_traffic" or graph_type=="network_output_packets" or graph_type=="outbound_traffic"):
params = None
graph_interval = request.POST['interval']
graphs_mixed_respose = []
if(graph_interval=="3m"):
params = {'start':'3m-ago','m':'avg:3s-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="15m"):
params = {'start':'15m-ago','m':'avg:15s-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="1h"):
params = {'start':'1h-ago','m':'avg:1m-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="1d"):
params = {'start':'1d-ago','m':'avg:30m-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="7d"):
params = {'start':'7d-ago','m':'avg:3h-avg:' + hwaddr + '.sys.network'}
if(graph_interval=="30d"):
params = {'start':'30d-ago','m':'avg:12h-avg:' + hwaddr + '.sys.network'}
if(graph_type=="network_input_packets"):
params['m'] += "{mm=input_accept_packets}"
if(graph_type=="network_input_bytes"):
params['m'] += "{mm=input_accept_bytes}"
if(graph_type=="network_output_packets"):
params['m'] += "{mm=output_accept_packets}"
if(graph_type=="network_output_bytes"):
params['m'] += "{mm=output_accept_bytes}"
if(params):
tsdb = requests.get('http://hbase:4242/api/query',params=params)
tsdb_response = json.loads(tsdb.text)
tsdb_response = tsdb_response[0]['dps']
for i in tsdb_response:
graphs_mixed_respose.append([int(i),round(float(tsdb_response[i]),2)])
graphs_mixed_respose = sorted(graphs_mixed_respose, key=itemgetter(0))
graphs_mixed_respose = [graphs_mixed_respose[::-1],]
graphs_mixed_respose = str(graphs_mixed_respose).replace("u'","'")
return HttpResponse(graphs_mixed_respose, content_type="application/json")
return HttpResponse("I'm sorry I don't understand")
def ajax_virtual_machines_box(request):
return render_to_response('ajax_virtual_machines_box.html', locals(), context_instance=RequestContext(request))
|
"""Checks/fixes are bundled in one namespace."""
import logging
from rdflib.namespace import RDF, SKOS
from .rdftools.namespace import SKOSEXT
from .rdftools import localname, find_prop_overlap
def _hierarchy_cycles_visit(rdf, node, parent, break_cycles, status):
if status.get(node) is None:
status[node] = 1 # entered
for child in sorted(rdf.subjects(SKOS.broader, node)):
_hierarchy_cycles_visit(
rdf, child, node, break_cycles, status)
status[node] = 2 # set this node as completed
elif status.get(node) == 1: # has been entered but not yet done
if break_cycles:
logging.warning("Hierarchy cycle removed at %s -> %s",
localname(parent), localname(node))
rdf.remove((node, SKOS.broader, parent))
rdf.remove((node, SKOS.broaderTransitive, parent))
rdf.remove((node, SKOSEXT.broaderGeneric, parent))
rdf.remove((node, SKOSEXT.broaderPartitive, parent))
rdf.remove((parent, SKOS.narrower, node))
rdf.remove((parent, SKOS.narrowerTransitive, node))
else:
logging.warning(
"Hierarchy cycle detected at %s -> %s, "
"but not removed because break_cycles is not active",
localname(parent), localname(node))
elif status.get(node) == 2: # is completed already
pass
def hierarchy_cycles(rdf, fix=False):
"""Check if the graph contains skos:broader cycles and optionally break these.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
with skos:broaderTransitive.
"""
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
status = {}
for cs, root in top_concepts:
_hierarchy_cycles_visit(
rdf, root, None, fix, status=status)
# double check that all concepts were actually visited in the search,
# and visit remaining ones if necessary
recheck_top_concepts = False
for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
if conc not in status:
recheck_top_concepts = True
_hierarchy_cycles_visit(
rdf, conc, None, fix, status=status)
return recheck_top_concepts
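# Typical use (sketch): hierarchy_cycles(graph, fix=True) both reports and
# breaks cycles in place; with fix=False it only logs them.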
def disjoint_relations(rdf, fix=False):
"""Check if the graph contains concepts connected by both of the semantically
disjoint properties skos:related and skos:broaderTransitive (S27),
and optionally remove the involved skos:related relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:related relations that
overlap with skos:broaderTransitive.
"""
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)):
if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)):
if fix:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"removing skos:related",
conc1, conc2)
rdf.remove((conc1, SKOS.related, conc2))
rdf.remove((conc2, SKOS.related, conc1))
else:
logging.warning(
"Concepts %s and %s connected by both "
"skos:broaderTransitive and skos:related, "
"but keeping it because keep_related is enabled",
conc1, conc2)
def hierarchical_redundancy(rdf, fix=False):
"""Check for and optionally remove extraneous skos:broader relations.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing skos:broader relations between
concepts that are otherwise connected by skos:broaderTransitive.
"""
for conc, parent1 in sorted(rdf.subject_objects(SKOS.broader)):
for parent2 in sorted(rdf.objects(conc, SKOS.broader)):
if parent1 == parent2:
continue # must be different
if parent2 in rdf.transitive_objects(parent1, SKOS.broader):
if fix:
logging.warning(
"Eliminating redundant hierarchical relationship: "
"%s skos:broader %s",
conc, parent2)
rdf.remove((conc, SKOS.broader, parent2))
rdf.remove((conc, SKOS.broaderTransitive, parent2))
rdf.remove((parent2, SKOS.narrower, conc))
rdf.remove((parent2, SKOS.narrowerTransitive, conc))
else:
logging.warning(
"Redundant hierarchical relationship "
"%s skos:broader %s found, but not eliminated "
"because eliminate_redundancy is not set",
conc, parent2)
def preflabel_uniqueness(rdf, policy='all'):
"""Check that concepts have no more than one value of skos:prefLabel per
language tag (S14), and optionally move additional values to skos:altLabel.
:param Graph rdf: An rdflib.graph.Graph object.
:param str policy: Policy for deciding which value to keep as prefLabel
when multiple prefLabels are found. Possible values are 'shortest'
(keep the shortest label), 'longest' (keep the longest label),
'uppercase' (prefer uppercase), 'lowercase' (prefer lowercase) or
'all' (keep all, just log the problems). Alternatively, a list of
policies to apply in order, such as ['shortest', 'lowercase'], may
be used.
"""
resources = set(
(res for res, label in rdf.subject_objects(SKOS.prefLabel)))
policy_fn = {
'shortest': len,
'longest': lambda x: -len(x),
'uppercase': lambda x: int(x[0].islower()),
'lowercase': lambda x: int(x[0].isupper())
}
if not isinstance(policy, (list, tuple)):
policies = policy.split(',')
else:
policies = policy
for p in policies:
if p != 'all' and p not in policy_fn:
logging.critical("Unknown preflabel-policy: %s", p)
return
def key_fn(label):
return [policy_fn[p](label) for p in policies] + [str(label)]
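# key_fn ranks candidate labels by each policy in order, with the label
# string itself as a final tie-breaker so the choice is deterministic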
for res in sorted(resources):
prefLabels = {}
for label in rdf.objects(res, SKOS.prefLabel):
lang = label.language
if lang not in prefLabels:
prefLabels[lang] = []
prefLabels[lang].append(label)
for lang, labels in prefLabels.items():
if len(labels) > 1:
if policies[0] == 'all':
logging.warning(
"Resource %s has more than one prefLabel@%s, "
"but keeping all of them due to preflabel-policy=all.",
res, lang)
continue
chosen = sorted(labels, key=key_fn)[0]
logging.warning(
"Resource %s has more than one prefLabel@%s: "
"choosing %s (policy: %s)",
res, lang, chosen, str(policy))
for label in labels:
if label != chosen:
rdf.remove((res, SKOS.prefLabel, label))
rdf.add((res, SKOS.altLabel, label))
def label_overlap(rdf, fix=False):
"""Check if concepts have the same value for any two of the pairwise
disjoint properties skos:prefLabel, skos:altLabel and skos:hiddenLabel
(S13), and optionally remove the least significant property.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing the least significant property
(altLabel or hiddenLabel).
"""
def label_warning(res, label, keep, remove):
if fix:
logging.warning(
"Resource %s has '%s'@%s as both %s and %s; removing %s",
res, label, label.language, keep, remove, remove
)
else:
logging.warning(
"Resource %s has '%s'@%s as both %s and %s",
res, label, label.language, keep, remove
)
for res, label in find_prop_overlap(rdf, SKOS.prefLabel, SKOS.altLabel):
label_warning(res, label, 'prefLabel', 'altLabel')
if fix:
rdf.remove((res, SKOS.altLabel, label))
for res, label in find_prop_overlap(rdf, SKOS.prefLabel, SKOS.hiddenLabel):
label_warning(res, label, 'prefLabel', 'hiddenLabel')
if fix:
rdf.remove((res, SKOS.hiddenLabel, label))
for res, label in find_prop_overlap(rdf, SKOS.altLabel, SKOS.hiddenLabel):
label_warning(res, label, 'altLabel', 'hiddenLabel')
if fix:
rdf.remove((res, SKOS.hiddenLabel, label))
|
import unittest, sys, os
sys.path[:0] = [os.path.dirname(os.path.dirname(os.path.abspath(__file__)))] # list-wrap: slice assignment splices in each character of a bare string
from saklient.cloud.enums.eserverinstancestatus import EServerInstanceStatus
class TestEnum(unittest.TestCase):
def test_should_be_defined(self):
self.assertEqual(EServerInstanceStatus.UP, "up")
self.assertEqual(EServerInstanceStatus.DOWN, "down")
def test_should_be_compared(self):
self.assertEqual(EServerInstanceStatus.compare("up", "up"), 0)
self.assertEqual(EServerInstanceStatus.compare("up", "down"), 1)
self.assertEqual(EServerInstanceStatus.compare("down", "up"), -1)
self.assertEqual(EServerInstanceStatus.compare("UNDEFINED-SYMBOL", "up"), None)
self.assertEqual(EServerInstanceStatus.compare("up", "UNDEFINED-SYMBOL"), None)
self.assertEqual(EServerInstanceStatus.compare(None, "up"), None)
self.assertEqual(EServerInstanceStatus.compare("up", None), None)
self.assertEqual(EServerInstanceStatus.compare(None, None), None)
if __name__ == '__main__':
unittest.main()
|
__author__ = 'las3wh'
print('goodbye')
|
def superstring(arr, accumulator=''):
# We now have all strings
if len(arr) == 0:
return accumulator
# Initial call
elif len(accumulator) == 0:
accumulator = arr.pop(0)
return superstring(arr, accumulator)
# Recursive call
else:
for i in range(len(arr)):
sample = arr[i]
l = len(sample)
for p in range(l // 2): # floor division keeps this valid under Python 2 and 3
q = l - p
if accumulator.startswith(sample[p:]):
arr.pop(i)
return superstring(arr, sample[:p] + accumulator)
if accumulator.endswith(sample[:q]):
arr.pop(i)
return superstring(arr, accumulator + sample[q:])
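# Example: superstring(['GACCG', 'ACCGT']) returns 'GACCGT' -- the fragments
# merge on their 'ACCG' overlap (more than half of each candidate must overlap).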
f = open("rosalind_long.txt", "r")
dnas = {}
currentKey = ''
for content in f:
# Beginning of a new sample
if '>' in content:
key = content.rstrip().replace('>', '')
currentKey = key
dnas[currentKey] = ''
else:
dnas[currentKey] += content.rstrip()
print superstring(dnas.values())
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('attracker_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='segment',
name='additional_miles',
field=models.FloatField(default=0, verbose_name='Non-AT miles hiked with the segment'),
),
]
|
import csv
import httplib2
import json
import os
import sys
from bs4 import BeautifulSoup
mbz_instDict = {}
h = httplib2.Http()
link = 'https://musicbrainz.org/instruments'
uri_root = 'https://musicbrainz.org'
resp, html_doc = h.request(link, "GET")
soup = BeautifulSoup(html_doc, "lxml")
# the CSS attribute-prefix selector requires '^='; the original '^"..."' is invalid
for result in soup.body.select(
'a[href^="/instrument/"]'):
label = result.contents[0].string
uri = ''.join([uri_root, result.get('href')])
mbz_instDict[str(uri)] = label
mbz_instDict_path = os.path.join(
os.path.dirname(__file__), os.pardir, 'source-files', 'mbz_instDict.json')
mbz_instList_path = os.path.join(
os.path.dirname(__file__), os.pardir, 'source-files', 'mbz_instList.csv')
with open(mbz_instDict_path, 'w') as f1:
json.dump(mbz_instDict, f1)
with open(mbz_instList_path, 'w', newline='') as csvfile:
w = csv.writer(csvfile, dialect='excel', delimiter=',')
for k,v in mbz_instDict.items():
w.writerow([k,v])
print("Finished gathering MusicBrainz instrument URIs and labels")
|
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys
from string import punctuation
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
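# e.g. strip_punctuation(['c++,', 'sql.']) -> ['c', 'sql']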
def main():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
#model.save("word2vec-model")
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
if __name__ == '__main__':
main()
|
from django.shortcuts import render,get_object_or_404
from django.http import Http404
from django.http import HttpResponse
def home(request):
return render(request, 'index.html')
|
from clickerft.cft import Cft
from time import sleep
class Suite(Cft):
def test_buy_item_4(self):
while int(self.clicksPerGeneration.text) < 2:
if int(self.clicksOwned.text) < 1:
sleep(.5)
continue
self.increaseClicksPerGeneration.click()
while int(self.tr1.text) < int(self.pi4r1.text):
self.click_r_test('r1')
while int(self.tr2.text) < int(self.pi4r2.text):
self.click_r_test('r2')
self.i4.click()
assert int(self.oi4.text) == 1
sleep(1)
# todo put the modifier into the DOM to parse
assert int(self.tr1.text) == 5
if __name__ == '__main__':
Suite()
|
import os
from clang.cindex import Config, Index, TypeKind
class ClangExtractor(object):
def __init__(self, libclang_path, srcdir):
if Config.library_file != libclang_path:
Config.set_library_file(libclang_path)
self.srcdir = srcdir
def extract(self):
protos = dict()
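# protos maps function name -> [return type, arg types..., maybe "..."],
# with Lisp_Object flattened to "void *" (see below)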
for dirpath, dirnames, filenames in os.walk(self.srcdir):
for fname in filenames:
fpath = dirpath + "/" + fname
fext = fname.split(".")[-1]
if fext == "c" or fext == "h":
index = Index.create()
tu = index.parse(fpath)
self.__clang_find_protos(tu.cursor, protos)
return protos
def __clang_find_protos(self, node, protos):
if (node.type.kind == TypeKind.FUNCTIONPROTO): # or node.type.kind == TypeKind.FUNCTIONNOPROTO):
if node.spelling not in protos:
protos[node.spelling] = list()
if len(protos[node.spelling]) == 0:
if (node.result_type.spelling == "Lisp_Object"):
protos[node.spelling].append("void *")
else:
protos[node.spelling].append(node.result_type.get_canonical().spelling)
for c in node.get_arguments():
if (c.type.spelling == "Lisp_Object"):
protos[node.spelling].append("void *")
else:
protos[node.spelling].append(c.type.get_canonical().spelling)
if node.type.is_function_variadic():
protos[node.spelling].append("...")
for c in node.get_children():
self.__clang_find_protos(c, protos)
|
"""Contains utility functions for working with the shell"""
from contextlib import contextmanager
import datetime
from decimal import Decimal
import json
import pprint
import sys
import time
import traceback
SHELL_CONTROL_SEQUENCES = {
'BLUE': '\033[34m',
'LTBLUE': '\033[94m',
'GREEN': '\033[32m',
'LTGREEN': '\033[92m',
'YELLOW': '\033[33m',
'LTYELLOW': '\033[93m',
'RED': '\033[31m',
'LTRED': '\033[91m',
'CYAN': '\033[36m',
'LTCYAN': '\033[96m',
'MAGENTA': '\033[35m',
'LTMAGENTA': '\033[95m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
}
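# Messages may embed placeholders like "{GREEN}" or "{BOLD}"; _shell_format()
# (below) substitutes these ANSI escape codes via str.format().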
BLUE = "{BLUE}"
LTBLUE = "{LTBLUE}"
GREEN = "{GREEN}"
LTGREEN = "{LTGREEN}"
YELLOW = "{YELLOW}"
LTYELLOW = "{LTYELLOW}"
RED = "{RED}"
LTRED = "{LTRED}"
CYAN = "{CYAN}"
LTCYAN = "{LTCYAN}"
MAGENTA = "{MAGENTA}"
LTMAGENTA = "{LTMAGENTA}"
ENDC = "{ENDC}"
BOLD = "{BOLD}"
UNDERLINE = "{UNDERLINE}"
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return float(o)
elif isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
return str(o)
return super(JSONEncoder, self).default(o)
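# Usage sketch: json.dumps(obj, cls=JSONEncoder) serializes Decimal and
# date/time values that the stock encoder rejects.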
def read_json(timeout=0):
"""Read json data from stdin (timeout is currently unused; read() blocks until EOF)"""
data = read()
if data:
return json.loads(data)
def write_output(writer, *output, **kwargs):
"""Write the output to the writer, used for printing to stdout/stderr"""
to_print = kwargs.get("sep", " ").join(output) + kwargs.get("end", "\n")
if isinstance(writer, list):
writer.append(to_print)
else:
writer.write(to_print)
if kwargs.get("flush"):
writer.flush()
def write_json(output, end='', raw=False, file=None, flush=False):
file = file or sys.stdout
if len(output) == 1:
output = output[0]
if raw:
json.dump(output, file, separators=(',', ':'), cls=JSONEncoder)
else:
json.dump(output, file, indent=4, sort_keys=True, cls=JSONEncoder)
if flush:
file.flush()
if end:
write_output(file, '', end=end, sep='', flush=flush)
def read():
"""Read from stdin"""
return sys.stdin.read()
def choice(choices, msg='Enter your choice: ', color=True, default=None, **kwargs):
if isinstance(choices, dict):
choices_dict = choices
choices = sorted(choices_dict.keys())
elif isinstance(choices, (tuple, list)):
choices_dict = None
choice_msg = ['']
validate = []
for idx, item in enumerate(choices):
if color:
choice_msg.append("\t{LTYELLOW}%d{LTMAGENTA}: %s" % (idx, str(item)))
else:
choice_msg.append("\t%d: %s" % (idx, str(item)))
validate.append(str(idx))
choice_msg.append("")
if color:
choice_msg.append("{LTMAGENTA}{BOLD}"+msg+"{ENDC}")
else:
choice_msg.append(msg)
output = ask("\n".join(choice_msg), validate=validate, default=default, color=None, **kwargs)
if choices_dict:
key = choices[int(output)]
return choices_dict[key]
else:
return choices[int(output)]
def ask(*args, **kwargs):
"""Ask for input"""
if not sys.stdin.isatty():
error("Cannot ask user for input, no tty exists")
sys.exit(1)
print_args = list(args)
print_args.append(kwargs.get("end", "\n"))
if kwargs["color"]:
print_args.insert(0, "{" + kwargs["color"] + "}")
print_args.append(ENDC)
while True:
stderr(*print_args, end='', **kwargs)
in_ = input()
if in_:
if not kwargs["validate"]:
return in_
if isinstance(kwargs["validate"], (tuple, list)) and in_ in kwargs["validate"]:
return in_
if callable(kwargs["validate"]) and kwargs["validate"](in_):
return in_
if kwargs["default"] is not None:
return kwargs["default"]
if kwargs["error_msg"] is not None:
error("\n" + kwargs["error_msg"] + "\n")
else:
error("\nYou didn't enter a valid choice!\n")
time.sleep(1)
def pretty(output):
"""Pretty format for shell output"""
return pprint.pformat(output, indent=2, width=100)
def _shell_format(output, **kwargs):
"""Formats the output for printing to a shell"""
kwargs.update(SHELL_CONTROL_SEQUENCES)
for idx, item in enumerate(output):
try:
output[idx] = item.format(**kwargs)
except KeyError:
pass # Can happen if some item is not in the kwargs dict
return output
def _convert_print(*args):
"""Convert the given arguments to a string for printing. Concantenate them together"""
output = []
for arg in args:
if not isinstance(arg, str):
arg = pretty(arg)
output.append(arg)
return output
def stdout_to_stderr():
"""Temporarily redirects stdout to stderr. Returns no-arg function to turn it back on."""
stdout = sys.stdout
sys.stdout = sys.stderr
def restore_stdout():
sys.stdout = stdout
return restore_stdout
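# Usage sketch:
#   restore = stdout_to_stderr()
#   noisy_function()          # its stdout goes to stderr
#   restore()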
def write_info_output(writer, *output, **kwargs):
if kwargs.get("json"):
return write_json(output, **kwargs)
if not kwargs.get("raw", False):
output = _convert_print(*output)
output = _shell_format(output, **kwargs)
write_output(writer, *output, **kwargs)
def stdout(*output, **kwargs):
"""Print to stdout. Supports colors"""
write_info_output(sys.stdout, *output, **kwargs)
def stderr(*output, **kwargs):
"""Print to stderr. Supports colors"""
write_info_output(sys.stderr, *output, **kwargs)
def print_color(color, *output, **kwargs):
"""Print message to stderr in the given color"""
print_args = list(output)
print_args.append(ENDC)
if "file" in kwargs:
write_output(kwargs["file"], *output, **kwargs)
else:
stderr(color, *print_args, **kwargs)
def debug(*output, **kwargs):
"""Print debug message to stderr"""
print_color(BLUE, *output, **kwargs)
def info(*output, **kwargs):
"""Print info message to stderr"""
print_color(GREEN, *output, **kwargs)
def warning(*output, **kwargs):
"""Print warning message to stderr"""
print_color(YELLOW, *output, **kwargs)
def error(*output, **kwargs):
"""Print error message to stderr"""
print_color(RED, *output, **kwargs)
def exception(*output, **kwargs):
"""Print error message to stderr with last exception info"""
exc = traceback.format_exc()
print_args = list(output)
print_args.append("\nAn exception occurred:\n{exc}".format(exc=exc))
print_color(RED, *print_args, **kwargs)
def timestamp():
return int(time.time())
@contextmanager
def elapsed(output, **kwargs):
"""Context Manager that prints to stderr how long a process took"""
start = timestamp()
info("Starting: ", output, **kwargs)
yield
info("Completed: " + output + " {MAGENTA}(Elapsed Time: {elapsed}s){ENDC}", elapsed=timestamp()-start, **kwargs)
def elapsed_decorator(output):
"""Decorator that prints to stderr how long a process took"""
def wrapper(fn):
def wrapped_fn(*args, **kwargs):
with elapsed(output, **kwargs):
fn(*args, **kwargs)
return wrapped_fn
return wrapper
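# Usage sketch:
#   @elapsed_decorator("rebuilding cache")
#   def rebuild_cache():
#       ...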
def print_section(color, *output, **kwargs):
"""Prints a section title header"""
output = ["\n\n", 60 * "#", "\n", "# "] + list(output) + ["\n", 60 * "#", "\n"]
print_color(color, *output, end="\n", **kwargs)
def print_table(headers, *table_data, **kwargs):
if not table_data:
return
if isinstance(table_data[0], dict):
all_data = []
for d in table_data:
new_output = []
for header in headers:
new_output.append(d[header])
all_data.append(new_output)
else:
all_data = table_data
all_data.insert(0, headers)
widths = [max(len(str(d[idx])) for d in all_data) for idx, _ in enumerate(headers)]
output = []
for row_idx, data in enumerate(all_data):
line = []
pad = "<" if row_idx == 0 else ">"
for idx, item in enumerate(data):
formatter = "{item: " + pad + str(widths[idx]) + "}"
line.append(formatter.format(item=item))
output.append("| " + " | ".join(line) + " |")
write_output(kwargs.get("file", sys.stderr), *output, **kwargs)
|
"""
Django settings for school project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, '../templates'),
)
SECRET_KEY = 'koeorn$p_9&6!%1!84=erv*)#40-f$&z+_hq1^a1+2#93_ev%y'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'school.urls'
WSGI_APPLICATION = 'school.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Chongqing' # zoneinfo names are case-sensitive
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
"""
A simple client to query a TensorFlow Serving instance.
Example:
$ python client.py \
--images IMG_0932_sm.jpg \
--num_results 10 \
--model_name inception \
--host localhost \
--port 9000 \
--timeout 10
Author: Grant Van Horn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tfserver
def parse_args():
parser = argparse.ArgumentParser(description='Command line classification client. Sorts and prints the classification results.')
parser.add_argument('--images', dest='image_paths',
help='Path to one or more images to classify (jpeg or png).',
type=str, nargs='+', required=True)
parser.add_argument('--num_results', dest='num_results',
help='The number of results to print. Set to 0 to print all classes.',
required=False, type=int, default=0)
parser.add_argument('--model_name', dest='model_name',
help='The name of the model to query.',
required=False, type=str, default='inception')
parser.add_argument('--host', dest='host',
help='Machine host where the TensorFlow Serving model is.',
required=False, type=str, default='localhost')
parser.add_argument('--port', dest='port',
help='Port that the TensorFlow Server is listening on.',
required=False, type=int, default=9000)
parser.add_argument('--timeout', dest='timeout',
help='Amount of time to wait before failing.',
required=False, type=int, default=10)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Read in the image bytes
image_data = []
for fp in args.image_paths:
with open(fp, 'rb') as f: # binary mode -- image bytes must not be text-decoded
data = f.read()
image_data.append(data)
# Get the predictions
t = time.time()
predictions = tfserver.predict(image_data, model_name=args.model_name,
host=args.host, port=args.port, timeout=args.timeout
)
dt = time.time() - t
print("Prediction call took %0.4f seconds" % (dt,))
# Process the results
results = tfserver.process_classification_prediction(predictions, max_classes=args.num_results)
# Print the results
for i, fp in enumerate(args.image_paths):
print("Results for image: %s" % (fp,))
for name, score in results[i]:
print("%s: %0.3f" % (name, score))
print()
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('merchants', '0006_classfy'),
]
operations = [
migrations.CreateModel(
name='RegionItem',
fields=[
('name', models.CharField(max_length=20)),
('id', models.CharField(max_length=10, serialize=False, primary_key=True)),
],
options={
'db_table': 'T_region',
},
),
migrations.RenameModel(
old_name='Classfy',
new_name='ClassfyItem',
),
migrations.RenameModel(
old_name='MerchantInfo',
new_name='MerchantItem',
),
]
|
import json
import os
from processes.postgres import Postgres
from processes.gather_exception import GatherException
try:
DB_SERVER = os.environ['DB_SERVER']
DB_PORT = os.environ['DB_PORT']
DB_DATABASE = os.environ['DB_DATABASE']
DB_USER = os.environ['DB_USER']
DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
try:
from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
except ImportError:
print("No parameters provided")
exit()
class Main(object):
def __init__(self):
self.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
self.source_topic = 'youtube'
self.destination_topic = 'movies'
def run(self, data):
"""
This inserts the relevant json information
into the table kino.movies.
:param data: json data holding information on films.
"""
imdb_id = data['imdb_id']
omdb_movie_data = data['omdb_main']
tmdb_movie_data = data['tmdb_main']
sql = """insert into kino.languages(language)
select y.language
from json_to_recordset(%s) x (original_language varchar(1000))
join kino.iso2language y
on x.original_language = y.iso3166
where language not in (select language
from kino.languages)"""
self.pg.pg_cur.execute(sql, (json.dumps(tmdb_movie_data),))
self.pg.pg_conn.commit()
# We delete our record from kino.movies first.
# Due to foreign keys with 'on delete cascade', this clears all records from
# the database associated with that imdb_id.
sql = """delete from kino.movies
where imdb_id = '{0}'""".format(imdb_id)
self.pg.pg_cur.execute(sql)
self.pg.pg_conn.commit()
# We also delete any records in errored attached to this imdb_id, as
# we have successfully gathered information for the film.
sql = """delete from kino.errored
where imdb_id = '{0}'""".format(imdb_id)
self.pg.pg_cur.execute(sql)
self.pg.pg_conn.commit()
sql = """insert into kino.movies (imdb_id, title, runtime, rated, released, orig_language, plot, tstamp)
select x.imdb_id
, y.title
, y.runtime
, x.rated
, y.release_date::date
, z.language
, y.plot
, CURRENT_DATE
from json_to_recordset(%s) x ( imdb_id varchar(15), rated varchar(10) )
join json_to_recordset(%s) y ( imdb_id varchar(15), title varchar(1000), runtime integer
, release_date date, plot varchar(4000), original_language varchar(1000))
on x.imdb_id = y.imdb_id
join kino.iso2language z
on y.original_language = z.iso3166
"""
self.pg.pg_cur.execute(sql, (json.dumps(omdb_movie_data), json.dumps(tmdb_movie_data)))
if self.pg.pg_cur.rowcount != 1:
raise GatherException(omdb_movie_data[0]['imdb_id'], 'No insert into movies, most likely due to a new language')
self.pg.pg_conn.commit()
sql = """insert into kino.kino_ratings (imdb_id, rating) values (%s, 3) on conflict do nothing"""
self.pg.pg_cur.execute(sql, (imdb_id,))
self.pg.pg_conn.commit()
return data
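# Minimal usage sketch (hypothetical payload); 'data' mirrors the keys read in
# Main.run() above, with connection details taken from the environment:
#
#   data = {'imdb_id': 'tt0000000',
#           'omdb_main': [{'imdb_id': 'tt0000000', 'rated': 'PG'}],
#           'tmdb_main': [{'imdb_id': 'tt0000000', 'title': '...',
#                          'original_language': 'en'}]}
#   Main().run(data)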
|
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify, unhexlify
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
import sscoin_hash
BIP0031_VERSION = 60000
MY_VERSION = 70206 # current MIN_PEER_PROTO_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.2/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000L # 1 btc in satoshis
mininode_socket_map = dict()
mininode_lock = RLock()
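# A single re-entrant lock guards state shared between the asyncore network
# thread and the test thread; wait_until(), writable() and handle_write()
# below all acquire mininode_lock before touching that state.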
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def sscoinhash(s):
return sscoin_hash.getPoWHash(s)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000L:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
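# nBits packs a 256-bit target into 4 bytes: the top byte is a base-256
# exponent and the low three bytes are the mantissa. For example, the
# well-known minimum-difficulty target decodes as
#   uint256_from_compact(0x1d00ffff) == 0xffff << (8 * (0x1d - 3))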
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(unhexlify(hex_string.encode('ascii'))))
return obj
def ToHex(obj):
return hexlify(obj.serialize()).decode('ascii')
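# Illustrative round trip for any object with serialize()/deserialize()
# (hex_string is an assumed input):
#   tx = FromHex(CTransaction(), hex_string)
#   assert ToHex(tx) == hex_string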
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(sscoinhash(r))
self.hash = encode(sscoinhash(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
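                # With an odd number of hashes the last entry is paired with
                # itself, matching Bitcoin's merkle tree construction.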
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a mutable default argument: a shared CTransaction default
        # would be mutated by deserialize() across all msg_tx instances.
        self.tx = CTransaction() if tx is None else tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in sscoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
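# Illustrative usage from a test thread (hypothetical 'node' object):
#   assert wait_until(lambda: node.verack_received, timeout=30)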
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
}
MAGIC_BYTES = {
"mainnet": b"\xbf\x0c\x6b\xbd", # mainnet
"testnet3": b"\xce\xe2\xca\xff", # testnet3
"regtest": b"\xfc\xc1\xb7\xdc" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Sscoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
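        # Wire format: 4-byte network magic, 12-byte zero-padded command,
        # 4-byte little-endian payload length, then (for protocol >= 209)
        # a 4-byte double-SHA256 checksum, followed by the payload itself.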
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print 'got_data:', repr(e)
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
            for obj in disconnected:
                obj.handle_close()
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
import urllib.parse
import urllib.request
import json
import logging
import requests
log = logging.getLogger('tyggbot')
class APIBase:
@staticmethod
def _get(url, headers={}):
try:
req = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(req)
except Exception as e:
return None
try:
return response.read().decode('utf-8')
except Exception as e:
log.error(e)
return None
@staticmethod
def _get_json(url, headers={}):
try:
data = APIBase._get(url, headers)
if data:
return json.loads(data)
else:
return data
except Exception:
log.exception('Caught exception while trying to parse json data.')
return None
def get_url(self, endpoints=[], parameters={}):
return self.base_url + '/'.join(endpoints) + ('' if len(parameters) == 0 else '?' + urllib.parse.urlencode(parameters))
def getraw(self, endpoints=[], parameters={}):
return APIBase._get(self.get_url(endpoints, parameters), self.headers)
def get(self, endpoints, parameters={}):
try:
data = self.getraw(endpoints, parameters)
if data:
return json.loads(data)
else:
return data
except Exception as e:
log.error(e)
return None
def post(self, endpoints=[], parameters={}, data={}):
try:
req = urllib.request.Request(self.get_url(endpoints, parameters), urllib.parse.urlencode(data).encode('utf-8'), self.headers)
response = urllib.request.urlopen(req)
except Exception as e:
log.error(e)
return None
try:
return response.read().decode('utf-8')
except Exception as e:
log.error(e)
return None
class ChatDepotAPI(APIBase):
def __init__(self):
APIBase.__init__(self)
self.base_url = 'http://chatdepot.twitch.tv/'
self.headers = {
'Accept': 'application/vnd.twitchtv.v3+json'
}
class ImraisingAPI(APIBase):
def __init__(self, apikey):
APIBase.__init__(self)
self.base_url = 'https://imraising.tv/api/v1/'
self.headers = {
'Authorization': 'APIKey apikey="{0}"'.format(apikey),
'Content-Type': 'application/json',
}
class StreamtipAPI(APIBase):
def __init__(self, client_id, access_token):
APIBase.__init__(self)
self.base_url = 'https://streamtip.com/api/'
self.headers = {
'Authorization': client_id + ' ' + access_token,
}
class TwitchAPI(APIBase):
def __init__(self, client_id=None, oauth=None, type='kraken'):
APIBase.__init__(self)
self.base_url = 'https://api.twitch.tv/{0}/'.format(type)
self.headers = {
'Accept': 'application/vnd.twitchtv.v3+json',
}
if client_id:
self.headers['Client-ID'] = client_id
if oauth:
self.headers['Authorization'] = 'OAuth ' + oauth
class SafeBrowsingAPI:
def __init__(self, apikey, appname, appvers):
self.apikey = apikey
self.appname = appname
self.appvers = appvers
return
def check_url(self, url):
base_url = 'https://sb-ssl.google.com/safebrowsing/api/lookup?client=' + self.appname + '&key=' + self.apikey + '&appver=' + self.appvers + '&pver=3.1&url='
url2 = base_url + urllib.parse.quote(url, '')
r = requests.get(url2)
if r.status_code == 200:
return True # malware or phishing
        return False  # TODO: handle non-200 status codes; they are ignored for now
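# Illustrative usage (hypothetical credentials):
#   sb = SafeBrowsingAPI('my-api-key', 'myapp', '1.0')
#   if sb.check_url('http://example.com/'):
#       pass  # treat the URL as malware/phishing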
|
'''
Created on Dec 13, 2015
@author: Shannon Litwin
'''
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
import Lib_LCD as LCD
import Lib_Main as BBB
import sys
import signal
import time
leftForward = "P8_46"
leftBackward = "P8_45"
rightForward = "P9_14"
rightBackward = "P9_16"
def Control_C_Exit(signal, frame):
GPIO.cleanup()
PWM.cleanup()
print("\nProgram halted! Exiting program!")
sys.exit()
signal.signal(signal.SIGINT, Control_C_Exit) # For cleaning up mid run
'''Keep to show Dr. Berry'''
LCD.init()
time.sleep(1)
LCD.backlight("on")
time.sleep(2)
LCD.backlight("off")
time.sleep(1)
line_message = "Hi Dr. Berry."
LCD.write_line(line_message)
time.sleep(5)
LCD.cursor_home()
long_message = "This is 35 chars and needs 2 lines."
LCD.write_screen(long_message)
time.sleep(5)
LCD.cursor_home()
long_message = "Which is fine because the screen can hold up to 80 characters."
LCD.write_screen(long_message)
time.sleep(5)
LCD.cursor_home()
long_message = "However, if the message is too long it will truncate. That is why you cannot read this entire message."
LCD.write_screen(long_message)
time.sleep(5)
LCD.clear()
m1 = "It works 1"
m2 = "It works 2"
m3 = "It works 3"
m4 = "It works 4"
time.sleep(1)
LCD.goto_line(4)
LCD.write_line(m4)
time.sleep(1)
LCD.goto_line(3)
LCD.write_line(m3)
time.sleep(1)
LCD.goto_line(2)
LCD.write_line(m2)
time.sleep(1)
LCD.goto_line(1)
LCD.write_line(m1)
LCD.clear()
BBB.cleanup_all()
|
"""
Custom managers for Django models registered with the tagging
application.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
class ModelTagManager(models.Manager):
"""
A manager for retrieving tags for a particular model.
"""
def __init__(self, tag_model):
super(ModelTagManager, self).__init__()
self.tag_model = tag_model
def get_query_set(self):
content_type = ContentType.objects.get_for_model(self.model)
return self.tag_model.objects.filter(
items__content_type__pk=content_type.pk).distinct()
def related(self, tags, *args, **kwargs):
return self.tag_model.objects.related_for_model(tags, self.model, *args, **kwargs)
def usage(self, *args, **kwargs):
return self.tag_model.objects.usage_for_model(self.model, *args, **kwargs)
class ModelTaggedItemManager(models.Manager):
"""
A manager for retrieving model instances based on their tags.
"""
def __init__(self, tag_model):
super(ModelTaggedItemManager, self).__init__()
self.intermediary_table_model = tag_model.objects.intermediary_table_model
def related_to(self, obj, queryset=None, num=None):
if queryset is None:
return self.intermediary_table_model.objects.get_related(obj, self.model, num=num)
else:
return self.intermediary_table_model.objects.get_related(obj, queryset, num=num)
def with_all(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_by_model(queryset, tags)
def with_any(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_union_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_union_by_model(queryset, tags)
class TagDescriptor(object):
"""
A descriptor which provides access to a ``ModelTagManager`` for
model classes and simple retrieval, updating and deletion of tags
for model instances.
"""
def __init__(self, tag_model):
self.tag_model = tag_model
def __get__(self, instance, owner):
if not instance:
tag_manager = ModelTagManager(self.tag_model)
tag_manager.model = owner
return tag_manager
else:
return self.tag_model.objects.get_for_object(instance)
def __set__(self, instance, value):
self.tag_model.objects.update_tags(instance, value)
    def __delete__(self, instance):
self.tag_model.objects.update_tags(instance, [])
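# Minimal sketch (hypothetical model) of wiring the descriptor to a model:
#
#   class Article(models.Model):
#       title = models.CharField(max_length=100)
#       tags = TagDescriptor(Tag)  # class access -> ModelTagManager,
#                                  # instance access -> that object's tags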
|
import parsers
import tokenizer
import context
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in unicode(text))
class Tag(object):
def __init__(self, args):
self.args = args
def render(self, context):
return ''
class PairedTag(Tag):
def __init__(self, args):
self.children = []
super(PairedTag, self).__init__(args)
def render(self, context):
char_buffer = ''
for child in self.children:
char_buffer += unicode(child.render(context))
return char_buffer
class SingleLineTag(Tag):
pass
class TemplateContentTag(PairedTag):
pass
class LiteralContent(Tag):
def __init__(self, content):
self.content = content
def render(self, context):
return unicode(self.content)
class EscapedContentTag(Tag):
def render(self, context):
ct = tokenizer.ExpressionTokenizer()
parser = parsers.TopDownParser(ct.yield_tokens(' '.join(self.args)))
return html_escape(parser.parse().eval(context))
class UnescapedContentTag(Tag):
def render(self, context):
ct = tokenizer.ExpressionTokenizer()
parser = parsers.TopDownParser(ct.yield_tokens(' '.join(self.args)))
return unicode(parser.parse().eval(context))
class CommentTag(Tag):
def render(self, context):
return ''
class IfTag(PairedTag):
closing_literal = 'if'
def render(self, context):
        # An if tag can also contain else/elif tags, so check for those first.
        # expression_groups is a stack of (conditional, children) groups to
        # evaluate in order.
expression_groups = []
current_group = []
current_group_conditional = self
for child in self.children:
if type(child) == ElseTag:
expression_groups.append((current_group_conditional, current_group))
current_group_conditional = child
current_group = []
else:
current_group.append(child)
expression_groups.append((current_group_conditional, current_group))
retval = ''
for conditional, tag_group in expression_groups:
ct = tokenizer.ExpressionTokenizer()
parser = parsers.TopDownParser(ct.yield_tokens(' '.join(conditional.args)))
if len(parser.tokens):
if parser.parse().eval(context):
for tag in tag_group:
retval += unicode(tag.render(context))
break
else:
for tag in tag_group:
retval += unicode(tag.render(context))
break
return retval
class ElseTag(Tag):
def render(self, context):
raise Exception("Cannot call render directly on else tag")
class ForTag(PairedTag):
closing_literal = 'for'
def render(self, var_context):
        if len(self.args) != 3:
raise Exception('The for tag takes exactly three arguments following the pattern instance_var in iterable')
for_child_tags = []
else_child_tags = []
in_else_tag = False
for child in self.children:
if in_else_tag:
else_child_tags.append(child)
else:
for_child_tags.append(child)
if type(child) == ElseTag:
in_else_tag = True
class_var = self.args[0]
iterable = var_context.eval(self.args[2])
retval = ''
cnt = 0
if iterable and len(iterable):
for item in iterable:
#add the current class var in the context dictionary for all children. it could
#overlay something already existing, but that's fine.
local_context = context.ContextWrap(var_context.context, { class_var: item, '#index' : cnt })
cnt+=1
for child in for_child_tags:
retval += child.render(local_context)
else:
for child in else_child_tags:
retval += child.render(var_context)
return retval
class VerbatimTag(PairedTag):
closing_literal = 'verbatim'
TagMap = {
'render' : EscapedContentTag,
':' : EscapedContentTag,
'>' : UnescapedContentTag,
'#' : CommentTag,
'if' : IfTag,
'else' : ElseTag,
'elif' : ElseTag,
'verbatim' : VerbatimTag,
'for' : ForTag,
}
|
import os
import sys
import json
from optional_django import staticfiles
from optional_django.serializers import JSONEncoder
from optional_django.safestring import mark_safe
from optional_django import six
from js_host.function import Function
from js_host.exceptions import FunctionError
from react.render import RenderedComponent
from react.exceptions import ComponentSourceFileNotFound
from react.exceptions import ReactRenderingError
from react_router.conf import settings
from react_router.templates import MOUNT_JS
from react_router.bundle import bundle_component
from webpack.compiler import WebpackBundle
class RouteRenderedComponent(RenderedComponent):
def get_client_asset(self):
client_asset = None
bundled_component = self.get_bundle()
assets = bundled_component.get_assets()
for asset in assets:
if asset['path'] == self.path_to_source:
client_asset = asset
break
return client_asset
def get_var(self):
client_asset = self.get_client_asset()
if client_asset:
return 'client'
raise Exception("Client asset not found.")
def render_js(self):
client_asset = self.get_client_asset()
if client_asset:
client_bundle = mark_safe(WebpackBundle.render_tag(client_asset['url']))
return mark_safe(
'\n{bundle}\n<script>\n{mount_js}\n</script>\n'.format(
bundle=client_bundle,
mount_js=self.render_mount_js(),
)
)
def render_mount_js(self):
return mark_safe(
MOUNT_JS.format(
var=self.get_var(),
props=self.serialized_props or 'null',
container_id=self.get_container_id()
)
)
class RouteRedirect(object):
def __init__(self, pathname, query = None, state = None, *args, **kwargs):
self.path = pathname
self.query = query
if state and 'nextPathname' in state:
self.nextPath = state['nextPathname']
else:
self.nextPath = None
if self.path is None:
raise ReactRenderingError("No path returned for redirection.")
super(RouteRedirect, self).__init__(*args, **kwargs)
@property
def url(self):
if self.query:
return "%s?next=%s&%s" % (self.path, self.nextPath, self.query)
else:
return "%s?next=%s" % (self.path, self.nextPath)
class RouteNotFound(object):
def __init__(self, *args, **kwargs):
super(RouteNotFound, self).__init__(*args, **kwargs)
js_host_function = Function(settings.JS_HOST_FUNCTION)
def render_route(
# Rendering options
path, # path to routes file
client_path, # path to client routes file
request, # pass in request object
props=None,
to_static_markup=None,
# Bundling options
bundle=None,
translate=None,
# Prop handling
json_encoder=None
):
if not os.path.isabs(path):
abs_path = staticfiles.find(path)
if not abs_path:
raise ComponentSourceFileNotFound(path)
path = abs_path
if not os.path.exists(path):
raise ComponentSourceFileNotFound(path)
if not os.path.isabs(client_path):
abs_client_path = staticfiles.find(client_path)
if not abs_client_path:
raise ComponentSourceFileNotFound(client_path)
client_path = abs_client_path
if not os.path.exists(client_path):
raise ComponentSourceFileNotFound(client_path)
bundled_component = None
import re
client_re = re.compile(r"client-(?:\w*\d*).js",re.IGNORECASE)
server_re = re.compile(r"server-(?:\w*\d*).js",re.IGNORECASE)
if bundle or translate:
bundled_component = bundle_component(path, client_path, translate=translate)
assets = bundled_component.get_assets()
for asset in assets:
m = client_re.search(asset['name'])
if m:
client_path = asset['path']
m = server_re.search(asset['name'])
if m:
path = asset['path']
if json_encoder is None:
json_encoder = JSONEncoder
if props is not None:
serialized_props = json.dumps(props, cls=json_encoder)
else:
serialized_props = None
try:
location = {
'pathname': request.path,
'query': request.GET.dict()
}
cbData = json.loads(js_host_function.call(
path=path,
location=location,
serializedProps=serialized_props,
toStaticMarkup=to_static_markup
))
except FunctionError as e:
        six.reraise(ReactRenderingError, ReactRenderingError(*e.args), sys.exc_info()[2])
if cbData['match']:
return RouteRenderedComponent(cbData['markup'], client_path, props, serialized_props, bundled_component, to_static_markup)
else:
if cbData['redirectInfo']:
return RouteRedirect(**cbData['redirectInfo'])
else:
return RouteNotFound()
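# Minimal sketch (hypothetical paths and props) of calling render_route from a
# Django view; RouteRedirect/RouteNotFound are handled by the caller:
#
#   def index(request):
#       rendered = render_route(path='js/routes.js', client_path='js/client.js',
#                               request=request, props={'user': 'anon'},
#                               bundle=True)
#       if isinstance(rendered, RouteRedirect):
#           return redirect(rendered.url)
#       if isinstance(rendered, RouteNotFound):
#           return HttpResponseNotFound()
#       return render(request, 'index.html', {'rendered': rendered})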
|
import pytest
import math
import pickle
from ezdxf.math._vector import Vec2, Vec3
from ezdxf.acc import USE_C_EXT
all_vec_classes = [Vec2, Vec3]
vec2_only = [Vec2]
if USE_C_EXT:
from ezdxf.acc.vector import Vec2 as CVec2
all_vec_classes.append(CVec2)
vec2_only.append(CVec2)
@pytest.fixture(params=all_vec_classes)
def vcls(request):
return request.param
@pytest.fixture(params=vec2_only)
def vec2(request):
return request.param
def test_init_tuple(vcls):
v = vcls((2, 3))
assert v.x == 2
assert v.y == 3
def test_empty_init(vcls):
v = vcls()
assert v.x == 0.
assert v.y == 0.
def test_init_vec2(vcls):
v = Vec2(vcls(2, 3))
assert v.x == 2
assert v.y == 3
def test_compatible_to_vector():
v = Vec3(Vec2(1, 2))
assert v == (1, 2, 0)
v = Vec2(Vec3(1, 2, 3))
assert v.x == 1
assert v.y == 2
def test_vec3(vec2):
v = vec2(1, 2)
assert len(v) == 2
v3 = v.vec3
assert len(v3) == 3
assert v3 == (1, 2, 0)
def test_round(vec2):
v = vec2(1.123, 2.123)
v2 = v.round(1)
assert v2 == (1.1, 2.1)
def test_from_angle(vcls):
angle = math.radians(50)
length = 3.0
assert vcls.from_angle(angle, length) == vcls(
(math.cos(angle) * length, math.sin(angle) * length)
)
def test_vec2_as_tuple(vec2):
v = vec2(1, 2)
assert v[0] == 1
assert v[1] == 2
with pytest.raises(IndexError):
_ = v[2]
# negative indices not supported
with pytest.raises(IndexError):
_ = v[-1]
def test_iter(vcls):
assert sum(vcls(1, 2)) == 3
def test_deep_copy():
import copy
v = Vec2(1, 2)
l1 = [v, v, v]
l2 = copy.copy(l1)
assert l2[0] is l2[1]
assert l2[1] is l2[2]
assert l2[0] is v
# Vec3, CVec2 and CVec3 are immutable and do not create copies of itself!
l3 = copy.deepcopy(l1)
assert l3[0] is l3[1]
assert l3[1] is l3[2]
assert l3[0] is not v
def test_get_angle(vcls):
v = vcls(3, 3)
assert math.isclose(v.angle_deg, 45)
assert math.isclose(v.angle, math.radians(45))
def test_compare_vectors(vcls):
v1 = vcls(1, 2)
assert v1 == v1
v2 = vcls(2, 3)
assert v2 > v1
assert v1 < v2
def test_is_close(vcls):
v1 = vcls(421846.9857097387, -36908.41493252139)
v2 = vcls(421846.9857097387, -36908.41493252141)
assert v1.isclose(v2) is True
def test_is_null(vcls):
v = vcls(0, 0)
assert v.is_null is True
v1 = vcls(23.56678, 56678.56778) * (1.0 / 14.5667)
v2 = vcls(23.56678, 56678.56778) / 14.5667
assert (v2 - v1).is_null
def test_is_not_null_default_abs_tol(vcls):
assert vcls(1e-11, 0).is_null is False
def test_is_null_default_abs_tol(vcls):
assert vcls(1e-12, 0).is_null is True
def test_bool(vcls):
v = vcls((0, 0))
assert bool(v) is False
v1 = vcls(23.56678, 56678.56778) * (1.0 / 14.5667)
v2 = vcls(23.56678, 56678.56778) / 14.5667
result = v2 - v1
assert bool(result) is False
# current rel_tol=1e-9
assert not vcls(1e-8, 0).is_null
def test_magnitude(vcls):
v = vcls(3, 4)
assert math.isclose(abs(v), 5)
assert math.isclose(v.magnitude, 5)
def test_normalize(vcls):
v = vcls(2, 0)
assert v.normalize() == (1, 0)
def test_normalize_to_length(vcls):
v = vcls(2, 0)
assert v.normalize(4) == (4, 0)
def test_orthogonal_ccw(vcls):
v = vcls(3, 4)
assert v.orthogonal() == (-4, 3)
def test_orthogonal_cw(vcls):
v = vcls(3, 4)
assert v.orthogonal(False) == (4, -3)
def test_negative(vcls):
v = vcls(2, 3)
assert -v == (-2, -3)
def test_add_vector(vcls):
assert vcls(2, 3) + vcls(7, 7) == (9, 10)
def test_add_vec3(vec2):
assert vec2(2, 3) + Vec3(7, 7) == (9, 10)
def test_iadd_vector(vec2):
v = Vec2(2, 3)
v += Vec2(7, 7)
assert v == (9, 10)
def test_add_scalar_type_error(vcls):
with pytest.raises(TypeError):
vcls(1, 1) + 1
def test_iadd_scalar_type_error(vcls):
v = vcls(2, 3)
with pytest.raises(TypeError):
v += 1
def test_radd_scalar_type_error(vcls):
with pytest.raises(TypeError):
1 + vcls(1, 1)
def test_radd_tuple_type_error(vec2):
with pytest.raises(TypeError):
(1, 1) + vec2(1, 1)
def test_sub_vector(vcls):
assert vcls(2, 3) - vcls(7, 7) == (-5, -4)
def test_isub_vector(vec2):
v = Vec2(2, 3)
v -= Vec2(7, 7)
assert v == (-5, -4)
def test_sub_vec3(vec2):
assert vec2(2, 3) - Vec3(7, 7) == (-5, -4)
def test_sub_scalar_type_error(vcls):
with pytest.raises(TypeError):
vcls(1, 1) - 1
def test_isub_scalar_type_error(vcls):
v = vcls(2, 3)
with pytest.raises(TypeError):
v -= 1
def test_rsub_tuple(vec2):
with pytest.raises(TypeError):
(2, 3) - vec2(7, 7)
def test_rsub_scalar_type_error(vcls):
with pytest.raises(TypeError):
1 - vcls(1, 1)
def test_mul_scalar(vcls):
v = vcls(2, 3)
assert v * 2 == (4, 6)
def test_imul_scalar(vcls):
v = vcls(2, 3)
v *= 2
assert v == (4, 6)
def test_rmul_scalar(vcls):
assert 2 * vcls(2, 3) == (4, 6)
def test_mul_tuple_type_error(vcls):
with pytest.raises(TypeError):
vcls(2, 3) * (2, 2)
def test_rmul_tuple_type_error(vcls):
with pytest.raises(TypeError):
(2, 2) * vcls(2, 3)
def test_imul_tuple_type_error(vcls):
v = vcls(2, 3)
with pytest.raises(TypeError):
v *= (2, 2)
def test_div_scalar(vcls):
v = vcls(2, 3)
assert v / 2 == (1, 1.5)
def test_idiv_scalar(vcls):
v = vcls(2, 3)
v /= 2
assert v == (1, 1.5)
def test_dot_product(vcls):
v1 = vcls(2, 7)
v2 = vcls(3, 9)
assert math.isclose(v1.dot(v2), 69)
def test_angle_deg(vcls):
assert math.isclose(vcls((0, 1)).angle_deg, 90)
assert math.isclose(vcls((0, -1)).angle_deg, -90)
assert math.isclose(vcls((1, 1)).angle_deg, 45)
assert math.isclose(vcls((-1, 1)).angle_deg, 135)
def test_angle_between(vcls):
v1 = vcls(0, 1)
v2 = vcls(1, 1)
angle = v1.angle_between(v2)
assert math.isclose(angle, math.pi / 4)
# reverse order, same result
angle = v2.angle_between(v1)
assert math.isclose(angle, math.pi / 4)
@pytest.mark.parametrize(
"v1, v2",
[
[(1, 0), (0, 0)],
[(0, 0), (1, 0)],
[(0, 0), (0, 0)],
],
)
def test_angle_between_null_vector(vcls, v1, v2):
with pytest.raises(ZeroDivisionError):
vcls(v1).angle_between(vcls(v2))
def test_angle_between_outside_domain():
v1 = Vec3(721.046967113573, 721.0469671135688, 0.0)
v2 = Vec3(-721.0469671135725, -721.0469671135688, 0.0)
angle = v1.angle_between(v2)
assert math.isclose(angle, math.pi)
# reverse order, same result
angle = v2.angle_between(v1)
assert math.isclose(angle, math.pi)
def test_rotate(vcls):
assert vcls(2, 2).rotate_deg(90).isclose(vcls(-2, 2))
def test_lerp(vcls):
v1 = vcls(1, 1)
v2 = vcls(4, 4)
assert v1.lerp(v2, 0.5) == (2.5, 2.5)
assert v1.lerp(v2, 0) == (1, 1)
assert v1.lerp(v2, 1) == (4, 4)
def test_project(vcls):
v = vcls(10, 0)
assert v.project(vcls(5, 0)) == (5, 0)
assert v.project(vcls(5, 5)) == (5, 0)
v = vcls(10, 10)
assert v.project(vcls(10, 0)).isclose(vcls(5, 5))
def test_det(vec2):
assert vec2(1, 0).det(vec2(0, 1)) == 1
assert vec2(0, 1).det(vec2(1, 0)) == -1
def test_sum(vcls):
assert vcls.sum([]).is_null is True
assert vcls.sum([vcls(1, 1)]) == (1, 1)
assert vcls.sum([vcls(1, 1), vcls(2, 2)]) == (3, 3)
def test_picklable(vec2):
for v in [vec2((1, 2.5)), vec2(1, 2.5)]:
pickled_v = pickle.loads(pickle.dumps(v))
assert v == pickled_v
assert type(v) is type(pickled_v)
|
from django.shortcuts import render
from .models import Course, Student, StudentCourse
from .serializers import CourseSerializer, StudentSerialiser
from rest_framework import viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
class StudentViewSet(viewsets.ModelViewSet):
queryset = Student.objects.all()
serializer_class = StudentSerialiser
@list_route(methods=['GET'])
def make(self, request):
username = request.GET.get('username', None)
if username:
Student.objects.get_or_create(nickname=username)
return Response({'success': True})
class CourseViewSet(viewsets.ModelViewSet):
queryset = Course.objects.all()
serializer_class = CourseSerializer
def get_queryset(self):
result = super(CourseViewSet, self).get_queryset()
username = self.request.GET.get('username', None)
active = self.request.GET.get('active', None)
if not username or active != '1':
return result
user = Student.objects.get(nickname=username)
courses_ids = StudentCourse.objects.filter(student=user, active=True).values_list('course_id', flat=True)
return result.filter(id__in=courses_ids)
@detail_route(methods=['GET'])
def start(self, request, pk=None):
username = request.GET.get('username', None)
user = Student.objects.get(nickname=username)
course = Course.objects.get(id=pk)
student_course, created = StudentCourse.objects.get_or_create(student=user, course=course)
StudentCourse.objects.filter(student=user).update(active=False)
student_course.active = True
student_course.save()
return Response({'success': True})
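# Illustrative requests against these endpoints (hypothetical router prefixes):
#   GET /students/make/?username=alice      -> creates Student 'alice'
#   GET /courses/?username=alice&active=1   -> only alice's active courses
#   GET /courses/3/start/?username=alice    -> marks course 3 active for alice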
|
import time
from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
class TimestampIndexTest(DeuscoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-timestampindex"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-timestampindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining 25 blocks..."
blockhashes = self.nodes[0].generate(25)
time.sleep(3)
print "Mining 25 blocks..."
blockhashes.extend(self.nodes[0].generate(25))
time.sleep(3)
print "Mining 25 blocks..."
blockhashes.extend(self.nodes[0].generate(25))
self.sync_all()
low = self.nodes[1].getblock(blockhashes[0])["time"]
high = low + 76
print "Checking timestamp index..."
hashes = self.nodes[1].getblockhashes(high, low)
assert_equal(len(hashes), len(blockhashes))
assert_equal(hashes, blockhashes)
print "Passed\n"
if __name__ == '__main__':
TimestampIndexTest().main()
|
"""
A simple example demonstrating the various ways to call cmd2.Cmd.read_input() for input history and tab completion
"""
from typing import (
List,
)
import cmd2
EXAMPLE_COMMANDS = "Example Commands"
class ReadInputApp(cmd2.Cmd):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.prompt = "\n" + self.prompt
self.custom_history = ['history 1', 'history 2']
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_basic(self, _) -> None:
"""Call read_input with no history or tab completion"""
self.poutput("Tab completion and up-arrow history is off")
try:
self.read_input("> ")
except EOFError:
pass
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_basic_with_history(self, _) -> None:
"""Call read_input with custom history and no tab completion"""
self.poutput("Tab completion is off but using custom history")
try:
input_str = self.read_input("> ", history=self.custom_history)
except EOFError:
pass
else:
self.custom_history.append(input_str)
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_commands(self, _) -> None:
"""Call read_input the same way cmd2 prompt does to read commands"""
self.poutput("Tab completing and up-arrow history configured for commands")
try:
self.read_input("> ", completion_mode=cmd2.CompletionMode.COMMANDS)
except EOFError:
pass
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_custom_choices(self, _) -> None:
"""Call read_input to use custom history and choices"""
self.poutput("Tab completing with static choices list and using custom history")
try:
input_str = self.read_input(
"> ",
history=self.custom_history,
completion_mode=cmd2.CompletionMode.CUSTOM,
choices=['choice_1', 'choice_2', 'choice_3'],
)
except EOFError:
pass
else:
self.custom_history.append(input_str)
# noinspection PyMethodMayBeStatic
def choices_provider(self) -> List[str]:
"""Example choices provider function"""
return ["from_provider_1", "from_provider_2", "from_provider_3"]
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_custom_choices_provider(self, _) -> None:
"""Call read_input to use custom history and choices provider function"""
self.poutput("Tab completing with choices from provider function and using custom history")
try:
input_str = self.read_input(
"> ",
history=self.custom_history,
completion_mode=cmd2.CompletionMode.CUSTOM,
choices_provider=ReadInputApp.choices_provider,
)
except EOFError:
pass
else:
self.custom_history.append(input_str)
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_custom_completer(self, _) -> None:
"""Call read_input to use custom history and completer function"""
self.poutput("Tab completing paths and using custom history")
try:
input_str = self.read_input(
"> ", history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, completer=cmd2.Cmd.path_complete
)
self.custom_history.append(input_str)
except EOFError:
pass
@cmd2.with_category(EXAMPLE_COMMANDS)
def do_custom_parser(self, _) -> None:
"""Call read_input to use a custom history and an argument parser"""
parser = cmd2.Cmd2ArgumentParser(prog='', description="An example parser")
parser.add_argument('-o', '--option', help="an optional arg")
parser.add_argument('arg_1', help="a choice for this arg", metavar='arg_1', choices=['my_choice', 'your_choice'])
parser.add_argument('arg_2', help="path of something", completer=cmd2.Cmd.path_complete)
self.poutput("Tab completing with argument parser and using custom history")
self.poutput(parser.format_usage())
try:
input_str = self.read_input(
"> ", history=self.custom_history, completion_mode=cmd2.CompletionMode.CUSTOM, parser=parser
)
except EOFError:
pass
else:
self.custom_history.append(input_str)
if __name__ == '__main__':
import sys
app = ReadInputApp()
sys.exit(app.cmdloop())
|
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mariadb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
**kwargs: Any
) -> "_models.OperationListResult":
"""Lists all of the available REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationListResult, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mariadb.models.OperationListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('OperationListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/providers/Microsoft.DBForMariaDB/operations'} # type: ignore
|
from __future__ import absolute_import
from .make_haploblocks import get_haploblocks
from .genetic_models import check_genetic_models
from .model_score import get_model_score
from .fix_variant import make_print_version
from .variant_annotator import VariantAnnotator
|
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vimeo(Service, OpenGraphThumbMixin):
supported_domains = ["vimeo.com", "player.vimeo.com"]
def get(self):
data = self.get_urldata()
match_cfg_url = re.search('data-config-url="([^"]+)" data-fallback-url', data)
match_clip_page_cfg = re.search(r"vimeo\.clip_page_config\s*=\s*({.+?});", data)
if match_cfg_url:
            player_url = match_cfg_url.group(1).replace("&amp;", "&")  # un-escape HTML entities in the URL
elif match_clip_page_cfg:
page_config = json.loads(match_clip_page_cfg.group(1))
player_url = page_config["player"]["config_url"]
else:
yield ServiceError(f"Can't find video file for: {self.url}")
return
player_data = self.http.request("get", player_url).text
if player_data:
jsondata = json.loads(player_data)
if ("hls" in jsondata["request"]["files"]) and ("fastly_skyfire" in jsondata["request"]["files"]["hls"]["cdns"]):
hls_elem = jsondata["request"]["files"]["hls"]["cdns"]["fastly_skyfire"]
yield from hlsparse(self.config, self.http.request("get", hls_elem["url"]), hls_elem["url"], output=self.output)
avail_quality = jsondata["request"]["files"]["progressive"]
for i in avail_quality:
yield HTTP(copy.copy(self.config), i["url"], i["height"], output=self.output)
else:
yield ServiceError("Can't find any streams.")
return
|
from django.apps import apps
from django.contrib import admin
AccessToken = apps.get_model('oauth2', 'AccessToken')
Client = apps.get_model('oauth2', 'Client')
Grant = apps.get_model('oauth2', 'Grant')
RefreshToken = apps.get_model('oauth2', 'RefreshToken')
class AccessTokenAdmin(admin.ModelAdmin):
list_display = ('user', 'client', 'token', 'expires', 'scope')
raw_id_fields = ('user',)
class GrantAdmin(admin.ModelAdmin):
list_display = ('user', 'client', 'code', 'expires')
raw_id_fields = ('user',)
class ClientAdmin(admin.ModelAdmin):
list_display = ('url', 'user', 'redirect_uri', 'client_id', 'client_type')
raw_id_fields = ('user',)
admin.site.register(AccessToken, AccessTokenAdmin)
admin.site.register(Grant, GrantAdmin)
admin.site.register(Client, ClientAdmin)
admin.site.register(RefreshToken)
|
from rest_framework.permissions import BasePermission
class IsBuilding(BasePermission):
"""Checks if a current building (preselected by middleware)
has been assigned for this user"""
def has_permission(self, request, view):
return request.building is not None
|
from importlib import import_module
def import_object(object_path):
"""
Import class or function by path
:param object_path: path to the object for import
:return: imported object
"""
module_path, class_name = object_path.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
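# Usage sketch (illustrative; any importable dotted path works):
#   join = import_object('os.path.join')
#   join('a', 'b')  # -> 'a/b' on POSIX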
|
import cgi
import cgitb
cgitb.enable()
import mjl, mhl, flt
import redis, os  # will os be needed this time as well?
TestoPagina="Temperature sensor configuration"
ConfigFile="../conf/thermo.json"
WriteFile="/cgi-bin/writesensors.py"
RedisKey = "sensore:temperatura"
Dir1w = "/sys/bus/w1/devices/"
MyDB = flt.OpenDBFile(ConfigFile)
List1wire = os.listdir(Dir1w)
List1wire.remove("w1_bus_master1")
for i in List1wire:
if not MyDB.exists(RedisKey+":"+i):
MyDB.set(RedisKey+":"+i,"Sensore"+i)
for i in MyDB.keys(RedisKey+":*"):
Esiste=""
for j in List1wire:
if flt.Decode(i) == RedisKey+":"+j:
Esiste="True"
if not Esiste:
MyDB.delete(i)
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
print ("""
This searches for the temperature probes and automatically generates the Redis keys, removing any probes that are no longer connected.
<br/>
Only the description can be entered; it is used to identify the sensor when more than one is connected.
<br/>
<br/>
<i>Enter an identifying description, as short as possible.</i>
<br/>
<br/>
<b>Remember to reconfigure the PID and delete/archive "temperature.csv" to force the header to be rewritten.</b>
<hr/>
<br/>
""")
print (mhl.MyActionForm(WriteFile,"POST"))
print ("<table>")
for i in List1wire:
    # The first field is not editable: it is the Redis key (display only)
print ("<tr>")
print ("<td>")
print ("Key: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm("key",i,"40","required","readonly"))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("Descrizione sensore: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm(RedisKey+":"+i,flt.Decode(MyDB.get(RedisKey+":"+i)),"40","required",""))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("")
print ("</td>")
print ("<td>")
print ("<hr/>")
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td colspan=\"2\">")
print ("<hr/>")
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("</td>")
print ("<td colspan=\"2\">")
print (mhl.MyButtonForm("submit","Submit"))
print ("</td>")
print ("</tr>")
print ("</table>")
print (mhl.MyEndForm())
print (mhl.MyHtmlBottom())
|
import json
from pprint import pprint
import time
import io
def ampmformat (hhmmss):
"""
This method converts time in 24h format to 12h format
Example: "00:32" is "12:32 AM"
"13:33" is "01:33 PM"
"""
ampm = hhmmss.split (":")
if (len(ampm) == 0) or (len(ampm) > 3):
return hhmmss
# is AM? from [00:00, 12:00[
hour = int(ampm[0]) % 24
isam = (hour >= 0) and (hour < 12)
# 00:32 should be 12:32 AM not 00:32
if isam:
ampm[0] = ('12' if (hour == 0) else "%02d" % (hour))
else:
ampm[0] = ('12' if (hour == 12) else "%02d" % (hour-12))
return ':'.join (ampm) + (' AM' if isam else ' PM')
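# Worked examples (from the docstring): ampmformat('00:32:00') -> '12:32:00 AM',
# ampmformat('13:33:00') -> '01:33:00 PM', ampmformat('12:05:00') -> '12:05:00 PM'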
json_data=open('allData2003_2004.json')
data = json.load(json_data)
json_data.close()
output = {}
for k in data.keys():
    for d in data[k]:
        date = time.strptime(d['date'], "%b %d, %Y %I:%M:%S %p")
        if k not in output:
            output[k] = { "sum": 0,
                "hourly": [0]*24
            }
        h = date.tm_hour
        output[k]['sum'] += d['value']
        output[k]['hourly'][h] += d['value']
# json.dumps already returns text in Python 3, so no explicit unicode() wrapping is needed
with io.open('data.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(output, ensure_ascii=False))
json_output=open('data.json')
output_data = json.load(json_output)
pprint(output_data)
json_output.close()
|
class bitarray():
def __init__(self,length,defaultValue=False):
if (length < 0):
raise Exception("Length param error")
        self.array = []
        self.length = length
        for i in range(self.length):
            self.array.append(defaultValue)
self.version=0
def input_from_array(self,value):
        if not isinstance(value, list):
            raise Exception("value is not a list")
        if value is None or len(value) != self.length:
            raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.Set(i,value[i])
self.version+=1
return self
def __len__(self):
return self.length
    def __str__(self):
        s = "["  # avoid shadowing the built-in str
        for i in range(self.length):
            s += "1" if self.array[i] else "0"
            s += " "
        s += "]"
        return s
def Get (self,index):
if (index < 0 or index >=self.length):
raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
return self.array[index]
def Set (self,index,value):
if (index < 0 or index >=self.length):
raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
if (value):
self.array[index]=True
else:
self.array[index]=False
self.version+=1
def SetAll(self,value):
for i in range(self.length):
self.Set(i,value)
self.version+=1
def And (self,value):
        if not isinstance(value, bitarray):
            raise Exception("value is not a bitarray")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.array[i]&=value.Get(i)
self.version+=1
return self
def Or (self,value):
        if not isinstance(value, bitarray):
            raise Exception("value is not a bitarray")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.array[i]|=value.Get(i)
self.version+=1
return self
def Xor (self,value):
        if not isinstance(value, bitarray):
            raise Exception("value is not a bitarray")
if (value is None or len(value)!=self.length):
raise Exception("ArgumentException if value == null or value.Length != this.Length.")
for i in range(self.length):
self.array[i]^=value.Get(i)
self.version+=1
return self
def Not (self):
for i in range(self.length):
self.array[i] =not self.array[i]
self.version+=1
return self
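if __name__ == '__main__':
    # Minimal usage sketch of the API defined above (names as in this module)
    a = bitarray(4)
    a.input_from_array([True, False, True, False])
    b = bitarray(4, True)
    print(a.And(b))  # in-place AND -> [1 0 1 0 ]
    print(a.Not())   # in-place NOT -> [0 1 0 1 ]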
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
from configparser import ConfigParser
class Configurable(object):
"""
Configuration processing for the network
"""
def __init__(self, *args, **kwargs):
self._name = kwargs.pop("name", "Unknown")
if args and kwargs:
raise TypeError('Configurable must take either a config parser or keyword args')
if len(args) > 1:
raise TypeError('Configurable takes at most one argument')
if args:
self._config = args[0]
else:
self._config = self._configure(**kwargs)
return
@property
def name(self):
return self._name
def _configure(self, **kwargs):
        config = ConfigParser()  # SafeConfigParser is a deprecated alias in Python 3
config_file = kwargs.pop("config_file", "")
config.read(config_file)
# Override the config setting if the (k,v) specified in command line
for option, value in kwargs.items():
assigned = False
for section in config.sections():
if option in config.options(section):
config.set(section, option, str(value))
assigned = True
break
if not assigned:
raise ValueError("%s is not a valid option" % option)
return config
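    # Pattern used below: every config property is paired with a class-level
    # argparser option of the same name, so each value can come either from the
    # config file section or be overridden on the command line via _configure().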
argparser = argparse.ArgumentParser()
argparser.add_argument('--config_file')
# ======
# [OS]
@property
def model_type(self):
return self._config.get('OS', 'model_type')
argparser.add_argument('--model_type')
@property
def mode(self):
return self._config.get('OS', 'mode')
argparser.add_argument('--mode')
@property
def save_dir(self):
return self._config.get('OS', 'save_dir')
argparser.add_argument('--save_dir')
@property
def word_file(self):
return self._config.get('OS', 'word_file')
argparser.add_argument('--word_file')
@property
def target_file(self):
return self._config.get('OS', 'target_file')
argparser.add_argument('--target_file')
@property
def train_file(self):
return self._config.get('OS', 'train_file')
argparser.add_argument('--train_file')
@property
def valid_file(self):
return self._config.get('OS', 'valid_file')
argparser.add_argument('--valid_file')
@property
def test_file(self):
return self._config.get('OS', 'test_file')
argparser.add_argument('--test_file')
@property
def save_model_file(self):
return self._config.get('OS', 'save_model_file')
argparser.add_argument('--save_model_file')
@property
def restore_from(self):
return self._config.get('OS', 'restore_from')
argparser.add_argument('--restore_from')
@property
def embed_file(self):
return self._config.get('OS', 'embed_file')
argparser.add_argument('--embed_file')
@property
def use_gpu(self):
return self._config.getboolean('OS', 'use_gpu')
argparser.add_argument('--use_gpu')
# [Dataset]
@property
def n_bkts(self):
return self._config.getint('Dataset', 'n_bkts')
argparser.add_argument('--n_bkts')
@property
def n_valid_bkts(self):
return self._config.getint('Dataset', 'n_valid_bkts')
argparser.add_argument('--n_valid_bkts')
@property
def dataset_type(self):
return self._config.get('Dataset', 'dataset_type')
argparser.add_argument('--dataset_type')
@property
def min_occur_count(self):
return self._config.getint('Dataset', 'min_occur_count')
argparser.add_argument('--min_occur_count')
# [Learning rate]
@property
def learning_rate(self):
return self._config.getfloat('Learning rate', 'learning_rate')
argparser.add_argument('--learning_rate')
@property
def epoch_decay(self):
return self._config.getint('Learning rate', 'epoch_decay')
argparser.add_argument('--epoch_decay')
@property
def dropout(self):
return self._config.getfloat('Learning rate', 'dropout')
argparser.add_argument('--dropout')
# [Sizes]
@property
def words_dim(self):
return self._config.getint('Sizes', 'words_dim')
argparser.add_argument('--words_dim')
# [Training]
@property
def log_interval(self):
return self._config.getint('Training', 'log_interval')
argparser.add_argument('--log_interval')
@property
def valid_interval(self):
return self._config.getint('Training', 'valid_interval')
argparser.add_argument('--valid_interval')
@property
def train_batch_size(self):
return self._config.getint('Training', 'train_batch_size')
argparser.add_argument('--train_batch_size')
@property
def test_batch_size(self):
return self._config.getint('Training', 'test_batch_size')
argparser.add_argument('--test_batch_size')
|
from pyquery import PyQuery as pq
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
from loguru import logger
BASE_URL = 'https://www.xicidaili.com/'
class XicidailiCrawler(BaseCrawler):
"""
    xicidaili crawler, https://www.xicidaili.com/
"""
urls = [BASE_URL]
ignore = True
def parse(self, html):
"""
parse html file to get proxies
:return:
"""
doc = pq(html)
items = doc('#ip_list tr:contains(高匿)').items()
for item in items:
country = item.find('td.country').text()
if not country or country.strip() != '高匿':
continue
host = item.find('td:nth-child(2)').text()
port = int(item.find('td:nth-child(3)').text())
yield Proxy(host=host, port=port)
if __name__ == '__main__':
crawler = XicidailiCrawler()
for proxy in crawler.crawl():
print(proxy)
|
from django.contrib import admin
from .models import Contact
admin.site.register(Contact)
|
from interface.design.ui_screen import Ui_wnd_gifextract
from PyQt5 import QtWidgets
import sys
import listener
import config
import ffmpeg
import queue
import interface.menus.Frame_CreateGif
import interface.menus.Frame_ExtractFrames
import interface.menus.Frame_Queue
class Screen(QtWidgets.QMainWindow):
def __init__(self, parent=None):
def setupFFMpeg():
self.ffmpeg = ffmpeg.FFmpeg(self.config)
def setupConfig():
self.config = config.Config(self)
def setupQueue():
self.queue = queue.JobQueue(self)
def setupTabs():
self.tab_video = interface.menus.Frame_ExtractFrames.Frame(self)
self.ui.tabWidget.addTab(self.tab_video, "Frame Extraction")
self.tab_gif = interface.menus.Frame_CreateGif.Frame(self)
self.ui.tabWidget.addTab(self.tab_gif, "Gif Creation")
self.tab_queue = interface.menus.Frame_Queue.Frame(self)
self.ui.tabWidget.addTab(self.tab_queue, "Queue")
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_wnd_gifextract()
self.ui.setupUi(self)
self.slots = listener.Slots(self)
self.createLinks()
setupConfig()
setupTabs()
setupFFMpeg()
setupQueue()
def createLinks(self):
self.ui.actionPreferences.triggered.connect(self.openOptions)
def openOptions(self):
import interface.menus.ConfigMenu
options = interface.menus.ConfigMenu.ConfigMenu(self, self.config)
options.show()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
program = Screen()
program.show()
sys.exit(app.exec_())
|
from django.core.management import call_command
from django.test import TestCase
from mock import call
from mock import patch
from kolibri.core.content import models as content
class DeleteChannelTestCase(TestCase):
"""
Testcase for delete channel management command
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def delete_channel(self):
call_command("deletechannel", self.the_channel_id)
def test_channelmetadata_delete_remove_metadata_object(self):
self.delete_channel()
        self.assertEqual(0, content.ChannelMetadata.objects.count())
def test_channelmetadata_delete_remove_contentnodes(self):
self.delete_channel()
        self.assertEqual(0, content.ContentNode.objects.count())
def test_channelmetadata_delete_leave_unrelated_contentnodes(self):
c2c1 = content.ContentNode.objects.get(title="c2c1")
new_id = c2c1.id[:-1] + "1"
content.ContentNode.objects.create(
id=new_id,
content_id=c2c1.content_id,
kind=c2c1.kind,
channel_id=c2c1.channel_id,
available=True,
title=c2c1.title,
)
self.delete_channel()
        self.assertEqual(1, content.ContentNode.objects.count())
def test_channelmetadata_delete_remove_file_objects(self):
self.delete_channel()
        self.assertEqual(0, content.File.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_files(self, os_remove_mock, content_file_path):
path = "testing"
content_file_path.return_value = path
num_files = content.LocalFile.objects.filter(available=True).count()
self.delete_channel()
os_remove_mock.assert_has_calls([call(path)] * num_files)
|
from wtforms import IntegerField, SelectMultipleField
from wtforms.validators import NumberRange
from dmutils.forms import DmForm
import flask_featureflags
class BriefSearchForm(DmForm):
page = IntegerField(default=1, validators=(NumberRange(min=1),))
status = SelectMultipleField("Status", choices=(
("live", "Open",),
("closed", "Closed",)
))
# lot choices expected to be set at runtime
lot = SelectMultipleField("Category")
    def __init__(self, *args, **kwargs):
        """
        Requires extra keyword arguments:
        - `framework` - information on the target framework as returned by the api
        - `data_api_client` - a data api client (should be able to remove the need for this arg at some point)
        """
        try:
            # popping this kwarg before calling super so we don't risk it getting fed to wtforms' default
            # implementation, which might use it as a data field if there were a name collision
            framework = kwargs.pop("framework")
        except KeyError:
            raise TypeError("Expected keyword argument 'framework' with framework information")
        try:
            # data_api_client argument only needed so we can fit in with the current way the tests mock.patch the
            # data_api_client directly on the view. would be nice to be able to use the global reference to this
            self._data_api_client = kwargs.pop("data_api_client")
        except KeyError:
            raise TypeError("Expected keyword argument 'data_api_client'")
        super(BriefSearchForm, self).__init__(*args, **kwargs)
        self._framework_slug = framework["slug"]
        self.lot.choices = tuple((lot["slug"], lot["name"],) for lot in framework["lots"] if lot["allowsBrief"])
def get_briefs(self):
if not self.validate():
raise ValueError("Invalid form")
statuses = self.status.data or tuple(id for id, label in self.status.choices)
lots = self.lot.data or tuple(id for id, label in self.lot.choices)
# disable framework filtering when digital marketplace framework is live
kwargs = {} if flask_featureflags.is_active('DM_FRAMEWORK') else {"framework": self._framework_slug}
return self._data_api_client.find_briefs(
status=",".join(statuses),
lot=",".join(lots),
page=self.page.data,
per_page=75,
human=True,
**kwargs
)
def get_filters(self):
"""
generate the same "filters" structure as expected by search page templates
"""
if not self.validate():
raise ValueError("Invalid form")
return [
{
"label": field.label,
"filters": [
{
"label": choice_label,
"name": field.name,
"id": "{}-{}".format(field.id, choice_id),
"value": choice_id,
"checked": field.data and choice_id in field.data,
}
for choice_id, choice_label in field.choices
],
}
for field in (self.lot, self.status,)
]
def filters_applied(self):
"""
returns boolean indicating whether the results are actually filtered at all
"""
if not self.validate():
raise ValueError("Invalid form")
return bool(self.lot.data or self.status.data)
|
import os
import re
def isMov(filename):
    # Determine whether the file is a movie file
    suffix = filename.split('.')[-1].lower()  # extract the extension
    pattern = re.compile(r'mpg|mpeg|m2v|mkv|dat|vob|avi|wmv|rm|ram|rmvb|mov|mp4|qt|viv')
    if pattern.search(suffix):  # does the extension match a movie format?
        return True
    else:
        return False
if __name__=='__main__':
    # Walk the current directory
    print('Processing...')
    cnt = 1
    for fp in os.listdir(os.getcwd()):
        if os.path.isfile(fp) and isMov(fp):  # it is a movie file
            if fp[0]=='[':  # strip a leading [...] tag
                index = fp.find(']')
                if index!=-1:
                    print('[%d] %s ==> %s'%(cnt,fp,fp[index+1:]))
                    os.rename(fp,fp[index+1:])
                    fp = fp[index+1:]
                    cnt+=1
            elif fp[0]=='【':  # strip a leading 【...】 tag (fullwidth brackets)
                index = fp.find('】')
                if index!=-1:
                    print('[%d] %s ==> %s'%(cnt,fp,fp[index+1:]))
                    os.rename(fp,fp[index+1:])
                    fp = fp[index+1:]
                    cnt+=1
            if fp[0]=='.' or fp[0]=='-':  # strip a leading '.' or '-'
                print('[%d] %s ==> %s'%(cnt,fp,fp[1:]))
                os.rename(fp,fp[1:])
                cnt+=1
    if cnt==1:
        print('No movie files to process')
    else:
        print('Done')
|
import db_info
import db_cancel
import db_news
from tweeter import format_info, format_cancel, format_news
import settings
log = settings.log
def add_info_to_queue(q, *args):
    try:
        # count how many entries were updated
        updated = 0
        for lec_info in args:
            id = db_info.add_info(*lec_info)
            if id is not False:
                lec_info.append(id)
                # format the text for tweeting
                t = format_info(*lec_info)
                # put it on the queue
                q.put(t)
                updated += 1
        # return the number of updates
        return updated
    except Exception as e:
        log.exception(e)
def add_cancel_to_queue(q, *args):
    try:
        # count how many entries were updated
        updated = 0
        for lec_cancel in args:
            cancel_id = db_cancel.add_cancel(*lec_cancel)
            if cancel_id is not False:
                lec_cancel.append(cancel_id)
                # format the text for tweeting
                t = format_cancel(*lec_cancel)
                # put it on the queue
                q.put(t)
                updated += 1
        # return the number of updates
        return updated
    except Exception as e:
        log.exception(e)
def add_news_to_queue(q, *args):
    try:
        # count how many entries were updated
        updated = 0
        for news in args:
            news_id = db_news.add_news(*news)
            if news_id is not False:
                news.append(news_id)
                # format the text for tweeting
                t = format_news(*news)
                # put it on the queue
                q.put(t)
                updated += 1
        # return the number of updates
        return updated
    except Exception as e:
        log.exception(e)
|
import argparse
from collections import defaultdict
def calculateJaccardIndex(x,z,neighbours):
shared = neighbours[x].intersection(neighbours[z])
combined = neighbours[x].union(neighbours[z])
return len(shared)/float(len(combined))
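# Worked example: with neighbours = {1: {2, 3}, 2: {1, 3}}, the shared set is {3}
# and the combined set is {1, 2, 3}, so calculateJaccardIndex(1, 2, neighbours) == 1/3.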
if __name__ == '__main__':
	parser = argparse.ArgumentParser(description='Calculate Jaccard index scores for a set of relations')
parser.add_argument('--cooccurrenceFile',type=str,required=True,help='File containing cooccurrences')
parser.add_argument('--occurrenceFile',type=str,required=True,help='File containing occurrences')
parser.add_argument('--sentenceCount',type=str,required=True,help='File containing sentence count')
parser.add_argument('--relationsToScore',type=str,required=True,help='File containing relations to score')
parser.add_argument('--anniVectors',type=str,help='File containing the raw ANNI vector data')
parser.add_argument('--anniVectorsIndex',type=str,help='File containing the index for the ANNI vector data')
parser.add_argument('--outFile',type=str,required=True,help='File to output scores to')
args = parser.parse_args()
print "Loading relationsToScore"
relationsToScore = []
entitiesToScore = set()
with open(args.relationsToScore) as f:
for line in f:
split = map(int,line.strip().split())
x,y = split[:2]
relationsToScore.append((x,y))
entitiesToScore.add(x)
entitiesToScore.add(y)
entitiesToScore = sorted(list(entitiesToScore))
print "Loaded relationsToScore"
print "Loading cooccurrences..."
neighbours = defaultdict(set)
with open(args.cooccurrenceFile) as f:
for line in f:
x,y,count = map(int,line.strip().split())
neighbours[x].add(y)
neighbours[y].add(x)
print "Loaded cooccurrences"
print "Scoring..."
with open(args.outFile,'w') as outF:
for i,(x,z) in enumerate(relationsToScore):
if (i%10000) == 0:
print i
jaccardScore = calculateJaccardIndex(x,z,neighbours)
outData = [x,z,jaccardScore]
outLine = "\t".join(map(str,outData))
outF.write(outLine+"\n")
print "Completed scoring"
print "Output to %s" % args.outFile
|
import scrapy
import xml.etree.ElementTree as ET
from locations.items import GeojsonPointItem
URL = 'http://hosted.where2getit.com/auntieannes/2014/ajax?&xml_request=%3Crequest%3E%3Cappkey%3E6B95F8A2-0C8A-11DF-A056-A52C2C77206B%3C%2Fappkey%3E%3Cformdata+id%3D%22locatorsearch%22%3E%3Cdataview%3Estore_default%3C%2Fdataview%3E%3Climit%3E250%3C%2Flimit%3E%3Cgeolocs%3E%3Cgeoloc%3E%3Caddressline%3E{}%3C%2Faddressline%3E%3Clongitude%3E%3C%2Flongitude%3E%3Clatitude%3E%3C%2Flatitude%3E%3Ccountry%3E{}%3C%2Fcountry%3E%3C%2Fgeoloc%3E%3C%2Fgeolocs%3E%3Cwhere%3E%3Cor%3E%3Chascatering%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fhascatering%3E%3Chaspretzelfieldtrip%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fhaspretzelfieldtrip%3E%3Cnewstores%3E%3Ceq%3E%3C%2Feq%3E%3C%2Fnewstores%3E%3C%2For%3E%3C%2Fwhere%3E%3Csearchradius%3E10%7C25%7C50%7C100%7C250%7C500%7C750%7C1000%3C%2Fsearchradius%3E%3Cstateonly%3E1%3C%2Fstateonly%3E%3C%2Fformdata%3E%3C%2Frequest%3E'
US_STATES = (
"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
UK_Cities = (
'London', 'Birmingham', 'Manchester', 'Glasgow', 'Leeds',
'Liverpool', 'Bristol', 'Newcastle', 'Sunderland', 'Wolverhampton',
'Nottingham', 'Sheffield', 'Belfast', 'Leicester', 'Bradford',
)
UAE_Cities = (
"Abu Dhabi", "Sharjah", "Dubai", "Dayrah","Al Ain",
"Fujairah", "Ras al-Khaimah", "Ar Ruways", "As Satwah",
"Al Khan",
)
TT_Cities = (
"Arima", "San Fernando", "Princes Town", "Piarco", "RioClaro", "Port of Spain",
"Victoria", "Maraval", "Fyzabad", "Debe", "Couva", "Diego Martin", "Chaguanas",
"Penal", "Cunupia", "Curepe", "Roxborough", "San Juan", "Arouca", "Saint Joseph",
"California", "Marabella", "Siparia", "Gasparillo", "Morvant", "Barataria", "Saint Clair",
"Laventille", "Carenage", "Ward of Tacarigua", "Caroni", "Lopinot", "Tunapuna", "Santa Cruz",
"Saint Augustine", "Golden Lane", "Scarborough", "Moriah", "Saint James", "Carapichaima",
"Valsayn", "Freeport", "Claxton Bay", "Sangre Grande", "Cumuto", "Woodbrook", "Petit Valley",
"El Dorado", "Phoenix Park",
)
Thailand_Cities = (
"Bangkok", "Chumphon", "Kathu", "Phang Khon", "Sakon Nakhon", "Mueang Nonthaburi",
"Kalasin", "Chon Buri", "Loei", "Khon Kaen", "Nong Bua Lamphu", "Roi Et", "Udon Thani",
"Kumphawapi", "Kanchanaburi", "Nong Khai", "Ayutthaya", "Chiang Mai", "Songkhla",
"Chiang Rai", "Surin", "Thanyaburi", "Wiphawadi", "Phuket", "Sing Buri", "Satun",
"Prachin Buri", "Ubon Ratchathani", "Pattaya", "Yala", "Bang Na", "Samut Songkhram", "Phetchabun"
"Ratchaburi", "Lampang", "Narathiwat", "New Sukhothai", "Lopburi", "Uttaradit", "Maha Sarakham",
"Mae Hong Son", "Suphan Buri", "Chachoengsao", "Samut Sakhon", "Phrae", "Din Daeng",
"Pathum Wan", "Phayao", "Trang", "Mukdahan", "Phetchaburi", "Uthai Thani", "Krabi", "Phichit",
"Phitsanulok", "Ban Pat Mon", "Prachuap Khiri Khan", "Ban Khlong Prasong", "Yasothon",
"Ranong", "Lamphun", "Nong Bua", "Amnat Charoen", "Ban Phichit", "Bang Khae", "Thon Buri",
"Min Buri", "Ban Tham", "Sam Sen", "Ang Thong", "Mueang Samut Prakan", "Sa Kaeo", "Pathum Thani",
"Chanthaburi", "Huai Khwang", "Rayong", "Sattahip", "Phan", "Si Racha", "Phatthalung",
"Rawai", "Buriram", "Dusit", "Khlong Luang", "Trat", "Ban Bueng", "Sung Noen", "Manorom",
"Ban Bang Plong", "Tak", "Ban Tha Duea", "Amphawa", "Ban Pong Lang", "Phaya Thai", "Si Sa Ket",
"Nakhon Ratchasima", "Bang Phlat", "Ban Bang Phli Nakhon", "Salaya", "Krathum Baen",
"Hua Hin", "Ban Talat Rangsit", "Ban Khlong Ngae", "Nong Prue", "Wang Thonglang",
"Samphanthawong", "Bang Khun Thian", "Chatuchak", "Chaiyaphum",
"Nakhon Pathom", "Nan", "Bang Kruai", "Sathon", "Suan Luang", "Ban Wang Yai"
"Khlong San", "Watthana", "Lat Krabang", "Muak Lek", "Kosum Phisai", "Ban Phlam", "Non Thai",
"Photharam", "Thalang", "Bang Kapi", "Long", "Ka Bang", "Pattani", "Nakhon Si Thammarat",
"Khlong Toei", "Cha-am", "Amphoe Aranyaprathet", "Phang Nga", "Ban Tha Ruea", "Chiang Muan",
"Ban Ang Thong", "Ban Khlong Takhian", "Khan Na Yao", "Bang Sue", "Sam Khok", "Don Mueang",
"Ban Pratunam Tha Khai","Sena", "Prakanong", "Ban Tha Pai", "Bang Lamung", "Nakhon Sawan",
"San Sai", "Kamphaeng Phet", "Pak Kret", "Hat Yai", "Ban Nam Hak", "Khlung", "Makkasan",
"Bang Sao Thong", "Ban Hua Thale", "Klaeng", "Chulabhorn", "Ban Don Sak", "Phanna Nikhom",
"Ban Na", "Ban Ko Pao","Mae Sot"
)
Korea_Cities = (
"Seoul", "Incheon", "Paju", "Cheonan", "Yongin", "Kwanghui-dong", "Pon-dong",
"Gwangju", "Gwangmyeong", "Tang-ni", "Busan", "Seongnam-si", "Suwon-si", "Namyang",
"Namyangju", "Jeju-si", "Ulsan", "Osan", "Hanam", "Pyong-gol", "Anyang-si",
"Yangsan", "Daejeon", "Nonsan", "Seocho", "Wonju", "Kisa", "Daegu", "Ansan-si", "Gongju",
"Haeundae", "Sasang", "Bucheon-si", "Chuncheon", "Ilsan-dong", "Naju", "Jinju", "Uiwang",
"Gangneung", "Yongsan-dong", "Pohang", "Changwon", "Jeonju", "Yeosu",
"Songnim", "Gimhae", "Songjeong", "Hyoja-dong", "Icheon-si", "Kimso", "Iksan", "Deokjin",
"Koyang-dong", "Samsung", "Anseong", "Samjung-ni", "Mapo-dong", "Gunnae", "Nae-ri",
"Suncheon", "Okpo-dong", "Moppo", "Sangdo-dong", "Cheongju-si", "Ch'aeun",
"Taebuk", "Yeoju", "Seong-dong", "Duchon", "Gyeongju", "Andong", "Seosan City", "Asan",
"Miryang", "Wonmi-gu", "Janghowon", "Chungnim", "Songam", "Tongan", "Ap'o", "Jecheon",
"Se-ri", "Ka-ri", "Hansol", "Songang", "Hyangyang", "Gyeongsan-si", "Gumi", "Unpo",
"Ulchin", "Namhyang-dong", "T'aebaek", "Hadong", "Haesan", "Chungju", "Chilgok",
)
Singapore_Cities = (
"Singapore", "Yishun New Town", "Bedok New Town", "Ayer Raja New Town",
"Kalang", "Tampines New Town", "Ang Mo Kio New Town", "Kampong Pasir Ris", "Hougang",
"Yew Tee", "Choa Chu Kang New Town", "Punggol", "Changi Village", "Bukit Timah Estate",
"Serangoon", "Jurong Town", "Tanglin Halt", "Woodlands New Town", "Jurong East New Town",
"Bukit Panjang New Town", "Bukit Batok New Town", "Pasir Panjang", "Holland Village",
"Tai Seng", "Toa Payoh New Town", "Bukit Timah", "Jurong West New Town", "Kembangan",
"Queenstown Estate", "Boon Lay", "Simei New Town", "Pandan Valley", "Clementi New Town",
"Tanjong Pagar"
)
Saudi_Arabia_Cities = (
"Riyadh", "Dammam", "Safwa", "Al Qatif", "Dhahran", "Al Faruq", "Khobar", "Jubail",
"Sayhat", "Jeddah", "Ta'if", "Mecca", "Al Hufuf", "Medina", "Rahimah", "Rabigh",
"Yanbu` al Bahr", "Abqaiq", "Mina", "Ramdah", "Linah", "Abha", "Jizan", "Al Yamamah",
"Tabuk", "Sambah", "Ras Tanura", "At Tuwal", "Sabya", "Buraidah", "Najran", "Sakaka",
"Madinat Yanbu` as Sina`iyah", "Hayil", "Khulays", "Khamis Mushait", "Ra's al Khafji",
"Al Bahah", "Rahman", "Jazirah", "Jazirah"
)
Indonesia_Cities = (
"Jakarta", "Surabaya", "Medan", "Bandung", "Bekasi", "Palembang", "Tangerang", "Makassar",
"Semarang", "South Tangerang",
)
Malaysia_Cities = (
"Kaula Lumpur", "Kota Bharu", "Klang", "Johor Bahru", "Subang Jaya", "Ipoh", "Kuching", "Seremban",
"Petaling Jaya", "Shah Alam", 'Penang', 'Kelantan', "Pantai", "Petaling Jaya", "Kajang",
"Setapak", "Bukit Kayu Hitam", "Bayan Lepas", "Taiping", "Kuala Terengganu", "Kuantan",
"Alor Gajah",
)
Japan_Cities = (
'Tokyo', "Hiroshima", "Saitama", "Nihon'odori", "Ibaraki", "Urayasu",
"Suita", "Funabashi", "Nagareyama", "Ichikawa", "Isesaki", "Koga", "Ichihara",
"Koshigaya", "Shibukawa", "Aoicho", "Yamakita", "Gotemba", "Nisshin", "Nishinomiya",
"Den'en-chofu", "Kawasaki", "Toyama-shi", "Moriguchi", "Chita", "Sano", "Nagoya-shi",
"Kyoto", "Hamamatsu", "Shimotoda", "Yachiyo", "Tsukuba", "Chiba", "Yokohama",
"Yamanashi", "Ashihara", "Kawaguchi", "Kasukabe", "Shizuoka", "Kawanishi", "Itami",
"Kobe", "Nara", "Yao", "Osaka", "Handa", "Honjocho", "Kishiwada", "Susono", "Nagasaki",
"Setagaya-ku", "Zushi", "Sugito", "Yabasehoncho", "Yamaguchi", "Kanazawa", "Maruyama",
"Tahara", "Obu", "Nishio", "Okinawa", "Urasoe", "Naha", "Chichibu", "Asahi", "Kita-sannomaru",
"Hirokawa", "Ishigaki", "Higashine", "Tsuruoka", "Asahikawa", "Minatomachi", "Sannohe",
"Tottori-shi", "Higashiasahimachi", "Iwata", "Koriyama", "Hanno", "Takarazuka", "Kuwana-shi",
"Kakogawa", "Komaki", "Mitake", "Tondabayashi", "Matsumoto", "Kakamigahara", "Onomichi",
"Kure", "Maebaru", "Tokai",
)
COUNTRIES = {
'US': US_STATES,
'UK': UK_Cities,
'AE': UAE_Cities,
'TT': TT_Cities,
'TH': Thailand_Cities,
'KR': Korea_Cities,
'SG': Singapore_Cities,
'SA': Saudi_Arabia_Cities,
'ID': Indonesia_Cities,
'MY': Malaysia_Cities,
'JP': Japan_Cities
}
TAGS = [
'city', 'country', 'latitude', 'longitude',
'phone', 'postalcode', 'state', 'uid'
]
MAPPING = {
'latitude': 'lat', 'longitude': 'lon', 'uid': 'ref',
'postalcode': 'postcode',
}
class AuntieAnnesSpider(scrapy.Spider):
name = "auntie_annes"
    allowed_domains = ["hosted.where2getit.com"]  # scrapy expects bare domains here, not URL paths
download_delay = 0.2
def process_poi(self, poi):
props = {}
add_parts = []
for child in poi:
if child.tag in TAGS and child.tag in MAPPING:
if child.tag in ('latitude', 'longitude'):
props[MAPPING[child.tag]] = float(child.text)
else:
props[MAPPING[child.tag]] = child.text
elif child.tag in TAGS and child.tag not in MAPPING:
props[child.tag] = child.text
elif child.tag in ('address1', 'address2', 'address3', ):
add_parts.append(child.text if child.text else '')
props.update({'addr_full': ', '.join(filter(None, add_parts))})
return GeojsonPointItem(**props)
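    # e.g. a <poi> child <latitude>40.7</latitude> becomes props['lat'] = 40.7 via MAPPING,
    # while <address1>/<address2>/<address3> are joined into props['addr_full'].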
def start_requests(self):
for country, locations in COUNTRIES.items():
for location in locations:
loc = "+".join(location.split(' '))
                url = URL.format(loc, country)  # use the '+'-joined form so spaces are URL-safe
yield scrapy.Request(url, callback=self.parse)
def parse(self, response):
root = ET.fromstring(response.text)
        collection = root[0]  # Element.getchildren() was removed in Python 3.9
pois = collection.findall('poi')
for poi in pois:
yield self.process_poi(poi)
|
from .update_resource import UpdateResource
class VirtualMachineUpdate(UpdateResource):
"""Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when
sending a request.
:param tags: Resource tags
:type tags: dict[str, str]
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2017_12_01.models.Plan
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2017_12_01.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2017_12_01.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2017_12_01.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2017_12_01.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2017_12_01.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
<br><br> For more information on Azure planned maintainance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineInstanceView
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier
that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read
using platform BIOS commands.
:vartype vm_id: str
:param identity: The identity of the virtual machine, if configured.
:type identity:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineIdentity
:param zones: The virtual machine zones.
:type zones: list[str]
"""
_validation = {
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(VirtualMachineUpdate, self).__init__(**kwargs)
self.plan = kwargs.get('plan', None)
self.hardware_profile = kwargs.get('hardware_profile', None)
self.storage_profile = kwargs.get('storage_profile', None)
self.os_profile = kwargs.get('os_profile', None)
self.network_profile = kwargs.get('network_profile', None)
self.diagnostics_profile = kwargs.get('diagnostics_profile', None)
self.availability_set = kwargs.get('availability_set', None)
self.provisioning_state = None
self.instance_view = None
self.license_type = kwargs.get('license_type', None)
self.vm_id = None
self.identity = kwargs.get('identity', None)
self.zones = kwargs.get('zones', None)
|
"""Family module for Wikitech."""
from pywikibot import family
class Family(family.WikimediaOrgFamily):
"""Family class for Wikitech."""
name = 'wikitech'
code = 'en'
def protocol(self, code) -> str:
"""Return the protocol for this family."""
return 'https'
|
import os
import argparse
import datetime
from lxml import etree, html
from lxml.html.clean import Cleaner
import fnmatch # To match files by pattern
import re
import time
import pandas as pd
def timeit(method):
"""Time methods."""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print('%r %2.2f sec' %
(method.__name__, te-ts))
return result
return timed
class TransformHtmlProceedingsToXml(object):
"""Get proceedings of the European Parliament."""
@timeit
def __init__(self):
self.cli()
self.infiles = self.get_files(self.indir, self.pattern)
self.n_proceedings = 0
self.rm_a = Cleaner(remove_tags=['a'])
self.main()
def __str__(self):
message = "Information for {} MEPs extracted!".format(
str(self.n_proceedings))
return message
def get_files(self, directory, fileclue):
"""Get all files in a directory matching a pattern.
Keyword arguments:
directory -- a string for the input folder path
fileclue -- a string as glob pattern
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, fileclue):
matches.append(os.path.join(root, filename))
return matches
def read_html(self, infile):
"""Parse a HTML file."""
with open(infile, encoding='utf-8', mode='r') as input:
return html.parse(input)
def serialize(self, infile, root):
ofile_name = os.path.splitext(os.path.basename(infile))[0]
ofile_path = os.path.join(self.outdir, ofile_name+'.xml')
xml = etree.tostring(
root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True).decode('utf-8')
with open(ofile_path, mode='w', encoding='utf-8') as ofile:
ofile.write(xml)
pass
def get_name(self, tree):
name = tree.xpath('//li[@class="mep_name"]')[0]
name = self.rm_a.clean_html(name)
name = html.tostring(name).decode('utf-8')
name = re.sub(r'[\t\n]', r'', name)
name = name.split('<br>')
name = [html.fromstring(x).text_content() for x in name]
name = ' '.join(name)
return name
def get_nationality(self, tree):
nationality = tree.find_class('nationality')[0]
nationality = nationality.text.strip()
return nationality
def get_id(self, infile):
id = os.path.splitext(os.path.basename(infile))[0]
return id
def parse_date(self, a_date, a_pattern):
output = datetime.datetime.strptime(a_date, a_pattern).date()
return output
def get_birth(self, tree):
birth = tree.xpath('.//span[@class="more_info"]')
birth_date = None
birth_place = None
death_date = None
death_place = None
for i in birth:
if i.text is not None:
birth_text = re.sub(r'[\t\n]', r'', i.text)
birth_text = birth_text.strip()
if re.match(r'^Date of birth: (.+?), (.+)$', birth_text):
info = re.match(
r'^Date of birth: (.+?), (.+)$', birth_text)
birth_date = self.parse_date(info.group(1), "%d %B %Y")
birth_place = info.group(2)
elif re.match(r'^Date of birth: (.+?)$', birth_text):
info = re.match(r'^Date of birth: (.+?)$', birth_text)
birth_date = self.parse_date(info.group(1), "%d %B %Y")
birth_place = None
elif re.match(r'^Date of death: (.+?), (.+)$', birth_text):
info = re.match(
r'^Date of death: (.+?), (.+)$', birth_text)
death_date = self.parse_date(info.group(1), "%d %B %Y")
death_place = info.group(2)
elif re.match(r'^Date of death: (.+?)$', birth_text):
info = re.match(r'^Date of death: (.+?)$', birth_text)
death_date = self.parse_date(info.group(1), "%d %B %Y")
death_place = None
return birth_date, birth_place, death_date, death_place
def get_political_groups(self, tree, id):
political_groups = tree.xpath('.//div[@class="boxcontent nobackground"]/h4[contains(., "Political groups")]/following-sibling::ul[1]//li')
output = []
for i in political_groups:
info = i.text
info = re.sub(r'\n', r'', info)
info = re.sub(r'\t+', r'\t', info)
info = re.sub(r' \t/ ', r'\t', info)
info = re.sub(r'\t:\t', r'\t', info)
info = re.sub(r' - ', r'\t', info)
info = re.sub(r'\t$', r'', info)
info = info.strip()
info = info.split('\t')
info = [x.strip() for x in info]
m_state = i.attrib['class']
s_date = self.parse_date(info[0], "%d.%m.%Y")
if info[1] == '...':
e_date = self.date
else:
e_date = self.parse_date(info[1], "%d.%m.%Y")
p_group = info[2]
p_group_role = info[3]
output.append({
'id': id,
'm_state': m_state,
's_date': s_date,
'e_date': e_date,
'p_group': p_group,
'p_group_role': p_group_role})
return output
def get_national_parties(self, tree, id):
political_groups = tree.xpath('.//div[@class="boxcontent nobackground"]/h4[contains(., "National parties")]/following-sibling::ul[1]//li')
output = []
for i in political_groups:
info = i.text
info = re.sub(r'\n', r'', info)
info = re.sub(r'\t+', r'\t', info)
info = re.sub(r' \t/ ', r'\t', info)
info = re.sub(r'\t:\t', r'\t', info)
info = re.sub(r' - ', r'\t', info)
info = re.sub(r'\t$', r'', info)
info = info.strip()
info = info.split('\t')
info = [x.strip() for x in info]
s_date = self.parse_date(info[0], "%d.%m.%Y")
if info[1] == '...':
e_date = self.date
else:
e_date = self.parse_date(info[1], "%d.%m.%Y")
n_party = info[2]
output.append({
'id': id,
's_date': s_date,
'e_date': e_date,
'n_party': n_party})
return output
def extract_info(self, infile):
id = self.get_id(infile)
tree = self.read_html(infile).getroot()
name = self.get_name(tree)
nationality = self.get_nationality(tree)
birth_date, birth_place, death_date, death_place = self.get_birth(tree)
self.meps[id] = {
'name': name,
'nationality': nationality,
'birth_date': birth_date,
'birth_place': birth_place,
'death_date': death_date,
'death_place': death_place
}
self.political_groups = (
self.political_groups + self.get_political_groups(tree, id))
self.national_parties = (
self.national_parties + self.get_national_parties(tree, id))
pass
def serialize_dict_of_dicts(self, dict_of_dicts, ofile_name):
df = pd.DataFrame.from_dict(dict_of_dicts, orient='index')
opath = os.path.join(self.outdir, ofile_name)
df.to_csv(
opath,
sep='\t',
mode='w',
encoding='utf-8',
index_label='id')
pass
def serialize_list_of_dicts(self, list_of_dicts, ofile_name, col_order):
df = pd.DataFrame(list_of_dicts)
df = df[col_order]
opath = os.path.join(self.outdir, ofile_name)
df.to_csv(opath, sep='\t', mode='w', encoding='utf-8', index=False)
pass
def main(self):
self.meps = {}
self.political_groups = []
self.national_parties = []
for infile in self.infiles:
print(infile)
if self.date is None:
self.date = datetime.datetime.fromtimestamp(
os.path.getmtime(infile)).date()
self.extract_info(infile)
self.n_proceedings += 1
self.serialize_dict_of_dicts(self.meps, 'meps.csv')
self.serialize_list_of_dicts(
self.political_groups,
'political_groups.csv',
['id', 'm_state', 's_date', 'e_date', 'p_group', 'p_group_role'])
self.serialize_list_of_dicts(
self.national_parties,
'national_parties.csv',
['id', 's_date', 'e_date', 'n_party'])
pass
def cli(self):
"""CLI parses command-line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input",
required=True,
help="path to the input directory.")
parser.add_argument(
"-o", "--output",
required=True,
help="path to the output directory.")
parser.add_argument(
'-p', "--pattern",
required=False,
default="*.html",
help="glob pattern to filter files.")
parser.add_argument(
'-d', "--date",
required=False,
default=None,
help="date of download of HTML files.")
args = parser.parse_args()
self.indir = args.input
self.outdir = args.output
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.pattern = args.pattern
self.date = args.date
pass
print(TransformHtmlProceedingsToXml())
|
import pygame
from pygame.locals import *
import numpy as np
grid_size = 100
n_row = 4
n_col = 12
state = np.zeros((n_row * grid_size, n_col * grid_size))
step_size = 0.5
epsilon = 0.1  # parameter for epsilon-greedy
N_actions = 4 # number of actions {left,up,right,down}
N_episodes = 600 # number of episodes
goal_r = 3
goal_c = 11
start_r = 3
start_c = 0
q = np.zeros((n_row,n_col,N_actions)) # num_row by num_col by num_states
n_steps = 0
n_episodes = 0
def ep_greedy(epsilon,num_actions,q,i,j):
roll = np.random.uniform(0,1)
# epsilon-greedy strategy
if roll < epsilon: # exploration
a = np.random.randint(0,num_actions)
else: # exploitation
a = np.argmax(q[i,j,:])
return a
def action2state(i,j,a):
    # Note: the coordinate system starts from the upper-left corner and
    # right/downwards are the positive directions
if a == 0: # to left
i_next = i
j_next = j - 1
elif a == 1: # upwards
i_next = i - 1
j_next = j
elif a == 2: # to right
i_next = i
j_next = j + 1
else: # downwards
i_next = i + 1
j_next = j
return i_next,j_next
while n_episodes < N_episodes:
# begin of an episode
i = start_r
j = start_c
# end of an episode
n_episodes += 1
print "episode ",str(n_episodes),"..."
while True:
n_steps += 1
# print " step ",str(n_steps),"..."
# choose A from S using policy derived from Q (epsilon-greedy)
a = ep_greedy(epsilon,N_actions,q,i,j)
        # translate the action into a state change
i_next,j_next = action2state(i,j,a)
# update the state-action value function with Sarsa/Q-Learning of choice
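        # The update below is the Q-learning rule Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)),
        # with step_size as alpha and an implicit discount gamma = 1 (undiscounted episodic task).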
# state transitions end in the goal state
# state should be in the range of the gridworld
if i_next == goal_r and j_next == goal_c: # reach the goal position
# q[i,j] = q[i,j] + step_size * (-1 + 0 - q[i,j]) #the Q(terminal,.) = 0
q[i,j,a] = q[i,j,a] + step_size * (-1 + 0 - q[i,j,a]) #the Q(terminal,.) = 0
# Note, transition from noterminal to terminal also gets reward of -1 in this case
break
# different reward/consequence when entering the cliff region
elif i_next == 3 and j_next > 1 and j_next < n_col - 1:
i_next = start_r
j_next = start_c
r = -100
elif i_next < 0 or i_next > n_row -1:
i_next = i
r = -1
elif j_next < 0 or j_next > n_col - 1:
j_next = j
r = -1
else:
r = -1
# a_next = ep_greedy(epsilon,N_actions,q,i_next,j_next)
q[i,j,a] = q[i,j,a] + step_size * (r + max(q[i_next,j_next,:]) - q[i,j,a])
i = i_next
j = j_next
pygame.init()
pygame.display.set_mode((n_col * grid_size,n_row * grid_size))
pygame.display.set_caption('Cliff Walking')
screen = pygame.display.get_surface()
surface = pygame.Surface(screen.get_size())
bg = pygame.Surface(screen.get_size())
def draw_bg(surface,n_row,n_col,grid_size,start_r,start_c,goal_r,goal_c):
for i in range(n_col):
for j in range(n_row):
x = i * grid_size
y = j * grid_size
coords = pygame.Rect(x,y,grid_size,grid_size)
pygame.draw.rect(surface,(255,255,255),coords,1)
# draw start state
pygame.draw.circle(surface,(192,192,192),(start_c * grid_size + grid_size/2,
start_r * grid_size + grid_size/2),grid_size/4)
# draw goal state
pygame.draw.circle(surface,(102,204,0),(goal_c * grid_size + grid_size/2,
goal_r * grid_size + grid_size/2),grid_size/4)
# draw cliff region
x = 1 * grid_size
y = 3 * grid_size
coords = pygame.Rect(x,y,grid_size*10,grid_size)
pygame.draw.rect(surface,(192,192,192),coords)
def step_q(q,s_r,s_c,n_row,n_col):
print "state-action value:"
print q[s_r,s_c,:]
a = np.argmax(q[s_r,s_c,:]) # greedy only
# display debug
if a == 0:
print "move left"
elif a == 1:
print "move upward"
elif a == 2:
print "move right"
else:
print "move downwards"
s_r_next,s_c_next = action2state(s_r,s_c,a)
# define rules especially when the agent enter the cliff region
if s_r_next == 3 and s_c_next > 1 and s_c_next < n_col - 1:
s_r_next = start_r
s_c_next = start_c
# in theory, the produced optimal policy should not enter this branch
elif s_r_next < 0 or s_r_next > n_row -1:
s_r_next = s_r
elif s_c_next < 0 or s_c_next > n_col - 1:
s_c_next = s_c
return s_r_next,s_c_next
s_r = start_r
s_c = start_c
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit # stop the script; otherwise the loop keeps drawing on a closed display
# draw gridworld background
draw_bg(bg,n_row,n_col,grid_size,start_r,start_c,goal_r,goal_c)
screen.blit(bg,(0,0))
# draw the state of the agent, i.e. the path (start --> end) as the foreground
surface.fill((0,0,0))
    # use the state-action value function to find an optimal policy
# in the loop, should provide a step function
#print (s_r,s_c)
s_r_next,s_c_next = step_q(q,s_r,s_c,n_row,n_col)
#print (s_r_next,s_c_next)
if s_r_next != goal_r or s_c_next != goal_c:
pygame.draw.circle(surface,(255,255,255),(s_c_next * grid_size + grid_size/2,
s_r_next * grid_size + grid_size/2),grid_size/4)
bg.blit(surface,(0,0))
pygame.display.flip() # update
pygame.time.delay(1000)
s_r,s_c = s_r_next,s_c_next # update coordinate
|
__author__ = 'jdaniel'
import copy
import random
import itertools
import operator
import math
import struct
import os
import sys
import json
from collections import defaultdict
class AlgorithmBase(object):
def __init__(self, objective_function):
"""
Base Algorithm class which contains utility functionality
common to all other algorithms and acts as the standalone
API for Algorithm usage.
:param objective_function: <function> The model function to be used
def my_objective(x):
f = list_of_objective_values
h = list_of_equality_constraint_values
g = list_of_inequality_constraint_values
return [f,h,g]
:return: None
"""
self._objective_function = objective_function
self._variables = []
self._equality_constraints = []
self._inequality_constraints = []
self._objectives = []
# Algorithm Options
self._pop_size = None
self._generations = None
self._conv_tol = None
self._eqcon_tol = None
self._seed = None
self._eta_c = None
self._eta_m = None
self._p_cross = None
self._p_mut = None
self._islands = None
self._epoch = None
self._migrants = None
self._spheres = None
# Problem information
self._ndim = None
self._neqcon = None
self._nneqcon = None
self._lower_bound = []
self._upper_bound = []
# Data objects
self._history = History()
self._archive = Archive()
self._metadata = Metadata()
# Random number generator
self._rnd = random.Random()
def register_variable(self, name, lower, upper):
"""
Register a decision variable with the algorithm
:param name: <string> Reference name of the decision variable
:param lower: <float> Lower bound for the variable
:param upper: <float> Upper bound for the variable
:return: None
"""
var = Variable(name, lower, upper)
self._variables.append(var)
def register_constraint(self, name, ctype):
"""
Register a constraint variable with the algorithm
:param name: <string> Reference name of the constraint variable
:param ctype: <string> Set constraint type, 'e': equality constraint; 'i': inequality constraint
:return: None
"""
con = Constraint(name)
if ctype == 'e':
self._equality_constraints.append(con)
elif ctype == 'i':
self._inequality_constraints.append(con)
else:
err_msg = 'Unrecognized constraint type ' + repr(ctype)
raise AlgorithmException(err_msg)
def register_objective(self, name):
"""
Register an objective variable with the algorithm
:param name: <string> Reference name of the objective variable
:return: None
"""
obj = Objective(name)
self._objectives.append(obj)
def set_options(self, option, value):
"""
Set an algorithm option value
:param option: <string> Name of the option to set
:param value: <int, float> Value of the option to set
:return: None
"""
if option == 'population_size':
self.check_population_size(value)
self._pop_size = value
elif option == 'generations':
self.check_generations(value)
self._generations = value
elif option == 'conv_tol':
self.check_conv_tol(value)
self._conv_tol = value
elif option == 'eqcon_tol':
self.check_eqcon_tol(value)
self._eqcon_tol = value
elif option == 'eta_c':
self.check_eta_c(value)
self._eta_c = value
elif option == 'eta_m':
self.check_eta_m(value)
self._eta_m = value
elif option == 'p_cross':
self.check_p_cross(value)
self._p_cross = value
elif option == 'p_mut':
self.check_p_mut(value)
self._p_mut = value
elif option == 'islands':
self.check_islands(value)
self._islands = value
elif option == 'epoch':
self.check_epoch(value)
self._epoch = value
elif option == 'migrants':
self.check_migrants(value)
self._migrants = value
elif option == 'spheres':
self.check_spheres(value)
self._spheres = value
elif option == 'seed':
self.set_seed(value)
else:
err_msg = 'Unrecognized option ' + repr(option)
raise AlgorithmException(err_msg)
def set_seed(self, value):
"""
Set the seed value for the optimisation
:param value: Value to set
:return: None
"""
if value == 0:
self._seed = struct.unpack("<L", os.urandom(4))[0]
else:
self._seed = value
self._rnd.seed(self._seed)
@staticmethod
def check_population_size(value):
"""
Check the population value
:param value: Value to set
:return:
"""
# Check if integer
if not isinstance(value, (int, long)):
err_msg = 'Population is not an integer'
raise AlgorithmException(err_msg)
# Check if greater than zero
if value <= 0:
err_msg = 'Population size must be greater than zero'
raise AlgorithmException(err_msg)
# Check if divisible by 4
if value % 4 != 0:
err_msg = 'Population size must be evenly divisible by four'
raise AlgorithmException(err_msg)
@staticmethod
def check_generations(value):
"""
Check the generations value
:param value: Value to set
:return: None
"""
if value <= 0:
            err_msg = 'The generations value must be an integer greater than 0'
raise AlgorithmException(err_msg)
@staticmethod
def check_conv_tol(value):
"""
Check the convergence tolerance value
:param value: Value to set
:return: None
"""
# Check if between (0.0, 1.0)
if value >= 1.0 or value <= 0.0:
err_msg = 'The convergence tolerance value conv_tol must be between (0.0, 1.0)'
raise AlgorithmException(err_msg)
@staticmethod
def check_eqcon_tol(value):
"""
Check the equality constraint tolerance value
:param value: Value to set
:return: None
"""
# Check if greater than 0
if value <= 0.0:
err_msg = 'The equality constraint tolerance value eqcon_tol must be greater than 0'
raise AlgorithmException(err_msg)
@staticmethod
def check_eta_c(value):
"""
Check the crossover distribution index value
:param value: Value to set
:return: None
"""
# Check if greater than zero
if value <= 0:
err_msg = 'The crossover distribution index eta_c must be greater than zero'
raise AlgorithmException(err_msg)
@staticmethod
def check_eta_m(value):
"""
Check the mutation distribution index value
:param value: Value to set
:return: None
"""
# Check if greater than zero
if value <= 0:
err_msg = 'The mutation distribution index eta_m must be greater than zero'
raise AlgorithmException(err_msg)
@staticmethod
def check_p_cross(value):
"""
Check the crossover probability value
:param value: Value to set
:return: None
"""
# Check if between (0.0, 1.0)
if value < 0.0 or value > 1.0:
err_msg = 'The crossover probability p_cross must be between 0.0 and 1.0'
raise AlgorithmException(err_msg)
@staticmethod
def check_p_mut(value):
"""
Check the mutation probability value
:param value: Value to set
:return: None
"""
# Check if between (0.0, 1.0)
if value < 0.0 or value > 1.0:
err_msg = 'The mutation probability p_mut must be between 0.0 and 1.0'
raise AlgorithmException(err_msg)
@staticmethod
def check_islands(value):
"""
Check the number of islands
:param value: Value to set
:return: None
"""
# Check greater than zero
if value <= 0:
err_msg = 'Number of islands must be a positive integer greater than zero'
raise AlgorithmException(err_msg)
@staticmethod
def check_spheres(value):
"""
Check the number of spheres
:param value: Value to set
:return: None
"""
if value <= 0:
err_msg = 'Number of spheres must be a positive integer greater than zero'
raise AlgorithmException(err_msg)
@staticmethod
def check_epoch(value):
"""
Check the epoch rate
:param value: Value to set
:return: None
"""
if value <= 0:
err_msg = 'The epoch rate must be a positive integer greater than zero'
raise AlgorithmException(err_msg)
@staticmethod
def check_migrants(value):
"""
Check the migrants value
:param value: Value to set
:return: None
"""
if value < 0:
err_msg = 'The number of migrants must be zero or greater'
raise AlgorithmException(err_msg)
def setup_variables(self):
"""
Get information about the model once all the variables
have been added.
:return: None
"""
self._ndim = len(self._variables)
self._neqcon = len(self._equality_constraints)
self._nneqcon = len(self._inequality_constraints)
for var in self._variables:
self._lower_bound.append(var.lower)
self._upper_bound.append(var.upper)
def evaluate_population(self, population):
"""
Evaluate a population
:param population: <Population> Population to evaluate
:return: None
"""
for ind in population:
self.evaluate(ind)
def evaluate(self, individual):
"""
Evaluate an individual
:param individual: <Individual> Individual to evaluate
:return: None
"""
f, h, g = self._objective_function(individual.x)
individual.f = f
individual.h = h
individual.g = g
        # Calculate the constraint violation (zero when every constraint is satisfied)
        s = 0.0
        for i in xrange(self._neqcon):
            s += max(0.0, math.fabs(h[i]) - self._eqcon_tol)
        for i in xrange(self._nneqcon):
            s += max(0.0, g[i])
        individual.s = s
        self._history.add_point(individual)
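# Minimal usage sketch (hedged): AlgorithmBase is driven by concrete algorithms
# elsewhere; the objective function and option values below are illustrative only.
#   def my_objective(x):
#       f = [x[0] ** 2, (x[0] - 2.0) ** 2]  # two objectives to minimise
#       h = []                              # no equality constraints
#       g = [x[0] - 10.0]                   # one inequality constraint, feasible when <= 0
#       return [f, h, g]
#   algo = AlgorithmBase(my_objective)
#   algo.register_variable('x0', -5.0, 5.0)
#   algo.register_objective('f1')
#   algo.register_objective('f2')
#   algo.register_constraint('g1', 'i')
#   algo.set_options('population_size', 40)  # must be divisible by four
#   algo.set_options('generations', 100)
#   algo.setup_variables()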
class Individual(object):
def __init__(self):
"""
Class for holding information and methods related to the concept
of an individual.
:return: None
"""
# Decision variables
self.x = None
# Objective variables
self.f = None
# Equality constraint variables
self.h = None
# Inequality constraint variables
self.g = None
# Distance metric
self.d = None
# Constraint violation
self.s = None
# Domination count
self.n = None
# Rank
self.r = None
# ID
self.id = None
def dominates(self, other):
"""
Method to determine if the individual dominates another individual using
the constraint dominance approach.
:param other: <Individual> Other individual to test against
:return: <bool> True if self dominates other, False if other dominates self or both are equal
"""
not_equal = False
flag1 = 0
flag2 = 0
if self.s > 0:
flag1 = 1
if other.s > 0:
flag2 = 1
# Both individuals are invalid
if flag1 == 1 and flag2 == 1:
if self.s < other.s:
return True
else:
return False
# One of the individuals is invalid
elif flag1 ^ flag2:
if flag1:
return False
else:
return True
# Both individuals are valid
else:
for self_fit, other_fit in zip(self.f, other.f):
if self_fit > other_fit:
return False
elif self_fit < other_fit:
not_equal = True
return not_equal
def __hash__(self):
"""
        Return a hash based on the individual's decision variable values
:return: Hash for the individual
"""
return hash(repr(self.x))
def __repr__(self):
s = 'ID: ' + repr(self.id) + '\n'
s += 'x: ' + repr(self.x) + '\n'
s += 'f: ' + repr(self.f) + '\n'
if self.h is not None:
s += 'h: ' + repr(self.h) + '\n'
if self.g is not None:
s += 'g: ' + repr(self.g) + '\n'
if self.d is not None:
s += 'd: ' + repr(self.d) + '\n'
if self.s is not None:
s += 's: ' + repr(self.s) + '\n'
if self.r is not None:
s += 'r: ' + repr(self.r) + '\n'
return s
def __eq__(self, other):
for self_x, other_x in zip(self.x, other.x):
if self_x != other_x:
return False
return True
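# Example sketch (hedged): constraint dominance in practice, with fields set by hand.
#   a, b = Individual(), Individual()
#   a.f, a.s = [1.0, 2.0], 0.0   # feasible
#   b.f, b.s = [2.0, 2.0], 0.0   # feasible, worse in the first objective
#   a.dominates(b)               # -> True
#   b.s = 0.5                    # make b infeasible
#   a.dominates(b)               # -> True (a feasible individual dominates an infeasible one)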
class Population(list):
def __init__(self):
super(Population, self).__init__()
def populate(self, individuals):
"""
Populate the population with a list of individuals
:param individuals: <List<Individual>> List of individuals to use
:return: None
"""
for ind in individuals:
self.append(copy.deepcopy(ind))
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__)
def __repr__(self):
s = ''
for idx, ind in enumerate(self):
s += repr(ind) + '\n'
return s
class SubPopulation(list):
def __init__(self):
super(SubPopulation, self).__init__()
def populate(self, individuals):
"""
Populate the sub-population with a list of individuals
:param individuals: <List<Individual>> List of individuals to use
:return: None
"""
for ind in individuals:
self.append(copy.deepcopy(ind))
class Archive(object):
def __init__(self):
"""
Optimization run archive of non-dominated solutions at each generation
which is used to predict convergence of the algorithm.
:return: None
"""
# Tracks the archive of non-dominated solutions
self._archive = []
# Tracks the size of the non-dominated archive
self._idx = 0
# Tracks the consolidation ratio
self._consolidation_ratio = []
# Population size
self._population_size = None
def initialize(self, population):
"""
Initialize the archive
:param population: <Population> Individuals to initialize the population with
:return: None
"""
self._archive.append(nondominated_sort(population, len(population), first_front_only=True)[0])
self._consolidation_ratio.append(0)
self._population_size = len(population)
def update(self, population):
"""
Update the archive
:param population: <Population> Population to update the archive with
:return: None
"""
nondominated_solutions = nondominated_sort(copy.deepcopy(population), len(population), first_front_only=True)[0]
archive_copy = copy.deepcopy(self._archive[self._idx])
archive_copy = archive_copy + nondominated_solutions
nondominated_solutions = nondominated_sort(archive_copy, len(archive_copy), first_front_only=True)[0]
# Remove copies
nondominated_solutions = list(set(nondominated_solutions))
# Update the archive
self._archive.append(nondominated_solutions)
self._idx += 1
self._consolidation_ratio.append(len(self._archive[self._idx])/float(2*self._population_size))
def get_consolidation_ratio(self):
"""
Return the most recent calculated consolidation ratio
:return: <float> Current consolidation ratio value
"""
return self._consolidation_ratio[self._idx]
def get_consolidation_ratio_history(self):
"""
Return the consolidation ratio history
:return: <List<float>> Consolidation ratio history
"""
return self._consolidation_ratio
def get_archive(self):
"""
Get the saved archive at each update
:return: <List<Population>> archive
"""
return self._archive
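# Usage sketch (hedged): track convergence with the consolidation ratio; the
# 0.9 threshold is illustrative, not part of this module.
#   archive = Archive()
#   archive.initialize(population)    # once, with the evaluated initial population
#   archive.update(population)        # after each generation
#   if archive.get_consolidation_ratio() > 0.9:
#       pass  # treat the run as converged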
class Metadata(object):
def __init__(self):
self.end_msg = None
self.fevals = None
self.gen = None
def __repr__(self):
s = '\n' + self.end_msg + '\n'
s += 'fevals: ' + repr(self.fevals) + '\n'
s += 'gen: ' + repr(self.gen) + '\n'
return s
class History(list):
def __init__(self):
super(History, self).__init__()
def add_point(self, individual):
"""
Add a design point to the history
:param individual: <Individual> Individual to add to the history
:return: None
"""
self.append(individual)
class Variable(object):
def __init__(self, name, lower, upper):
"""
Data structure that contains decision variable information.
:param name: <string> Reference name for the decision variable
:param lower: <float> Lower bound of the decision variable
:param upper: <float> Upper bound of the decision variable
:return: None
"""
self.name = name
self.lower = lower
self.upper = upper
class Constraint(object):
def __init__(self, name):
"""
Data structure that contains constraint variable information.
:param name: <string> Reference name for the constraint variable
:return: None
"""
self.name = name
class Objective(object):
def __init__(self, name):
"""
Data structure that contains objective variable information.
:param name: <string> Reference name for the objective variable
:return: None
"""
self.name = name
class AlgorithmException(Exception):
def __init__(self, message):
"""
Exception class that gets raised when an error occurs with the algorithm.
:param message: Error message to display
:return: None
"""
Exception.__init__(self, message)
def flatten_population(population_list):
"""
Combine each of the sub-populations into a single global population
:param population_list: <List<Population>>
:return: <List<Individual>>
"""
global_pop = Population()
for pop in population_list:
        global_pop.extend(pop)
return global_pop
def mutation(population, n_dim, lower, upper, eta_m, p_mut):
"""
Performs bounded polynomial mutation on the population.
:param population: <Population> Population to perform mutation on
:param n_dim: <int> Number of decision variable dimensions
:param lower: <list<float>> List of decision variable lower bound values
:param upper: <list<float>> List of upper bound decision variable values
:param eta_m: <float> Mutation index
:param p_mut: <float> Mutation probability
:return: None
"""
for ind in population:
mutate(ind, n_dim, lower, upper, eta_m, p_mut)
def mutate(individual, n_dim, lower, upper, eta_m, p_mut):
"""
Performs bounded polynomial mutation on an individual.
:param individual: <Individual> Individual to perform mutation on
:param n_dim: <int> Number of decision variable dimension
:param lower: <list<float>> List of decision variable lower bound values.
:param upper: <list<float>> List of decision variable upper bound values.
:param eta_m: <float> Mutation index
:param p_mut: <float> Mutation probability
:return: None
"""
for i, xl, xu in zip(xrange(n_dim), lower, upper):
if random.random() <= p_mut:
x = copy.deepcopy(individual.x[i])
delta_1 = (x - xl) / (xu - xl)
delta_2 = (xu - x) / (xu - xl)
rand = random.random()
mut_pow = 1.0 / (eta_m + 1.0)
if rand < 0.5:
xy = 1.0 - delta_1
val = 2.0 * rand + (1.0 - 2.0*rand)*(xy**(eta_m + 1))
delta_q = val**mut_pow - 1.0
else:
xy = 1.0 - delta_2
val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5)*(xy**(eta_m + 1))
delta_q = 1.0 - val**mut_pow
x += delta_q * (xu - xl)
x = min(max(x, xl), xu)
individual.x[i] = x
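# Example sketch (hedged): polynomial mutation of a single two-dimensional
# individual, in place; the bounds and parameter values are illustrative.
#   ind = Individual()
#   ind.x = [0.5, -1.0]
#   mutate(ind, 2, [-5.0, -5.0], [5.0, 5.0], eta_m=20.0, p_mut=0.5)
#   # ind.x now holds the (possibly) perturbed values, clipped to the bounds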
def crossover(population, n_dim, lower, upper, eta_c, p_cross):
"""
Perform simulated binary crossover on the population.
:param population: <Population> Population to perform crossover on.
:param n_dim: <int> Number of decision variable dimensions.
:param lower: <list<float>> List of decision variable lower bound values.
:param upper: <list<float>> List of decision variable upper bound values.
:param eta_c: <float> Crossover index.
:param p_cross: <float> Crossover probability.
:return: <Population> Child population
"""
child_pop = Population()
child_pop.populate(population)
for ind1, ind2 in zip(child_pop[::2], child_pop[1::2]):
if random.random() <= p_cross:
mate(ind1, ind2, n_dim, lower, upper, eta_c)
return child_pop
def mate(ind1, ind2, n_dim, lower, upper, eta_c):
"""
Performs simulated binary crossover between two individuals to produce
two offspring.
:param ind1: <Individual> First individual involved in crossover
:param ind2: <Individual> Second individual involved in crossover
:param n_dim: <int> Number of decision variable dimensions.
:param lower: <list<float>> List of decision variable lower bound values.
:param upper: <list<float>> List of decision variable upper bound values.
:param eta_c: <float> Crossover index.
:return: None
"""
for i, xl, xu in zip(xrange(n_dim), lower, upper):
if random.random() <= 0.5:
if abs(ind1.x[i] - ind2.x[i]) > 1e-14:
x1 = min(ind1.x[i], ind2.x[i])
x2 = max(ind1.x[i], ind2.x[i])
rand = random.random()
beta = 1.0 + (2.0*(x1 - xl) / (x2 - x1))
alpha = 2.0 - beta**-(eta_c + 1)
if rand <= 1.0 / alpha:
beta_q = (rand*alpha)**(1.0 / (eta_c + 1))
else:
beta_q = (1.0 / (2.0 - rand*alpha))**(1.0 / (eta_c + 1))
c1 = 0.5 * (x1 + x2 - beta_q * (x2 - x1))
beta = 1.0 + (2.0*(xu - x2) / (x2 - x1))
alpha = 2.0 - beta**-(eta_c + 1)
if rand <= 1.0 / alpha:
beta_q = (rand*alpha)**(1.0 / (eta_c + 1))
else:
beta_q = (1.0 / (2.0 - rand * alpha))**(1.0 / (eta_c + 1))
c2 = 0.5 * (x1 + x2 + beta_q*(x2 - x1))
c1 = min(max(c1, xl), xu)
c2 = min(max(c2, xl), xu)
if random.random() <= 0.5:
ind1.x[i] = c2
ind2.x[i] = c1
else:
ind1.x[i] = c1
ind2.x[i] = c2
def selection(population, k):
"""
Apply the NSGA-II selection operator on a population of individuals.
:param population: <Population> Population of individuals to select from
:param k: <int> The number of individuals to select
:return: <Population> Selected population of individuals
"""
pareto_fronts = nondominated_sort(population, k)
for front in pareto_fronts:
assign_crowding_distance(front)
chosen = list(itertools.chain(*pareto_fronts[:-1]))
k -= len(chosen)
if k > 0:
sorted_front = sorted(pareto_fronts[-1], key=operator.attrgetter("d"), reverse=True)
chosen.extend(sorted_front[:k])
return copy.deepcopy(chosen)
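# Generational loop sketch (hedged): the usual NSGA-II wiring of the operators in
# this file; `parents`, `pop_size` and the evaluation step are illustrative.
#   children = crossover(parents, n_dim, lower, upper, eta_c, p_cross)
#   mutation(children, n_dim, lower, upper, eta_m, p_mut)
#   # ... evaluate children so that f and s are set on every individual ...
#   combined = Population()
#   combined.populate(parents)
#   combined.populate(children)
#   parents = selection(combined, pop_size)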
def nondominated_sort(population, k, first_front_only=False):
"""
Sort the first k individuals from the population into different nondomination
levels using the Fast Nondominated Sorting Approach proposed by Deb et al.
Function structure and implementation adapted from the DEAP package.
    :param population: <Population> Population of individuals to sort
    :param k: The number of individuals to select
    :param first_front_only: <bool> If True, compute and return only the first Pareto front
    :return: <List<List<Individual>>> A list of ordered Pareto fronts
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in population:
map_fit_ind[(tuple(ind.f))] = ind
fits = map_fit_ind.keys()
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i+1:]:
if map_fit_ind[tuple(fit_i)].dominates(map_fit_ind[tuple(fit_j)]):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif map_fit_ind[tuple(fit_j)].dominates(map_fit_ind[tuple(fit_i)]):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
map_fit_ind[tuple(fit_i)].r = 1
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].append(map_fit_ind[tuple(fit)])
pareto_sorted = len(fronts[-1])
    # Rank the next fronts until all individuals are sorted or
    # the given number of individuals has been sorted.
if not first_front_only:
N = min(len(population), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += 1
fronts[-1].append(map_fit_ind[tuple(fit_d)])
map_fit_ind[tuple(fit_d)].r = len(fronts)
current_front = next_front
next_front = []
return copy.deepcopy(fronts)
def assign_crowding_distance(individuals):
"""
Assign the crowding distance to each individual.
:param individuals: <Population, List> Individuals to assign crowding distance to.
:return: None
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.f, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].f)
for i in xrange(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, nexxt in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += (nexxt[0][i] - prev[0][i]) / norm
for i, dist in enumerate(distances):
individuals[i].d = dist
def tournament_select(population, k):
"""
Tournament selection based on the constraint dominance principle and the
crowding distance.
:param population: <Population, List> Individuals to select from
:param k: <int> The number of individuals to select.
:return: <List<Individual>> The list of selected individuals
"""
def tournament(ind1, ind2):
if ind1.dominates(ind2):
return copy.deepcopy(ind1)
elif ind2.dominates(ind1):
return copy.deepcopy(ind2)
if ind1.d < ind2.d:
return copy.deepcopy(ind2)
elif ind1.d > ind2.d:
return copy.deepcopy(ind1)
if random.random() <= 0.5:
return copy.deepcopy(ind1)
return copy.deepcopy(ind2)
population_1 = random.sample(population, len(population))
population_2 = random.sample(population, len(population))
chosen = []
for i in xrange(0, k, 4):
chosen.append(tournament(population_1[i], population_1[i+1]))
        chosen.append(tournament(population_1[i+2], population_1[i+3]))
chosen.append(tournament(population_2[i], population_2[i+1]))
chosen.append(tournament(population_2[i+2], population_2[i+3]))
return chosen
def update_progress(progress):
bar_length = 20 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(bar_length*progress))
text = "\rPercent: [{0}] {1}% {2}".format("="*block + " "*(bar_length-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
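# Usage sketch (hedged): drive the bar from a loop; n_gen is illustrative.
#   for gen in xrange(n_gen):
#       # ... one generation of work ...
#       update_progress((gen + 1) / float(n_gen))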
|
"""update cascading rules
Revision ID: 619fe6fe066c
Revises: 73ea6c072986
Create Date: 2017-03-15 10:51:12.494508
"""
revision = "619fe6fe066c"
down_revision = "73ea6c072986"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
"""Freebox component constants."""
from __future__ import annotations
import socket
from homeassistant.components.sensor import SensorEntityDescription
from homeassistant.const import DATA_RATE_KILOBYTES_PER_SECOND, PERCENTAGE, Platform
DOMAIN = "freebox"
SERVICE_REBOOT = "reboot"
APP_DESC = {
"app_id": "hass",
"app_name": "Home Assistant",
"app_version": "0.106",
"device_name": socket.gethostname(),
}
API_VERSION = "v6"
PLATFORMS = [Platform.BUTTON, Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SWITCH]
DEFAULT_DEVICE_NAME = "Unknown device"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONNECTION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="rate_down",
name="Freebox download speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download-network",
),
SensorEntityDescription(
key="rate_up",
name="Freebox upload speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload-network",
),
)
CONNECTION_SENSORS_KEYS: list[str] = [desc.key for desc in CONNECTION_SENSORS]
CALL_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="missed",
name="Freebox missed calls",
icon="mdi:phone-missed",
),
)
DISK_PARTITION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="partition_free_space",
name="free space",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
),
)
DEVICE_ICONS = {
"freebox_delta": "mdi:television-guide",
"freebox_hd": "mdi:television-guide",
"freebox_mini": "mdi:television-guide",
"freebox_player": "mdi:television-guide",
"ip_camera": "mdi:cctv",
"ip_phone": "mdi:phone-voip",
"laptop": "mdi:laptop",
"multimedia_device": "mdi:play-network",
"nas": "mdi:nas",
"networking_device": "mdi:network",
"printer": "mdi:printer",
"router": "mdi:router-wireless",
"smartphone": "mdi:cellphone",
"tablet": "mdi:tablet",
"television": "mdi:television",
"vg_console": "mdi:gamepad-variant",
"workstation": "mdi:desktop-tower-monitor",
}
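# Example (hedged): resolve an icon with a fallback for unknown device types;
# host_type and the fallback icon are illustrative.
#   icon = DEVICE_ICONS.get(host_type, "mdi:help-network")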
|
""" Functions and classes dealing with commands. """
|
import os
from slackclient import SlackClient
BOT_NAME = 'chopbot3000'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
    else:
        print("users.list API call failed; could not look up bot user " + BOT_NAME)
|
import re
import os
import itertools
import time
from string import upper
import ete3
import copy
import subprocess
from collections import defaultdict
from sys import platform
from scipy import stats
from ete3 import Tree
from natsort import natsorted
from Bio import AlignIO
"""
Functions:
~
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
def run_saved_dgen(stat_file,sequence_files,window_size=999999999999999999999999999999,
window_offset=999999999999999999999999999999, verbose=False, alpha=0.01,
plot=False, meta=False):
"""
    Runs a previously generated DGEN statistic (read from stat_file) against one
    or more sequence alignments, optionally over sliding windows.
    Inputs:
    stat_file --- path to the saved statistic file (taxa, species tree/network, outgroup, invariant groups)
    sequence_files --- a list of sequence alignment files in phylip format
    window_size --- the desired window size (default is effectively the whole alignment)
    window_offset --- the desired offset between windows
    verbose --- if True, also report degrees of freedom, ignored sites and chi-squared values
    alpha --- the significance level for the chi-squared test
    plot, meta --- plotting options passed through to plot_formatting
    Output:
    s --- a formatted string of the results
"""
#decided to do a rename here
alignments = sequence_files
# read in dgen stat from file
# (have to wait for file to exist sometimes)
while not os.path.exists(stat_file):
time.sleep(1)
with(open(stat_file, "r")) as s:
lines = s.readlines()
taxa = eval(lines[0].split(None, 1)[1])
stat_species_tree = lines[1].split(None, 2)[2].replace("\n", "")
stat_species_network = lines[2].split(None, 2)[2].replace("\n", "")
outgroup = lines[3].split(None, 1)[1].replace("\n", "")
invariants = []
for oneInvLine in range(4,len(lines)):
this_line_invariant_group = eval(lines[oneInvLine].split(None, 6)[6])
invariants.append(this_line_invariant_group)
#increase = eval(lines[1].split(None, 2)[2])
#decrease = eval(lines[2].split(None, 2)[2])
#increase_resized = increase
#decrease_resized = decrease
#overall_coefficient = 1
#patterns_to_coeff = {}
# DONE READING IN STATISTIC FROM FILE, RUN THE STAT
#window_size = 5000
#window_offset = 5000
#verbose = True
#alpha = 0.01
#alignments.append(sequence_file)
alignments_to_windows_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, window_size,
window_offset, verbose, alpha)
    # reuse the same windowed function for the whole-alignment DGEN by passing an effectively infinite window
alignments_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, 999999999999999999999999999999,
999999999999999999999999999999, verbose, alpha)
for alignment in alignments:
alignments_to_DGEN[alignment] = alignments_to_DGEN[alignment][0]
# print stuff
s = ""
for alignment in alignments:
if verbose:
dgen2_dof, significant_dgen, dgen2_num_ignored, dgen2_chisq, l_pval_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = degrees of freedom, is significant?, num. of sites ignored, chi squared value, DGEN p value)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
s += "\n"
s += "(Verbose) Number Of Sites Ignored: {0}".format(dgen2_num_ignored) + "\n"
s += "(Verbose) Degrees Of Freedom: {0}".format(dgen2_dof) + "\n"
s += "(Verbose) ChiSquared Value: {0}".format(dgen2_chisq) + "\n"
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][4]) + "\n"
windowIndex += 1
else:
l_pval_dgen, significant_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = DGEN p value, is significant?)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
# finally do one more output of just window#,dgen val for easy plotting
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][0]) + "\n"
windowIndex += 1
print s
if plot:
plot_formatting((alignments_to_DGEN, alignments_to_windows_to_DGEN), plot, meta)
return s
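# Usage sketch (hedged): file names are illustrative; the statistic file must
# follow the layout read above (taxa, tree, network, outgroup, invariants).
#   results = run_saved_dgen("my_stat.txt", ["alignment1.phylip"],
#                            window_size=5000, window_offset=5000,
#                            verbose=True, alpha=0.01)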
def calculate_windows_to_DGEN(alignments, taxa_order, outgroup, list_of_tree_and_net_invariants, window_size, window_offset,
verbose= False, alpha=0.01):
"""
    Calculates the DGEN statistic for the given alignments
    Input:
    alignments --- a list of sequence alignments in phylip format
    taxa_order --- the desired order of the taxa
    outgroup --- the outgroup taxon
    list_of_tree_and_net_invariants --- the tree/network invariant pattern groups from the statistic file
    window_size --- the desired window size
    window_offset --- the desired offset between windows
    Output:
    alignments_to_windows_to_d --- a mapping of each alignment to window indices to DGEN results
"""
# create a map that will map from all the patterns we care about to their counts
pattern_count_map = defaultdict(int)
for aLine in list_of_tree_and_net_invariants:
for aPatternGroup in aLine: # technically could skip the first one or just use the first one
for aPattern in aPatternGroup:
pattern_count_map[aPattern] = 0
# Separate the patterns of interest into their two terms
#terms1 = patterns_of_interest[0]
#terms2 = patterns_of_interest[1]
alignments_to_windows_to_d = {}
for alignment in alignments:
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
i = 0
num_windows = 0
if window_size > length_of_sequences:
num_windows = 1
window_size = length_of_sequences
else:
# Determine the total number of windows needed
while i + window_size - 1 < length_of_sequences:
i += window_offset
num_windows += 1
site_idx = 0
windows_to_l = {}
# Iterate over each window
for window in range(num_windows):
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
num_ignored = 0
# Iterate over the indices in each window
for window_idx in range(window_size):
# Map each taxa to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
                    # upper() handles lowercase and uppercase sequences identically (the statistic wasn't working for lowercase sequences)
base = upper(sequence[site_idx])
taxa_to_site[taxon] = base
bases.add(base)
                # FASTA files allow many non-ACGT letters, so ensure bases only contains A, C, G, T
                possibleBases = set(["A", "C", "G", "T"])
# Statistic can only be calculated where the nucleotides are known
# this includes things like -'s, but also N's, and I will now exclude anything but ACTG
# if len(bases) == 2 and "-" not in bases and "N" not in bases:
if len(bases) == 2 and bases.issubset(possibleBases):
# Create the pattern that each site has
site_pattern = []
# The ancestral gene is always the same as the outgroup
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
# Determine if the correct derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
# Convert the site pattern to a string
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
# add to my new DGEN map
if site_string in pattern_count_map:
pattern_count_map[site_string] += 1
#elif "-" in bases or "N" in bases: #(more can happen now, i will just check not in subset of possible bases
elif bases.issubset(possibleBases) == False:
num_ignored += 1
                # catch-all for the remaining ignored sites: anything not strictly biallelic
                # (e.g. all one base), counted here so the verbose output is accurate
                else:
                    num_ignored += 1
                    # TODO: count sites with 3 or 4 distinct bases (len(bases) > 2) separately?
# Increment the site index
site_idx += 1
# create counts based on tree / net invariant groupings
list_of_tree_and_net_invariants_counts = []
for aLine in list_of_tree_and_net_invariants:
lineAdd = []
for aPatternGroup in aLine: # technically could skip the first one or just use the first one
groupCount = 0
for aPattern in aPatternGroup:
groupCount += pattern_count_map[aPattern]
lineAdd.append(groupCount)
list_of_tree_and_net_invariants_counts.append(lineAdd)
#for debugging, how many sites are actually observed that go into the observed calculations?
# calculate new version of chi squared test
# chi squared = sum of (obs - exp)^2 / exp
# obs is Na and exp is Nt * |Na| / |Nt|
chiSquared = 0
dof = 0
cCardinality = 0
for invariantIndex in range(0,len(list_of_tree_and_net_invariants)):
treeGroupSize = len(list_of_tree_and_net_invariants[invariantIndex][0])
treeGroupCount = list_of_tree_and_net_invariants_counts[invariantIndex][0]
cCardinality += 1
for oneNetInvariant in range(1,len(list_of_tree_and_net_invariants[invariantIndex])):
netGroupSize = len(list_of_tree_and_net_invariants[invariantIndex][oneNetInvariant])
netGroupCount = list_of_tree_and_net_invariants_counts[invariantIndex][oneNetInvariant]
                chiNumerator = (netGroupCount - (treeGroupCount * (netGroupSize / float(treeGroupSize)))) ** 2 # float() guards against integer division
                chiDenominator = treeGroupCount * (netGroupSize / float(treeGroupSize))
                # skip a term whose expected count is zero rather than dividing by zero
                if chiDenominator != 0:
                    chiSquared += chiNumerator / float(chiDenominator)
dof += 1
            # degrees of freedom: sum|Y| - |C|; at this point dof holds sum|Y|,
            # so subtract the |C| value accumulated in cCardinality
            dof = dof - cCardinality
            # determine whether the chi-squared value is significant; the 'd value' is dispensed with in favor of chiSq and pval
# Verbose output
if verbose:
signif, chisq, pval = calculate_significance_custom_dof(chiSquared, dof, verbose, alpha)
# The line below can be changed to add more information to the windows to L mapping
#windows_to_l[window] = (l_stat, signif, num_ignored, chisq, pval)
windows_to_l[window] = (dof, signif, num_ignored, chisq, pval)
# Standard output
else:
signif, pval = calculate_significance_custom_dof(chiSquared, dof, verbose, alpha)
windows_to_l[window] = (pval, signif)
# Account for overlapping windows
site_idx += (window_offset - window_size)
alignments_to_windows_to_d[alignment] = windows_to_l
return alignments_to_windows_to_d
def Create_Network_Helper(species_tree, reticulations, inheritanceProb):
"""
    A slightly more user-friendly wrapper (leo): takes a single inheritance probability instead of a tuple.
    Creates a network tree based on the species tree
    and the two leaves to be connected.
    Inputs:
    inheritanceProb --- a single inheritance probability, e.g. 0.7 (the complement, 0.3, is derived)
    species_tree --- generated or inputted file or newick string
    reticulations --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
inheritance = (inheritanceProb, 1 - float(inheritanceProb))
#inheritance[0] = inheritanceProb
#inheritance[1] = 1 - float(inheritanceProb)
# check for a species tree file
if os.path.isfile(species_tree):
with open(species_tree) as f:
network = f.readline()
# check for a species tree string
else:
network = species_tree
for i in range(len(reticulations)):
# get taxa for the edge in the network
start = reticulations[i][0]
end = reticulations[i][1]
# add nodes into tree in proper format
        #network = network.replace(start, '((' + start + ')#H' + str(i+1) + ':0::' + str(inheritance[0]) + ')') # one parenthesis too many here apparently
        network = network.replace(start, '(' + start + ')#H' + str(i + 1) + ':0::' + str(inheritance[0]) + '') # took a parenthesis off
network = network.replace(end, '(#H' + str(i+1) + ':0::' + str(inheritance[1]) + ',' + end + ')')
return network
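# Example sketch (hedged): one reticulation from P2 to P3 with inheritance 0.7.
#   Create_Network_Helper("(((P1,P2),P3),O);", [("P2", "P3")], 0.7)
#   # -> "(((P1,(P2)#H1:0::0.7),(#H1:0::0.3,P3)),O);"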
def generate_network_tree(inheritance, species_tree, reticulations):
"""
Creates a network tree based on the species tree
and the two leaves to be connected.
Inputs:
inheritance --- inputted tuple containing inheritance probability ex. (0.7, 0.3)
species_tree --- generated or inputted file or newick string
    reticulations --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
# check for a species tree file
if os.path.isfile(species_tree):
with open(species_tree) as f:
network = f.readline()
# check for a species tree string
else:
network = species_tree
for i in range(len(reticulations)):
# get taxa for the edge in the network
start = reticulations[i][0]
end = reticulations[i][1]
# add nodes into tree in proper format
network = network.replace(start, '((' + start + ')#H' + str(i+1) + ':0::' + str(inheritance[0]) + ')')
network = network.replace(end, '(#H' + str(i+1) + ':0::' + str(inheritance[1]) + ',' + end + ')')
return network
def genDistinct(n):
"""
Generate all full binary trees with n leaves
Input:
n --- the number of leaves
Output:
dp[-1] --- the set of all full binary trees with n nodes
"""
leafnode = '(.)'
dp = []
newset = set()
newset.add(leafnode)
dp.append(newset)
for i in range(1, n):
newset = set()
for j in range(i):
for leftchild in dp[j]:
for rightchild in dp[i - j - 1]:
newset.add('(' + '.' + leftchild + rightchild + ')')
dp.append(newset)
return dp[-1]
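# Example: with two leaves there is a single tree shape ('.' marks a node).
#   genDistinct(2) -> set(['(.(.)(.))'])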
def generate_all_trees(taxa):
"""
Create all trees given a set of taxa
Inputs:
taxa --- a set of the taxa to be used for leaf names
Output:
trees --- the set of all trees over the taxa
"""
# Regex pattern for identifying leaves next to a clade in newick string
pattern = "([\)][a-zA-Z0-9_.-])"
# Generate all distinct binary trees
trees = genDistinct(len(taxa))
# Get all possible permutations of the taxa
taxa_orders = itertools.permutations(taxa)
taxa_orders = list(taxa_orders)
all_trees = []
# Iterate over each tree in the set
for tree in trees:
# Reformat the tree
tree = tree.replace('.', '')
# Iterate over each permutation of taxa
for taxa_perm in taxa_orders:
# Create a copy of the tree
bi_tree = tree
# replace the leaves with taxons and reformat string
for i in range(len(taxa_perm)):
taxon = taxa_perm[i] + ","
bi_tree = bi_tree.replace("()", taxon, 1)
bi_tree = bi_tree.replace(",)", ")")
# Find all instances of a ")" followed by a taxon and add a "," between
clades = re.findall(pattern, bi_tree)
for clade in clades:
taxon = clade[1]
bi_tree = bi_tree.replace(clade, ")," + taxon)
bi_tree = bi_tree.replace(")(", "),(")
bi_tree = bi_tree + ";"
all_trees.append(bi_tree)
return all_trees
def generate_unique_trees(taxa, outgroup):
"""
Generate the set of unique trees over a set of taxa with an outgroup
Inputs:
taxa --- a list of taxa to be used as the leaves of trees
outgroup --- the outgroup to root at
Output:
unique_newicks --- a set of all unique topologies over the given taxa
"""
# Create a set for unique trees
unique_trees = set([])
unique_newicks = set([])
all_trees = generate_all_trees(taxa)
# Iterate over each tree in all_trees
for tree in all_trees:
tree = Tree(tree)
tree.set_outgroup(outgroup)
is_unique = True
# Iterate the unique trees for comparison
for unique_tree in unique_trees:
# Compute robinson-foulds distance
rf_distance = tree.robinson_foulds(unique_tree)[0]
# If rf distance is 0 the tree is not unique
if rf_distance == 0:
is_unique = False
if is_unique:
unique_trees.add(tree)
# Iterate over the trees
for tree in unique_trees:
# Get newick strings from the tree objects
tree = tree.write()
# Get rid of branch lengths in the newick strings
tree = branch_removal(tree)
tree = outgroup_reformat(tree, outgroup)
# Add the newick strings to the set of unique newick strings
unique_newicks.add(tree)
return unique_newicks
def calculate_pgtst(species_tree, gene_tree):
"""
Calculate p(gt|st) or p(gt|sn)
Input:
species_tree --- a species tree or network in newick format
gene_tree --- a gene tree in newick format
Output:
pgtst --- p(gt|st) or p(gt|sn)
"""
# Get the global path name to the jar file
dir_path = os.path.dirname(os.path.realpath(__file__))
j = os.path.join(dir_path, "Unstable.jar")
# Run PhyloNet p(g|S) jar file
p = subprocess.Popen("java -jar {0} {1} {2}".format(j, species_tree, gene_tree), stdout=subprocess.PIPE,
shell=True)
# Read output and convert to float
pgtst = float(p.stdout.readline())
return pgtst
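# Usage sketch (hedged): requires the bundled Unstable.jar and a Java runtime
# on the path; the newick strings are illustrative.
#   p = calculate_pgtst("((A,B),C);", "((A,C),B);")  # float p(gt|st)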
def calculate_newicks_to_stats(species_tree, species_network, unique_trees):
"""
Compute p(g|S) and p(g|N) for each g in unique_trees and
map the tree newick string to those values
Inputs:
species_tree --- the species tree newick string for the taxa
species_network --- the network newick string derived from adding a branch to the species tree between the interested taxa
unique_trees --- the set of all unique topologies over n taxa
outgroup --- the outgroup
Output:
trees_to_pgS--- a mapping of tree newick strings to their p(g|S) values
trees_to_pgN--- a mapping of tree newick strings to their p(g|N) values
"""
trees_to_pgS = {}
trees_to_pgN = {}
if platform == 'darwin':
# macs need single quotes for some reason
species_tree = str(species_tree)
species_tree = "'" + species_tree + "'"
species_network = str(species_network)
species_network = "'" + species_network + "'"
# Iterate over the trees
for tree in unique_trees:
if platform == 'darwin':
# macs need single quotes for some reason
tree = "'" + tree + "'"
p_of_g_given_s = calculate_pgtst(species_tree, tree)
p_of_g_given_n = calculate_pgtst(species_network, tree)
if platform == 'darwin':
# remove the quotes from the tree before we add it to the mapping
tree = tree[1:-1]
trees_to_pgS[tree] = p_of_g_given_s
trees_to_pgN[tree] = p_of_g_given_n
return trees_to_pgS, trees_to_pgN
def outgroup_reformat(newick, outgroup):
"""
Move the location of the outgroup in a newick string to be at the end of the string
Inputs:
newick --- a newick string to be reformatted
outgroup --- the outgroup
Output:
newick --- the reformatted string
"""
# Replace the outgroup and comma with an empty string
newick = newick.replace(outgroup + ",", "")
newick = newick[:-2] + "," + outgroup + ");"
return newick
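# Example: outgroup_reformat("(O,(A,B));", "O") -> "((A,B),O);"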
def pattern_inverter(patterns):
"""
Switches "A"s to "B"s and "B"s to "A" in a site pattern excluding the outgroup
Inputs:
patterns --- a list of site patterns
Output:
inverted --- a list of the inverted patterns
"""
inverted = []
# Iterate over the patterns
for pattern in patterns:
a_count = 0
b_count = 0
inverted_pattern = []
# Iterate over each site in the pattern
for site in pattern:
if site == "A":
inverted_pattern.append("B")
b_count += 1
elif site == "B":
inverted_pattern.append("A")
a_count += 1
if inverted_pattern[-1] != "A":
# Change the last site to an "A"
inverted_pattern[-1] = "A"
b_count -= 1
a_count += 1
if a_count > 1 and b_count > 0:
inverted.append(inverted_pattern)
return inverted
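# Example: pattern_inverter([["A","B","B","A"]]) -> [["B","A","A","A"]]
# (the final, outgroup position is forced back to "A" after inversion)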
def pattern_string_generator(patterns):
"""
Creates a list of viable pattern strings that are easier to read
Input:
patterns --- a list of lists of individual characters e.g. [["A","B","B","A"],["B","A","B","A"]]
Output:
    pattern_strings --- a list of strings e.g. ["ABBA","BABA"]
"""
# Convert the site pattern lists to strings
pattern_strings = []
while patterns:
a_count = 0
b_count = 0
pattern_str = ""
pattern = patterns.pop()
for site in pattern:
if site == "A":
b_count += 1
elif site == "B":
a_count += 1
pattern_str += site
if a_count > 0 and b_count > 0:
pattern_strings.append(pattern_str)
return pattern_strings
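# Example: pattern_string_generator([["A","B","B","A"], ["B","A","B","A"]])
#          -> ["BABA", "ABBA"]   (pop() processes the input in reverse order)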
def branch_removal(n):
"""
Remove the branch lengths from an inputted newick string
Input:
n --- a newick string
Output:
n --- the reformatted string
"""
float_pattern = "([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
# Regular expressions for removing branch lengths and confidence values
pattern2 = "([\:][\\d])"
pattern3 = "([\)][\\d])"
# Get rid of branch lengths in the newick strings
n = (re.sub(float_pattern, '', n))
n = (re.sub(pattern2, '', n)).replace(":", "")
n = (re.sub(pattern3, ')', n))
return n
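# Example: branch_removal("((A:0.1,B:0.2):0.3,C:0.4);") -> "((A,B),C);"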
def site_pattern_generator(taxa_order, newick, outgroup):
"""
Generate the appropriate AB list patterns
Inputs:
taxa_order --- the desired order of the taxa
newick --- the newick string to generate site patterns for
outgroup --- the outgroup of the tree
Output:
finished_patterns --- the list of site patterns generated for the newick string
"""
# Create a tree object
tree = ete3.Tree(newick, format=1)
tree.ladderize(direction=1)
tree.set_outgroup(outgroup)
# Initialize containers for the final patterns and patterns being altered
final_site_patterns = []
# Keep a count of clades in the tree that contain 2 leaves
clade_count = 0
# Number of possible patterns is number of taxa - 2 + the number of clades
num_patterns = len(taxa_order) - 2
# Initialize pattern to be a list of strings
pattern = ["B" for x in range(len(taxa_order))]
# Create list of nodes in order of appearance
nodes = []
for node in tree.traverse("preorder"):
# Add node name to list of nodes
nodes.append(node.name)
# If there are internal nodes at the second and third position travel over the tree in postorder
if nodes[2] == "" and nodes[3] == "":
nodes = []
for node in tree.traverse("postorder"):
# Add node name to list of nodes
nodes.append(node.name)
# Keep track of visited leaves
seen_leaves = []
# Iterate over the order that the nodes occur beginning at the root
for node_idx in range(len(nodes)):
node = nodes[node_idx]
# If the node is the outgroup add A to the end of the pattern
if node == outgroup:
pattern[-1] = "A"
# Add outgroup to the seen leaves
seen_leaves.append(node)
elif outgroup not in seen_leaves:
pass
# Else if the node is a leaf and is after the outgroup
elif node != "" and seen_leaves[-1] == outgroup and outgroup in seen_leaves:
# If the next node is a leaf a clade has been found
if nodes[node_idx + 1] != "":
node2 = nodes[node_idx + 1]
# Get the indices of the leaves in the pattern
pat_idx1 = taxa_order.index(node)
pat_idx2 = taxa_order.index(node2)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
pattern[pat_idx2] = "A"
clade_count += 1
final_site_patterns.append(pattern)
seen_leaves.append(node)
seen_leaves.append(node2)
# Get the index that final clade occurs at
end_idx = node_idx + 1
break
# Otherwise there is no clade
else:
# Get the index of the leaf in the pattern
pat_idx = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx] = "A"
seen_leaves.append(node)
# Get the index that final leaf occurs at
end_idx = node_idx
break
num_patterns = num_patterns + clade_count
# All patterns can be derived from the pattern with the most B's
working_patterns = [pattern for x in range(num_patterns)]
# Pop a pattern off of working patterns and add it to the final site patterns
final_site_patterns.append(working_patterns.pop())
# Iterate over each pattern in working patterns and change them
while working_patterns:
# Get a pattern and copy it
pattern = copy.deepcopy(working_patterns.pop())
# Iterate over the order that the nodes occur beginning at the last clade or leaf
for node_idx in range(end_idx + 1, len(nodes)):
# If the last clade is reached break
if node_idx == len(nodes) - 1:
if node != "":
# Get the index of the leaf in the pattern
pat_idx1 = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
# Get the index that final leaf occurs at
end_idx = node_idx
break
else:
break
node = nodes[node_idx]
# If the next node is a leaf a clade has been found
if node != "" and nodes[node_idx + 1] != "":
node2 = nodes[node_idx + 1]
# Get the indices of the leaves in the pattern
pat_idx1 = taxa_order.index(node)
pat_idx2 = taxa_order.index(node2)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
pattern[pat_idx2] = "A"
clade_count += 1
final_site_patterns.append(pattern)
# Get the index that final clade occurs at
end_idx = node_idx + 1
break
# Else if the node is a leaf
elif node != "":
# Get the index of the leaf in the pattern
pat_idx1 = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
# Get the index that final leaf occurs at
end_idx = node_idx
break
# Add the altered pattern to the final site patterns
final_site_patterns.append(pattern)
# Update the working patterns to be the same as the most recent pattern
working_patterns = [pattern for x in range(num_patterns - len(final_site_patterns))]
# Create a list of patterns without duplicates
finished_patterns = []
# Iterate over each pattern and determine which ones are duplicates
for pattern in final_site_patterns:
if pattern not in finished_patterns:
finished_patterns.append(pattern)
# If a site pattern only has a single B consider it as the inverse
for pattern in finished_patterns:
if b_count(pattern) == 1:
finished_patterns.remove(pattern)
new_pattern = pattern_inverter([pattern])[0]
finished_patterns.append(new_pattern)
# Always do calculation with the inverse patterns
inverted_patterns = pattern_inverter(finished_patterns)
# Iterate over the inverted patterns and add them to finished patterns
for pattern in inverted_patterns:
if pattern not in finished_patterns:
finished_patterns.append(pattern)
finished_patterns = pattern_string_generator(finished_patterns)
inverted_patterns = pattern_string_generator(inverted_patterns)
return finished_patterns, inverted_patterns
def newicks_to_patterns_generator(taxa_order, newicks, outgroup):
"""
Generate the site patterns for each newick string and map the strings to their patterns
Inputs:
taxa_order --- the desired order of the taxa
newicks --- a list of newick strings
Output:
newicks_to_patterns --- a mapping of newick strings to their site patterns
"""
newicks_to_patterns = {}
inverse_to_counts = defaultdict(int)
# Iterate over the newick strings
for newick in newicks:
# Get the total set of site patterns and the inverses
all_patterns, inverses = site_pattern_generator(taxa_order, newick, outgroup)
newicks_to_patterns[newick] = all_patterns
# Count the number of times a site pattern appears as an inverse
for pattern in inverses:
inverse_to_counts[pattern] += 1
return newicks_to_patterns, inverse_to_counts
def calculate_pattern_probabilities(newicks_to_patterns, newicks_to_pgS, newicks_to_pgN):
"""
Creates a mapping of site patterns to their total p(g|S) values across all gene trees and
a mapping of site patterns to their total p(g|N) values across all gene trees
Inputs:
newicks_to_patterns --- a mapping of tree newick strings to their site patterns
newicks_to_pgS--- a mapping of tree newick strings to their p(g|S) values
newicks_to_pgN--- a mapping of tree newick strings to their p(g|N) values
Outputs:
patterns_to_pgS --- a mapping of site patterns to their total p(g|S) value
patterns_to_pgN --- a mapping of site patterns to their total p(g|N) value
"""
patterns_to_pgS = defaultdict(float)
patterns_to_pgN = defaultdict(float)
# Iterate over each newick string
for newick in newicks_to_patterns:
# Iterate over each site pattern of a tree
for pattern in newicks_to_patterns[newick]:
patterns_to_pgS[pattern] += newicks_to_pgS[newick]
patterns_to_pgN[pattern] += newicks_to_pgN[newick]
return patterns_to_pgS, patterns_to_pgN
def determine_patterns(pattern_set, patterns_to_equality, patterns_to_pgN, patterns_to_pgS, use_inv):
"""
Determine which patterns are useful in determining introgression
Inputs:
pattern_set -- a set containing all patterns of interest
patterns_to_equality --- a mapping of site patterns to site patterns with equivalent p(gt|st)
patterns_to_pgN --- a mapping of site patterns to their total p(g|N) value for a network
    patterns_to_pgS --- a mapping of site patterns to their total p(g|st) value
    use_inv --- boolean for determining if inverse site patterns will be used
    Outputs:
    terms1 --- set of patterns to count whose probabilities increase under introgression
    terms2 --- set of patterns to count whose probabilities decrease under introgression
    terms1_resized, terms2_resized --- the same sets after balancing by resize_terms
    patterns_to_coefficients --- a mapping of site patterns to scaling coefficients from scale_terms
"""
terms1 = set([])
terms2 = set([])
# Iterate over each pattern to determine the terms of interest
for pattern1 in pattern_set:
pat1_prob = patterns_to_pgN[pattern1]
if pattern1 in patterns_to_equality.keys():
for pattern2 in patterns_to_equality[pattern1]:
pat2_prob = patterns_to_pgN[pattern2]
# Issues with randomness when very small values are close but not technically equal
if not approximately_equal(pat1_prob, pat2_prob):
if pat1_prob > pat2_prob:
terms1.add(pattern1)
terms2.add(pattern2)
elif pat1_prob < pat2_prob:
terms1.add(pattern2)
terms2.add(pattern1)
terms1_resized, terms2_resized = resize_terms(terms1, terms2, patterns_to_pgS, use_inv)
patterns_to_coefficients = scale_terms(terms1, terms2, patterns_to_pgS)
return terms1, terms2, terms1_resized, terms2_resized, patterns_to_coefficients
def resize_terms(terms1, terms2, patterns_to_pgS, use_inv):
"""
Resize the terms to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
Inputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
use_inv --- a boolean for determining if inverse site patterns will be used
Outputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
for tree in terms1:
# Round to 15 decimal places to avoid floating point noise in comparisons of small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
# Round to 15 decimal places to avoid floating point noise in comparisons of small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
removed = set([])
# The number of site patterns to remove is the difference in counts
num_remove = abs(count2 - count1)
if use_inv:
# When using inverses, removing a pattern also removes its paired inverse, so halve the count
num_remove = num_remove / 2
# If probabilities do not occur an equal number of times remove site patterns until they do
if count1 > count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = min(pgtst_to_trees1[prob])
pgtst_to_trees1[prob].remove(r)
removed.add(r)
terms1_remove = True
if count1 < count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = min(pgtst_to_trees2[prob])
pgtst_to_trees2[prob].remove(r)
removed.add(r)
terms1_remove = False
if use_inv:
# Remove site patterns and their inverses
rm = set([])
inv_rm = pattern_inverter(removed)
for pattern in inv_rm:
rm.add(''.join(pattern))
removed = removed.union(rm)
# Iterate over each pattern to be removed and remove it
for pattern in removed:
if terms1_remove:
terms1.remove(pattern)
else:
terms2.remove(pattern)
terms1, terms2 = tuple(terms1), tuple(terms2)
return terms1, terms2
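# Illustration (hypothetical values): if a rounded p(gt|st) value backs three
# patterns in terms1 but only one in terms2, the two lexicographically smallest
# extra patterns are dropped from terms1 so both sides carry equal probability mass.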
def scale_terms(terms1, terms2, patterns_to_pgS):
"""
Multiply the terms by a scalar to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
Inputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
Outputs:
patterns_to_coefficient --- a mapping of site patterns to a coefficent to multiply their counts by
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
patterns_to_coefficient = {}
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
# Get the patterns in the left set of terms corresponding to the probability
patterns1 = pgtst_to_trees1[prob]
# Multiply each term in terms1 by count2 / count1
for pattern in patterns1:
patterns_to_coefficient[pattern] = float(count2) / count1
return patterns_to_coefficient
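# Illustration (hypothetical values): if a p(gt|st) value occurs twice in terms1
# and four times in terms2, each matching terms1 pattern gets coefficient 4/2 = 2.0.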
def generate_statistic_string(patterns_of_interest):
"""
Create a string representing the statistic for determining introgression like "(ABBA - BABA)/(ABBA + BABA)"
Input:
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
Output:
L_statistic --- a string representation of the statistic
"""
calculation = []
# Iterate over each set of patterns
for pattern_set in patterns_of_interest:
term = "("
# Combine each term with a "+"
for pattern in sorted(pattern_set):
term = term + pattern + " + "
term = term[:-3] + ")"
calculation.append(term)
L_statistic = "({0} - {1}) / ({0} + {1})".format(calculation[0], calculation[1])
return L_statistic
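# Example:
#   generate_statistic_string(({"ABBA"}, {"BABA"}))
#   -> "((ABBA) - (BABA)) / ((ABBA) + (BABA))"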
def calculate_significance_custom_dof(chiSqValue, dofValue, verbose, alpha):
"""
Determines statistical significance based on a chi-squared goodness of fit test
Input:
chiSqValue --- the chi-squared test statistic value
dofValue --- the degrees of freedom for the chi-squared distribution
verbose --- a boolean corresponding to a verbose output
alpha --- the significance level
Output:
significant --- a boolean corresponding to whether or not the result is statistically significant
"""
# Calculate the p-value based on a chi-squared distribution with the given degrees of freedom
chiSquaredDistVal = stats.chi2.cdf(chiSqValue, dofValue)
pval = 1 - chiSquaredDistVal
if pval < alpha:
signif = True
else:
signif = False
if verbose:
return signif, chiSqValue, pval
else:
return signif, pval
def calculate_significance(left, right, verbose, alpha):
"""
Determines statistical significance based on a chi-squared goodness of fit test
Input:
left --- the total count for site patterns in the left term of the statistic
right --- the total count for site patterns in the right term of the statistic
verbose --- a boolean corresponding to a verbose output
alpha --- the significance level
Output:
significant --- a boolean corresponding to whether or not the result is statistically significant
"""
# Calculate the test statistic
if left + right > 0:
chisq = abs((left - right)**2 / float(left + right))
else:
chisq = 0
# Calculate the p-value based on a chi square distribution with df = 1
pval = 1 - stats.chi2.cdf(chisq, 1)
if pval < alpha:
signif = True
else:
signif = False
if verbose:
return signif, chisq, pval
else:
return signif
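# Worked example: left=30, right=10 gives chisq = (30 - 10)**2 / 40.0 = 10.0 and
# pval = 1 - stats.chi2.cdf(10.0, 1) ~= 0.00157, significant at alpha = 0.01.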
def calculate_L(alignments, taxa_order, outgroup, patterns_of_interest, verbose, alpha, patterns_of_interest_resized,
overall_coefficient=1, patterns_to_coefficients={}):
"""
Calculates the L statistic for the given alignment
Input:
alignments --- a list of sequence alignments in phylip format
taxa_order --- the desired order of the taxa
outgroup --- the outgroup taxon used to orient the site patterns
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
verbose --- a boolean if more output information is desired
alpha --- the significance level
patterns_of_interest_resized --- the patterns of interest after block resizing
overall_coefficient --- the probability coefficient used to maintain the null hypothesis
patterns_to_coefficients --- a mapping of site patterns to coefficients needed to maintain the null
Output:
alignments_to_d_resized --- a mapping of alignments to their D statistic information using block resizing
alignments_to_d_pattern_coeff --- a mapping of alignments to their D statistic information using per-pattern coefficients
alignments_to_d_ovr_coeff --- a mapping of alignments to their D statistic information using the overall coefficient
"""
# Separate the patterns of interest into their two terms
terms1 = patterns_of_interest[0]
terms2 = patterns_of_interest[1]
# Do the same for the resized terms
terms1_resized = patterns_of_interest_resized[0]
terms2_resized = patterns_of_interest_resized[1]
# Create a mapping for each generalized D type
alignments_to_d_resized = {}
alignments_to_d_pattern_coeff = {}
alignments_to_d_ovr_coeff = {}
for alignment in alignments:
# Initialize the per-alignment pattern counters and sequence lists
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
terms1_counts_resized = defaultdict(int)
terms2_counts_resized = defaultdict(int)
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
# Use the shortest sequence so site indexing never runs past any row of the alignment
length_of_sequences = len(min(sequence_list, key=len))
num_ignored = 0
# Iterate over the site indices
for site_idx in range(length_of_sequences):
# Map each taxon to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
base = sequence[site_idx]
taxa_to_site[taxon] = base
bases.add(base)
# Statistic can only be calculated where the nucleotides are known
if "-" not in bases and "N" not in bases and len(bases) == 2:
# Create the pattern that each site has
site_pattern = []
# The ancestral state is taken to be the outgroup's base
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
# Determine the derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
if site_string in terms1:
terms1_counts[site_string] += 1
if site_string in terms2:
terms2_counts[site_string] += 1
if site_string in terms1_resized:
terms1_counts_resized[site_string] += 1
if site_string in terms2_resized:
terms2_counts_resized[site_string] += 1
elif "-" in bases or "N" in bases:
num_ignored += 1
terms1_total = sum(terms1_counts.values())
terms2_total = sum(terms2_counts.values())
terms1_total_resized = sum(terms1_counts_resized.values())
terms2_total_resized = sum(terms2_counts_resized.values())
# Calculate the generalized d for the block resizing method
numerator_resized = terms1_total_resized - terms2_total_resized
denominator_resized = terms1_total_resized + terms2_total_resized
if denominator_resized != 0:
l_stat_resized = numerator_resized / float(denominator_resized)
else:
l_stat_resized = 0
# Calculate the generalized d for the total coefficient method
numerator_ovr_coeff = (overall_coefficient * terms1_total) - terms2_total
denominator_ovr_coeff = (overall_coefficient * terms1_total) + terms2_total
if denominator_ovr_coeff != 0:
l_stat_ovr_coeff = numerator_ovr_coeff / float(denominator_ovr_coeff)
else:
l_stat_ovr_coeff = 0
# Calculate the generalized d for the pattern coefficient method
weighted_terms1_total, weighted_counts = weight_counts(terms1_counts, patterns_to_coefficients)
numerator_pattern_coeff = weighted_terms1_total - terms2_total
denominator_pattern_coeff = weighted_terms1_total + terms2_total
if denominator_pattern_coeff != 0:
l_stat_pattern_coeff = numerator_pattern_coeff / float(denominator_pattern_coeff)
else:
l_stat_pattern_coeff = 0
# Verbose output
if verbose:
significant, chisq, pval = calculate_significance(terms1_total_resized, terms2_total_resized, verbose, alpha)
alignments_to_d_resized[
alignment] = l_stat_resized, significant, terms1_counts_resized, terms2_counts_resized, num_ignored, chisq, pval
significant, chisq, pval = calculate_significance(weighted_terms1_total, terms2_total, verbose, alpha)
alignments_to_d_pattern_coeff[
alignment] = l_stat_pattern_coeff, significant, weighted_counts, terms2_counts, num_ignored, chisq, pval
significant, chisq, pval = calculate_significance(overall_coefficient * terms1_total, terms2_total, verbose, alpha)
alignments_to_d_ovr_coeff[
alignment] = l_stat_ovr_coeff, significant, terms1_counts, terms2_counts, num_ignored, chisq, pval, overall_coefficient
# Standard output
else:
significant = calculate_significance(terms1_total_resized, terms2_total_resized, verbose,
alpha)
alignments_to_d_resized[
alignment] = l_stat_resized, significant
significant = calculate_significance(weighted_terms1_total, terms2_total, verbose, alpha)
alignments_to_d_pattern_coeff[
alignment] = l_stat_pattern_coeff, significant
significant = calculate_significance(overall_coefficient * terms1_total, terms2_total, verbose, alpha)
alignments_to_d_ovr_coeff[
alignment] = l_stat_ovr_coeff, significant
return alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff
def weight_counts(term_counts, patterns_to_coefficients):
"""
Weight the site pattern counts by their per-pattern coefficients
Inputs:
term_counts --- a mapping of terms to their counts
patterns_to_coefficients --- a mapping of site patterns to coefficients needed to maintain the null
Outputs:
weighted_total --- the total weighted count for the site patterns
weighted_counts --- a mapping of site patterns to their weighted counts
"""
# Create a mapping of patterns to their weighted counts
weighted_counts = {}
# Iterate over each pattern
for pattern in term_counts:
# Weight its count based on the coefficient
if pattern in patterns_to_coefficients:
coefficient = patterns_to_coefficients[pattern]
else:
coefficient = 1
count = term_counts[pattern]
weighted_counts[pattern] = count * coefficient
weighted_total = sum(weighted_counts.values())
return weighted_total, weighted_counts
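# Illustration (hypothetical values): term_counts {"ABBA": 10} with coefficient 0.5
# yields weighted_counts {"ABBA": 5.0} and weighted_total 5.0.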
def calculate_windows_to_L(alignments, taxa_order, outgroup, patterns_of_interest, window_size, window_offset,
verbose=False, alpha=0.01):
"""
Calculates the L statistic for the given alignment
Input:
alignments --- a list of sequence alignments in phylip format
taxa_order --- the desired order of the taxa
outgroup --- the outgroup taxon used to orient the site patterns
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
window_size --- the desired window size
window_offset --- the desired offset between windows
Output:
alignments_to_windows_to_d --- a mapping of alignments to mappings of window indices to D statistic values
"""
# Separate the patterns of interest into their two terms
terms1 = patterns_of_interest[0]
terms2 = patterns_of_interest[1]
alignments_to_windows_to_d = {}
for alignment in alignments:
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
i = 0
num_windows = 0
if window_size > length_of_sequences:
num_windows = 1
window_size = length_of_sequences
else:
# Determine the total number of windows needed
while i + window_size - 1 < length_of_sequences:
i += window_offset
num_windows += 1
site_idx = 0
windows_to_l = {}
# Iterate over each window
for window in range(num_windows):
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
num_ignored = 0
# Iterate over the indices in each window
for window_idx in range(window_size):
# Map each taxon to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
base = sequence[site_idx]
taxa_to_site[taxon] = base
bases.add(base)
# Statistic can only be calculated where the nucleotides are known
if "-" not in bases and len(bases) == 2:
# Create the pattern that each site has
site_pattern = []
# The ancestral state is taken to be the outgroup's base
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
# Determine the derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
# Convert the site pattern to a string
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
if site_string in terms1:
terms1_counts[site_string] += 1
elif site_string in terms2:
terms2_counts[site_string] += 1
elif "-" in bases or "N" in bases:
num_ignored += 1
# Increment the site index
site_idx += 1
terms1_total = sum(terms1_counts.values())
terms2_total = sum(terms2_counts.values())
numerator = terms1_total - terms2_total
denominator = terms1_total + terms2_total
if denominator != 0:
l_stat = numerator / float(denominator)
else:
l_stat = 0
# Verbose output
if verbose:
signif, chisq, pval = calculate_significance(terms1_total, terms2_total, verbose, alpha)
# The line below can be changed to add more information to the windows to L mapping
windows_to_l[window] = (l_stat, signif, num_ignored, chisq, pval)
# Standard output
else:
signif = calculate_significance(terms1_total, terms2_total, verbose, alpha)
windows_to_l[window] = (l_stat, signif)
# Account for overlapping windows
site_idx += (window_offset - window_size)
alignments_to_windows_to_d[alignment] = windows_to_l
return alignments_to_windows_to_d
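# Shape of the non-verbose return value (hypothetical keys):
#   {"seqfile.txt": {0: (l_stat, signif), 1: (l_stat, signif), ...}}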
def branch_adjust(species_tree):
"""
Create all possible combinations of branch lengths for the given species tree
Input:
species_tree --- a newick string containing the overall species tree
Output:
adjusted_trees --- a set of trees with all combinations of branch lengths
taxa --- a list of the taxa in the species tree
"""
branch_lengths = [.5, 1.0, 2.0, 4.0]
adjusted_trees = set([])
taxa = []
pattern = "((?<=\()[\w]+)|((?<=\,)[\w]+)"
leaves = re.findall(pattern, species_tree)
for leaf in leaves:
if leaf[0] == '':
taxa.append(leaf[1])
else:
taxa.append(leaf[0])
for b in branch_lengths:
new_t = species_tree
for taxon in taxa:
new_t = new_t.replace(taxon, "{0}:{1}".format(taxon, b))
new_t = new_t.replace("),", "):{0},".format(b))
adjusted_trees.add(new_t)
return adjusted_trees, taxa
def approximately_equal(x, y, tol=1e-14):
"""
Determines if floats x and y are equal within a degree of uncertainty
Inputs:
x --- a float
y --- a float
tol --- an error tolerance
"""
return abs(x - y) <= tol
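# Example: approximately_equal(0.1 + 0.2, 0.3) is True even though
# 0.1 + 0.2 != 0.3 exactly in floating point.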
def equality_sets(species_trees, network, taxa, outgroup, use_inv):
"""
Create mappings of site patterns to patterns with equivalent probabilities
Input:
species_trees --- a set of species tree newick strings with varied branch lengths
network --- the species network newick string
taxa --- the taxa in the species tree
outgroup --- the outgroup taxon
use_inv --- a boolean for determining if inverse site patterns will be used
Output:
trees_to_equality --- a mapping of tree strings to a set of other trees with the same p(gt|st)
trees_to_equality_N --- a mapping of tree strings to a set of other trees with the same p(gt|N)
patterns_pgS --- a mapping of site patterns to their total p(g|S) value
patterns_pgN --- a mapping of site patterns to their total p(g|N) value
"""
st_to_pattern_probs = {}
st_to_pattern_probs_N = {}
trees_to_equality = {}
trees_to_equality_N = {}
gene_trees = generate_unique_trees(taxa, outgroup)
newick_patterns, inverses_to_counts = newicks_to_patterns_generator(taxa, gene_trees, outgroup)
# If inverses are not desired remove them
if not use_inv:
newick_patterns = remove_inverse(newick_patterns, inverses_to_counts)
for st in species_trees:
ts_to_pgS, ts_to_pgN = calculate_newicks_to_stats(st, network, gene_trees)
patterns_pgS, patterns_pgN = calculate_pattern_probabilities(newick_patterns, ts_to_pgS, ts_to_pgN)
st_to_pattern_probs[st] = sorted(patterns_pgS.items(), key=lambda tup: tup[1], reverse=True)
st_to_pattern_probs_N[st] = sorted(patterns_pgN.items(), key=lambda tup: tup[1], reverse=True)
# Generate equality sets based on p(gt|st)
for st in sorted(st_to_pattern_probs.keys()):
gt_probs = st_to_pattern_probs[st]
for i in range(len(gt_probs)):
gt1, prob1 = gt_probs[i]
equal_trees = set([])
for j in range(len(gt_probs)):
gt2, prob2 = gt_probs[j]
if approximately_equal(prob1, prob2):
equal_trees.add(gt2)
# Add the equality set to the mapping if it is non-empty
if len(equal_trees) != 0:
trees_to_equality[gt1] = equal_trees
# Generate equality sets based on p(gt|N)
for st in sorted(st_to_pattern_probs_N.keys()):
gt_probs = st_to_pattern_probs_N[st]
for i in range(len(gt_probs)):
gt1, prob1 = gt_probs[i]
equal_trees = set([])
for j in range(len(gt_probs)):
gt2, prob2 = gt_probs[j]
if approximately_equal(prob1, prob2):
equal_trees.add(gt2)
# Add the equality set to the mapping if it is non-empty
if len(equal_trees) != 0:
trees_to_equality_N[gt1] = equal_trees
return trees_to_equality, trees_to_equality_N, patterns_pgS, patterns_pgN
def set_of_interest(trees_to_equality, trees_to_equality_N):
"""
Inputs:
trees_to_equality --- a mapping of tree strings to a set of other trees with the same p(gt|st)
trees_to_equality_N --- a mapping of tree strings to a set of other trees with the same p(gt|N)
Output:
trees_of_interest --- a set of trees that changed equality under the species network
"""
trees_of_interest = set([])
for tree in trees_to_equality:
if tree not in trees_to_equality_N:
t_set = copy.deepcopy(trees_to_equality[tree])
t_set.add(tree)
trees_of_interest = trees_of_interest.union(t_set)
elif trees_to_equality[tree] != trees_to_equality_N[tree]:
t_set = copy.deepcopy(trees_to_equality[tree])
t_set.add(tree)
trees_of_interest = trees_of_interest.union(t_set)
return trees_of_interest
def concat_directory(directory_path):
"""
Concatenates all the alignments in a given directory and returns a single file.
Input:
directory_path --- a string path to the directory the user wants to use.
Output:
file_path --- a string path to the file that was created as a result of the concatenation.
"""
# filter out hidden files
filenames = filter(lambda n: not n.startswith(".") , natsorted(os.listdir(directory_path)))
# get the number of lines on each file
with open(os.path.join(directory_path, filenames[0]), "r") as f:
n = len(list(f))
# initialize a list with an empty string for each line
output_file_list = [""] * n
# Iterate over each folder in the given directory in numerical order
for i in range(len(filenames)):
# get full path of file
input_file = os.path.join(directory_path, filenames[i])
# if its a fasta file -> convert to phylip
if filenames[i].endswith(".fa") or filenames[i].endswith(".fasta"):
input_handle = open(input_file, "rU")
output_handle = open(input_file + ".phylip", "w")
alignments = AlignIO.parse(input_handle, "fasta")
AlignIO.write(alignments, output_handle, "phylip-sequential")
output_handle.close()
input_handle.close()
input_file = input_file + ".phylip"
# create a list of the input files lines
with open(input_file, 'r') as f:
input_file_list = [l.rstrip() for l in list(f)]
for j in range(len(input_file_list)):
# if this is the first file
if i == 0:
output_file_list[j] = input_file_list[j]
else:
if j == 0:
num_bp = int(input_file_list[0].split(" ")[2])
total_bp = int(output_file_list[j].split(" ")[2]) + num_bp
output_file_list[j] = " " + str(n - 1) + " " + str(total_bp)
else:
output_file_list[j] += input_file_list[j].split(" ")[-1]
# write the contents of the output file list to a text file
with open(os.path.abspath(directory_path) + "/concatFile.phylip.txt", "w") as o:
for line in output_file_list:
print >> o, line
return os.path.abspath(directory_path) + "/concatFile.phylip.txt"
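# NOTE: concat_directory assumes phylip headers of the form " <ntaxa> <nbp>" (with
# a leading space), since the base-pair total is read from index 2 of the
# space-split header line.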
def remove_inverse(newick_patterns, inverses_to_counts):
"""
Remove inverse site patterns
Input:
newick_patterns --- a mapping of newick strings to their site patterns
inverses_to_counts --- a mapping of site patterns to the number of times each appears as an inverse
Output:
newick_patterns --- the original mapping with inverse site patterns removed
"""
# Create a mapping of each site pattern to its inverse
patterns_to_inverse = {}
d = set([])
for newick in newick_patterns:
d = d.union(set(newick_patterns[newick]))
# Map each pattern to its inverse
for newick in newick_patterns:
for pattern in newick_patterns[newick]:
# Represent the pattern as a list
pattern_lst = [x for x in pattern]
# Create the inverse pattern
inv_lst = pattern_inverter([pattern_lst])[0]
inverse = ''.join(inv_lst)
# If the pattern is not already in the mapping map it
if pattern not in patterns_to_inverse.keys() and pattern not in patterns_to_inverse.values():
patterns_to_inverse[pattern] = inverse
# Real inverses are the site patterns that appear as inverses more frequently
real_inverses = []
# Iterate over all possible patterns
for pat in patterns_to_inverse:
possible_inv = patterns_to_inverse[pat]
# If a pattern only has one B define it as the inverse
if b_count(pat) == 1:
real_inverses.append(pat)
elif b_count(possible_inv) == 1:
real_inverses.append(possible_inv)
# If a pattern appears as an inverse more often than its "inverse" then it is an inverse
elif inverses_to_counts[pat] > inverses_to_counts[possible_inv]:
real_inverses.append(pat)
# Otherwise the "inverse" is the true inverse
else:
real_inverses.append(possible_inv)
# Remove all real inverses from the pattern sets
for newick in newick_patterns:
inverses_removed = list(newick_patterns[newick])
for p in newick_patterns[newick]:
if p in real_inverses:
inverses_removed.remove(p)
newick_patterns[newick] = inverses_removed
return newick_patterns
def b_count(pattern):
"""
Count the number of B's that occur in a site pattern
Input:
pattern --- a site pattern
Output:
num_b --- the number of B's in the site pattern
"""
num_b = 0
for char in pattern:
if char == "B":
num_b += 1
return num_b
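# Example: b_count("ABBA") -> 2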
def calculate_total_term_prob(patterns_pgS, term):
"""
Calculate the total probability for a term
"""
term_prob = 0
for pattern in term:
term_prob += patterns_pgS[pattern]
return term_prob
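# Illustration (hypothetical values): with patterns_pgS {"ABBA": 0.2, "BABA": 0.1},
# calculate_total_term_prob(patterns_pgS, ("ABBA", "BABA")) returns ~0.3 (a float sum).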
def calculate_generalized(alignments, species_tree=None, reticulations=None, outgroup=None, window_size=100000000000,
window_offset=100000000000, verbose=False, alpha=0.01, use_inv=False, useDir=False, directory="",
statistic=False, save=False, f="DGenStatistic_", plot=False, meta=False):
"""
Calculates the generalized D statistic for the given alignments
Input:
alignments --- a list of sequence alignments in phylip format
species_tree --- the inputted species tree over the given taxa
reticulations --- a tuple containing two dictionaries mapping the start leaves to end leaves
outgroup --- the desired root of the species tree
window_size --- the desired window size
window_offset --- the desired offset between windows
verbose --- a boolean for determining if extra information will be printed
alpha --- the significance level
use_inv --- a boolean for using inverse site patterns or not
useDir --- a boolean for determining if the user wants to input an entire directory of alignments or only a single alignment
directory --- a string path to the directory the user wants to use. NOTE: only necessary if useDir=True.
statistic --- a text file containing a saved statistic
save --- a boolean corresponding to save a statistic or not, note that saving uses block resizing
f --- the desired save statistic file name
plot --- a boolean corresponding to using plot formatting for the output
meta --- a string of metadata added to the plot formatting output
Outputs:
alignments_to_d_resized --- a mapping of alignments to their D statistic information using block resizing
alignments_to_windows_to_d --- a mapping of alignments to windowed D statistic values
n --- a condensed output string
s --- the full output string
"""
# If the user does not have a specific statistic file to use
if not statistic:
st = re.sub(r"\:\d+\.\d+", "", species_tree)
st = Tree(st)
st.set_outgroup(outgroup)
st.ladderize(direction=1)
st = st.write()
trees, taxa = branch_adjust(st)
network = generate_network_tree((0.1, 0.9), list(trees)[0], reticulations)
trees_to_equality, trees_to_equality_N, patterns_pgS, patterns_pgN = equality_sets(trees, network, taxa, outgroup, use_inv)
trees_of_interest = set_of_interest(trees_to_equality, trees_to_equality_N)
increase, decrease, increase_resized, decrease_resized, patterns_to_coeff = determine_patterns(
trees_of_interest, trees_to_equality, patterns_pgN, patterns_pgS, use_inv)
# Calculate the total probabilities for creating a coefficient
inc_prob = calculate_total_term_prob(patterns_pgS, increase)
dec_prob = calculate_total_term_prob(patterns_pgS, decrease)
if inc_prob != 0:
overall_coefficient = dec_prob / inc_prob
else:
overall_coefficient = 0
# If users want to save the statistic and speed up future runs
if save:
num = 0
file_name = f + ".txt"
while os.path.exists(file_name):
file_name = "DGenStatistic_{0}.txt".format(num)
num += 1
with open(file_name, "w") as text_file:
output_str = "Taxa: {0}\n".format(taxa)
text_file.write(output_str)
output_str = "Left Terms: {0}\n".format(increase_resized)
text_file.write(output_str)
output_str = "Right Terms: {0}\n".format(decrease_resized)
text_file.write(output_str)
output_str = "Statistic: {0}\n".format(generate_statistic_string((increase_resized, decrease_resized)))
text_file.write(output_str)
output_str = "Species Tree: {0}\n".format(species_tree)
text_file.write(output_str)
output_str = "Outgroup: {0}\n".format(outgroup)
text_file.write(output_str)
output_str = "Reticulations: {0}\n".format(reticulations)
text_file.write(output_str)
# Users can specify a previously generated statistic to use for alignment counting
else:
with(open(statistic, "r")) as s:
lines = s.readlines()
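# NOTE: eval() reconstructs the saved taxa and term lists, so the statistic
# file is treated as trusted input.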
taxa = eval(lines[0].split(None, 1)[1])
increase = eval(lines[1].split(None, 2)[2])
decrease = eval(lines[2].split(None, 2)[2])
outgroup = lines[5].split(None, 1)[1].replace("\n", "")
increase_resized = increase
decrease_resized = decrease
overall_coefficient = 1
patterns_to_coeff = {}
if useDir:
alignments = [concat_directory(directory)]
alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff = calculate_L(
alignments, taxa, outgroup, (increase, decrease), verbose, alpha, (increase_resized, decrease_resized),
overall_coefficient, patterns_to_coeff)
alignments_to_windows_to_d = calculate_windows_to_L(alignments, taxa, outgroup, (increase_resized, decrease_resized), window_size,
window_offset, verbose, alpha)
s = ""
n = ""
# Create the output string
if verbose and not statistic:
s += "\n"
s += "Probability of gene tree patterns: " + str(patterns_pgS) + "\n"
s += "\n"
s += "Probability of species network patterns:" + str(patterns_pgN) + "\n"
s += "\n"
s += "Patterns that were formerly equal with increasing probability: " + str(increase) + "\n"
s += "Patterns that were formerly equal with decreasing probability: " + str(decrease) + "\n"
s += "Total p(gt|st) for increasing site patterns: " + str(inc_prob) + "\n"
s += "Total p(gt|st) for decreasing site patterns: " + str(dec_prob) + "\n"
s += "\n"
s += "Taxa order used for site patterns: " + str(taxa) + "\n"
s += "Statistic without coefficient weighting: " + str(generate_statistic_string((increase, decrease))) + "\n"
s += "\n"
s += "Increasing patterns after block resizing: " + str(increase_resized) + "\n"
s += "Decreasing patterns after block resizing: " + str(decrease_resized) + "\n"
s += "Total p(gt|st) for resized increasing site patterns: " + str(calculate_total_term_prob(patterns_pgS, increase_resized)) + "\n"
s += "Total p(gt|st) for resized decreasing site patterns: " + str(calculate_total_term_prob(patterns_pgS, decrease_resized)) + "\n"
s += "Statistic using block resizing: " + str(generate_statistic_string((increase_resized, decrease_resized))) + "\n"
s += "\n"
s += "\n"
s += "Information for each file: " + "\n"
s += display_alignment_info(alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff)
print s
elif verbose and statistic:
s += "Taxa order used for site patterns: " + str(taxa) + "\n"
s += "\n"
s += "Patterns that were formerly equal with increasing probability: " + str(increase) + "\n"
s += "Patterns that were formerly equal with decreasing probability: " + str(decrease) + "\n"
s += "\n"
s += "Statistic: " + str(generate_statistic_string((increase, decrease))) + "\n"
s += "\n"
s += "Information for each file: " + "\n"
n += "Information for each file: " + "\n"
for alignment in alignments_to_d_resized:
l_stat, significant, left_counts, right_counts, num_ignored, chisq, pval = alignments_to_d_resized[alignment]
s += alignment + ": "
n += alignment + ": " + "\n"
s += "\n"
s += "Final Overall D value using Block Resizing Method: {0}".format(l_stat) + "\n"
s += "Significant deviation from 0: {0}".format(significant) + "\n"
s += "Overall Chi-Squared statistic: " + str(chisq) + "\n"
s += "Overall p value: " + str(pval) + "\n"
s += "Number of site ignored due to \"N\" or \"-\": {0}".format(num_ignored) + "\n"
s += "\n"
s += "Left term counts: " + "\n"
for pattern in left_counts:
s += pattern + ": {0}".format(left_counts[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_d[alignment]) + "\n"
s += "\n"
s += "Final Overall D value {0}".format(l_stat) + "\n"
s += "Significant deviation from 0: {0}".format(significant) + "\n"
n += "Final Overall D value {0}".format(l_stat) + "\n"
n += "Significant deviation from 0: {0}".format(significant) + "\n"
print s
else:
for alignment in alignments_to_d_resized:
l_stat_r, significant_r = alignments_to_d_resized[alignment]
l_stat_pc, significant_pc = alignments_to_d_pattern_coeff[alignment]
l_stat_oc, significant_oc = alignments_to_d_ovr_coeff[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_d[alignment]) + "\n"
s += "\n"
s += "Final Overall D value using Block Resizing Method: {0}".format(l_stat_r) + "\n"
s += "Significant deviation from 0: {0}".format(significant_r) + "\n"
s += "\n"
s += "Final Overall D value using Pattern Coefficient Method: {0}".format(l_stat_pc) + "\n"
s += "Significant deviation from 0: {0}".format(significant_pc) + "\n"
s += "\n"
s += "Final Overall D value using Overall Coefficient Method: {0}".format(l_stat_oc) + "\n"
s += "Significant deviation from 0: {0}".format(significant_oc) + "\n"
print s
if plot:
plot_formatting((alignments_to_d_resized, alignments_to_windows_to_d), plot, meta)
return alignments_to_d_resized, alignments_to_windows_to_d, n, s
def display_alignment_info(alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff):
"""
Print information for an alignment to D mapping
Inputs:
alignments_to_d_resized --- a mapping of alignment files to their D information using block resizing
alignments_to_d_pattern_coeff --- a mapping of alignment files to their D information using pattern coefficient
alignments_to_d_ovr_coeff --- a mapping of alignment files to their D information using the overall coefficient
Output:
s --- the output string
"""
s = ""
n = ""
for alignment in alignments_to_d_resized:
# Get the information for each alignment file
l_stat, significant, left_counts_res, right_counts_res, num_ignored, chisq, pval = alignments_to_d_resized[alignment]
output_resized = [("Final Overall D value using Block Resizing method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_pcoeff, right_counts, num_ignored, chisq, pval = alignments_to_d_pattern_coeff[alignment]
output_pattern_coeff = [("Final Overall D value using Pattern Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_ocoeff, right_counts, num_ignored, chisq, pval, coeff = alignments_to_d_ovr_coeff[alignment]
output_overall_coeff = [("Final Overall D value using Overall Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
# Create the output string
s += "\n"
s += "\n"
s += alignment + ": "
s += "\n"
n += "\n" + "\n" + alignment + ": " + "\n"
# Print output for resizing method
for output in output_resized:
s += str(output[0]) + str(output[1]) + "\n"
n += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts: " + "\n"
for pattern in left_counts_res:
s += pattern + ": {0}".format(left_counts_res[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts_res:
s += pattern + ": {0}".format(right_counts_res[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for pattern coefficient method
for output in output_pattern_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts weighted by pattern probability: " + "\n"
for pattern in left_counts_pcoeff:
s += pattern + ": {0}".format(left_counts_pcoeff[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for overall coefficient method
for output in output_overall_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Overall Coefficient for weighting: {0}".format(coeff) + "\n"
s += "Left term counts after weighting: " + "\n"
for pattern in left_counts_ocoeff:
s += pattern + ": {0}".format(left_counts_ocoeff[pattern] * coeff) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
return s
def plot_formatting(info_tuple, name, meta):
"""
Reformats and writes the dictionary output to a text file to make plotting it in Excel easy
Input:
info_tuple --- a tuple (alignments_to_d, alignments_to_windows_to_d) from the calculate_generalized output
name --- the base name for the output text file
meta --- a string of metadata added to each output line
"""
alignments_to_d, alignments_to_windows_to_d = info_tuple
num = 0
file_name = "{0}_{1}.txt".format(name, num)
while os.path.exists(file_name):
num += 1
file_name = "{0}_{1}.txt".format(name, num)
with open(file_name, "w") as text_file:
for alignment in alignments_to_d:
l_stat, significant = alignments_to_d[alignment][0], alignments_to_d[alignment][1]
significant = str(significant).upper()
windows_to_l = alignments_to_windows_to_d[alignment]
output_str = "{0}, {1}, {2} \n".format(l_stat, meta, significant)
text_file.write(output_str)
if __name__ == '__main__':
r = [('P3', 'P2')]
species_tree = '(((P1,P2),P3),O);'
# species_tree = '((P1,P2),(P3,O));'
# species_tree = '(((P1,P2),(P3,P4)),O);' # DFOIL tree
# species_tree = '((((P1,P2),P3),P4),O);' # Smallest asymmetrical tree
# species_tree = '(((P1,P2),(P3,(P4,P5))),O);'
# n = '((P2,(P1,P3)),O);'
# n = '(((P1,P3),P2),O);'
# n = '((P1,(P2,(P3,P4))),O);'
# t = ["P1", "P2", "P3", "P4", "O"]
# o = "O"
# print site_pattern_generator(t, n, o, False)
if platform == "darwin":
alignments = ["/Users/Peter/PycharmProjects/ALPHA/exampleFiles/seqfile.txt"]
else:
alignments = ["C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim2\\seqfile.txt"]
# alignments = ["C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim5\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim7\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim4\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim6\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim8\\seqfile"]
print calculate_generalized(alignments, species_tree, r, "O", 500000, 500000,
alpha=0.01, verbose=False, use_inv=False)
# alignments = ["C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames"]
#
#
# species_tree, r = '((((P1,P4),P3),P2),O);', [('P3', 'P2'),('P1', 'P2')]
#
# # 3 to 2
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_6tax_sub_3to2.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # 4 to 3
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_6tax_sub_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # both
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f='stat_6tax_sub_3to2_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# species_tree, r = "(((P5,P6),((P1,P2),P3)),P4);", [('P3', 'P2')]
# alignments = ["C:\\Users\\travi\\Desktop\\MosquitoConcat.phylip.txt"]
# species_tree, r = '((C,G),(((A,Q),L),R));', [('Q', 'G')]
# print calculate_generalized(alignments, 500000, 500000, statistic="C:\\Users\\travi\\Desktop\\stat_mosqSubset.txt", alpha=0.01, verbose=False, use_inv=False)
# alignments = ["C:\\Users\\travi\\Desktop\\MosquitoConcat.phylip.txt"]
# alignments = ["C:\\Users\\travi\\Desktop\\3L\\3L\\3L.41960870.634.fa.phylip"]
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'G')], 50000, 50000, True, save=True, f='stat_QuaToGam.txt')
# print "Q to G done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments, '((C,G),(((A,Q),L),R));', [('Q', 'G')], 50000, 50000, True, save=True, f='stat_inv_QuaToGam.txt', use_inv=True)
# print "Q to G done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # next generate Q to R, the bottom right one in dingqiaos fig
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'R')], 50000, 50000, True, save=True, f='stat_QuaToMer.txt')
# print "Q to R done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'R')], 50000, 50000, True, save=True, f='stat_inv_QuaToMer.txt', use_inv=True)
# print "Q to R done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # last generate L to R, the top right one in dingqiaos fig
# calculate_generalized(alignments, '((C,G),(((A,Q),L),R));', [('L', 'R')], 50000, 50000, True, save=True, f='stat_MelToMer.txt')
# print "L to R done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('L', 'R')], 50000, 50000, True, save=True, f='stat_inv_MelToMer.txt', use_inv=True)
# print "L to R done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# s = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_85.txt"
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=s,
# verbose=True, use_inv=False)
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# calculate_generalized(alignments, species_tree, r, 500000, 500000, True, 0.01, statistic=False, save=True, f="C:\\Users\\travi\\Documents\\ALPHA\\ABBABABATest2")
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic="C:\\Users\\travi\\Documents\\ALPHA\\ABBABABATest2.txt", verbose=True)
#
# save_file = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_11.txt"
# plot_formatting(calculate_generalized(alignments, statistic=save_file, verbose=True))
# python -c"from CalculateGeneralizedDStatistic import *; plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 100000, 100000, True, 0.01), True)"
# Uncomment this to speed up 6 taxa debugging
# trees_of_interest = set(['BBABBA', 'ABBBAA', 'BABBBA', 'ABBABA', 'ABBBBA', 'AAABAA', 'ABAABA', 'BBBABA', 'BABABA', 'ABBAAA',
# 'BAAABA', 'ABABAA', 'BABBAA', 'BAAAAA', 'BBBBAA', 'ABABBA', 'BAABBA', 'AABAAA', 'BAABAA', 'BABAAA',
# 'ABAAAA', 'AAAABA'])
# trees_to_equality = {'BBABBA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABBBAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BABBBA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']),
# 'AABBAA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'AAABAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'BBBABA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABBAAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BBAABA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'BABAAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BAAAAA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']),
# 'AABABA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'BBBBAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABABBA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BAABAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BABBAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'AAAABA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'AABBBA': set(['BBAAAA', 'AABBBA']),
# 'ABAABA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'ABBBBA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']), 'BBBAAA': set(['AAABBA', 'BBBAAA']),
# 'ABBABA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BBABAA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']), 'AAABBA': set(['AAABBA', 'BBBAAA']),
# 'BAAABA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BBAAAA': set(['BBAAAA', 'AABBBA']),
# 'ABABAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BABABA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BAABBA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'AABAAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABAAAA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA'])}
# patterns_pgN = {'BBABBA': 0.032771235848126294, 'ABBBAA': 0.02098066450471356, 'BABBBA': 0.161652195191427,
# 'AABBAA': 0.03153707911255491, 'AAABAA': 0.1777623151093396, 'BBBABA': 0.1777623151093396,
# 'ABBAAA': 0.014809880826856624, 'BBAABA': 0.03153707911255491, 'BABAAA': 0.63719275136487,
# 'BAAAAA': 0.016661115930213705, 'AABABA': 0.03153707911255492, 'BBBBAA': 0.1777623151093396,
# 'ABABBA': 0.63719275136487, 'BAABAA': 0.02098066450471356, 'BABBAA': 0.15980096008806993,
# 'AAAABA': 0.1777623151093396, 'AABBBA': 0.08944867415584207, 'ABAABA': 0.15980096008806993,
# 'ABBBBA': 0.016661115930213705, 'BBBAAA': 0.2180376149041211, 'ABBABA': 0.02098066450471356,
# 'BBABAA': 0.03153707911255492, 'AAABBA': 0.2180376149041211, 'BAAABA': 0.02098066450471356,
# 'BBAAAA': 0.08944867415584207, 'ABABAA': 0.15980096008806996, 'BABABA': 0.15980096008806996,
# 'BAABBA': 0.014809880826856624, 'AABAAA': 0.032771235848126294, 'ABAAAA': 0.161652195191427}
# patterns_pgS = {'BBABBA': 0.11019080921752063, 'ABBBAA': 0.037037004438901525, 'BABBBA': 0.029411738819127668,
# 'AABBAA': 0.10801216189758524, 'AAABAA': 0.11019080921752065, 'BBBABA': 0.11019080921752065,
# 'ABBAAA': 0.026143767839224594, 'BBAABA': 0.10801216189758524, 'BABAAA': 0.03703700443890152,
# 'BAAAAA': 0.029411738819127668, 'AABABA': 0.10801216189758527, 'BBBBAA': 0.11019080921752064,
# 'ABABBA': 0.03703700443890152, 'BAABAA': 0.03703700443890151, 'BABBAA': 0.026143767839224594,
# 'AAAABA': 0.11019080921752064, 'AABBBA': 0.38034805363207147, 'ABAABA': 0.026143767839224594,
# 'ABBBBA': 0.029411738819127668, 'BBBAAA': 0.1303855768171189, 'ABBABA': 0.03703700443890151,
# 'BBABAA': 0.10801216189758527, 'AAABBA': 0.1303855768171189, 'BAAABA': 0.037037004438901525,
# 'BBAAAA': 0.38034805363207147, 'ABABAA': 0.026143767839224594, 'BABABA': 0.026143767839224594,
# 'BAABBA': 0.026143767839224594, 'AABAAA': 0.11019080921752063, 'ABAAAA': 0.029411738819127668}
# Debug for CL introgression file
# trees_of_interest = set(['ABBBAA', 'ABAABA', 'AABBAA', 'BBBAAA', 'ABBABA', 'BBABAA', 'BABABA', 'AAABBA', 'BBAABA', 'BAAABA', 'ABABAA',
# 'AABABA', 'ABABBA', 'BAABBA', 'ABBAAA', 'BAABAA', 'BABAAA', 'BABBAA'])
# trees_to_equality = {'BBABBA': set(['BBABBA']), 'ABBBAA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BABBBA': set(['BABBBA']), 'AABBAA': set(['AABABA', 'AAABBA', 'AABBAA']), 'BBBABA': set(['BBBABA']),
# 'ABBAAA': set(['ABABAA', 'ABBAAA', 'ABAABA']), 'BBAABA': set(['BBBAAA', 'BBABAA', 'BBAABA']),
# 'BABAAA': set(['BABAAA', 'BAAABA', 'BAABAA']), 'AABABA': set(['AABABA', 'AAABBA', 'AABBAA']),
# 'BBBBAA': set(['BBBBAA']), 'ABABBA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BAABAA': set(['BABAAA', 'BAAABA', 'BAABAA']),
# 'BABBAA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']), 'AABBBA': set(['AABBBA']),
# 'ABAABA': set(['ABABAA', 'ABBAAA', 'ABAABA']), 'ABBBBA': set(['ABBBBA']),
# 'BBBAAA': set(['BBBAAA', 'BBABAA', 'BBAABA']),
# 'ABBABA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BBABAA': set(['BBBAAA', 'BBABAA', 'BBAABA']), 'AAABBA': set(['AABABA', 'AAABBA', 'AABBAA']),
# 'BAAABA': set(['BABAAA', 'BAAABA', 'BAABAA']), 'BBAAAA': set(['BBAAAA']),
# 'ABABAA': set(['ABABAA', 'ABBAAA', 'ABAABA']),
# 'BABABA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BAABBA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA'])}
# patterns_pgN = {'BBABBA': 0.25178403007053934, 'ABBBAA': 0.00925617551678539, 'BABBBA': 0.14956960525299257,
# 'AABBAA': 0.011432470392906461, 'BBBABA': 0.1888697257294821, 'ABBAAA': 0.006170783677856928,
# 'BBAABA': 0.025366295434697986, 'BABAAA': 0.22118908909853413, 'AABABA': 0.011432470392906461,
# 'BBBBAA': 0.2346987308343348, 'ABABBA': 0.11799948496269537, 'BAABAA': 0.0037024702067141564,
# 'BABBAA': 0.1542472547779987, 'AABBBA': 0.02133876545521984, 'ABAABA': 0.042418553493160246,
# 'ABBBBA': 0.011107410620142468, 'BBBAAA': 0.1703573746959113, 'ABBABA': 0.00925617551678539,
# 'BBABAA': 0.025366295434697986, 'AAABBA': 0.04768024020820978, 'BAAABA': 0.0037024702067141564,
# 'BBAAAA': 0.027867650083583044, 'ABABAA': 0.04241855349316025, 'BABABA': 0.15424725477799872,
# 'BAABBA': 0.00925617551678539}
# patterns_pgS = {'BBABBA': 0.09979995455763162, 'ABBBAA': 0.016339854899515373, 'BABBBA': 0.15058034441671714,
# 'AABBAA': 0.03326665151921054, 'BBBABA': 0.12979863509693912, 'ABBAAA': 0.010893236599676915,
# 'BBAABA': 0.09711892529790832, 'BABAAA': 0.006535941959806149, 'AABABA': 0.03326665151921054,
# 'BBBBAA': 0.15979731563624658, 'ABABBA': 0.016339854899515373, 'BAABAA': 0.006535941959806149,
# 'BABBAA': 0.016339854899515373, 'AABBBA': 0.0769241576983101, 'ABAABA': 0.010893236599676915,
# 'ABBBBA': 0.019607825879418447, 'BBBAAA': 0.09711892529790833, 'ABBABA': 0.016339854899515373,
# 'BBABAA': 0.09711892529790832, 'AAABBA': 0.03326665151921054, 'BAAABA': 0.006535941959806149,
# 'BBAAAA': 0.12770454755739558, 'ABABAA': 0.010893236599676915, 'BABABA': 0.016339854899515373,
# 'BAABBA': 0.016339854899515373}
|
"""Configuration for Invenio-Formatter."""
from __future__ import absolute_import, print_function
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {}
"""Mapping of titles."""
|
from __future__ import unicode_literals
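# Test fixtures use Swedish transaction terms: "Köp" = buy, "Sälj" = sell,
# "Insättning" = deposit.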
test_data = [
{
"Transaction" : {
"transaction_date" : "2015-01-08",
"amount" : -1286.75,
"security_amount" : 4.0726,
"security_rate" : 413.68
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Sälj"
},
"TransactionData" : {
"ISIN" : "SE0000000001",
"courtage" : 10.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -1329.5,
"security_amount" : 15.1663,
"security_rate" : 222.17
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000002",
"courtage" : 20
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -682.61,
"security_amount" : 0.8534,
"security_rate" : 1974
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000003",
"courtage" : 30.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-05",
"amount" : 2728.8,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Januari"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000004",
"courtage" : 40
}
},
{
"Transaction" : {
"transaction_date" : "2014-12-08",
"amount" : -1144.98,
"security_amount" : 5.1423,
"security_rate" : 222.66
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000005",
"courtage" : 50.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-11-26",
"amount" : 2145.42,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning November"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000006",
"courtage" : 60
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-29",
"amount" : -863.81,
"security_amount" : 16.2254,
"security_rate" : 114.87
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000007",
"courtage" : 70.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-28",
"amount" : -862.99,
"security_amount" : 8.7321,
"security_rate" : 213.35
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000008",
"courtage" : 80
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-27",
"amount" : 2826.80,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Oktober"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000009",
"courtage" : 90.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-02",
"amount" : -10218.04,
"security_amount" : 149.8263,
"security_rate" : 114.92
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE00000000010",
"courtage" : 100
}
},
]
|
import logging
from marshmallow import ValidationError, post_load
from marshmallow_jsonapi import Schema, fields
from timeswitch.auth.dao import User
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
LOGGER = logging.getLogger(__name__)
class AppError(Exception):
pass
def dasherize(text):
return text.replace('_', '-')
class UserSchema(Schema):
id = fields.String(dump_only=True, required=True)
name = fields.String(required=True)
password = fields.String(load_only=True, required=False, attribute="password_clear")
new_password = fields.String(load_only=True, required=False)
email = fields.Email(required=False)
last_loggin = fields.String(required=False)
privilege = fields.String(required=False)
@post_load
def make_user(self, data):
return User(**data)
def handle_error(self, exc, data):
raise ValidationError('An error occurred with input: {0} \n {1}'.format(data, str(exc)))
class Meta:
type_ = 'users'
# inflect = dasherize
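# Minimal usage sketch (not part of the original module). The payload shape
# follows marshmallow-jsonapi conventions and the attribute values are made
# up for illustration; depending on the marshmallow version, load() returns
# the User directly or a (data, errors) result:
#
#     schema = UserSchema()
#     payload = {'data': {'type': 'users',
#                         'attributes': {'name': 'alice',
#                                        'password': 'secret'}}}
#     user = schema.load(payload)  # make_user() turns the dict into a User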
|
from __future__ import absolute_import, unicode_literals
from random import shuffle
class Carta():
def __init__(self, numero, naipe):
self.numero = numero
self.naipe = naipe
def __repr__(self):
return '%s de %s' % (self.numero, self.naipe)
class Baralho():
def __init__(self):
self._cartas = [Carta(numero, naipe) for numero in 'As 1 2 3 4 5 6 7 8 9 10 Q J K'.split()
for naipe in 'Ouros Espadas Copas Paus'.split()]
def __getitem__(self, index):
return self._cartas[index]
def __setitem__(self, key, value):
self._cartas[key] = value
def __len__(self):
return len(self._cartas)
print Carta('As', 'Paus')
baralho = Baralho()
baralho[55] = Carta('As', 'Paus')
shuffle(baralho)
for carta in baralho:
print carta
print baralho[0]
class Vetor():
def __init__(self, x, y):
self.y = y
self.x = x
def __repr__(self):
return '(%s, %s)' % (self.x, self.y)
def __add__(self, other):
return Vetor(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x==other.x and self.y==other.y
vetor1 = Vetor(1, 1)
vetor2 = Vetor(1, 1)
print vetor1 + vetor2
print vetor1 == vetor2
|
__all__ = ("settings", "urls", "wsgi")
__version__ = "0.159.0"
|
import sys
from os import path
current_dir = path.dirname(__file__)
sys.path.insert(0, path.join(path.dirname(current_dir), 'wdom'))
|
import sys
import time
from pprint import pprint
import telepot
from telepot.namedtuple import StickerSet
TOKEN = sys.argv[1]
USER_ID = long(sys.argv[2])
STICKER_SET = sys.argv[3]
bot = telepot.Bot(TOKEN)
f = bot.uploadStickerFile(USER_ID, open('gandhi.png', 'rb'))
print 'Uploaded Gandhi'
bot.addStickerToSet(USER_ID, STICKER_SET, f['file_id'], u'\U0001f60a')
bot.addStickerToSet(USER_ID, STICKER_SET, open('lincoln.png', 'rb'), u'\U0001f60a')
print 'Added Gandhi and Lincoln to set'
s = bot.getStickerSet(STICKER_SET)
pprint(s)
ss = StickerSet(**s)
for s in ss.stickers:
bot.deleteStickerFromSet(s.file_id)
print 'Deleted', s.file_id
time.sleep(3) # throttle
s = bot.getStickerSet(STICKER_SET)
pprint(s)
|
from __future__ import unicode_literals
import unittest
from ship.datastructures import rowdatacollection as rdc
from ship.datastructures import dataobject as do
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
class RowDataCollectionTests(unittest.TestCase):
def setUp(self):
# Create some object to use and add a couple of rows
# create chainage in position 1
self.obj1 = do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3)
self.obj1.data_collection.append(0.00)
self.obj1.data_collection.append(3.65)
# Create elevation in position 2
self.obj2 = do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3)
self.obj2.data_collection.append(32.345)
self.obj2.data_collection.append(33.45)
# Create roughness in position 3
self.obj3 = do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=None, no_of_dps=3)
self.obj3.data_collection.append(0.035)
self.obj3.data_collection.append(0.035)
self.testcol = rdc.RowDataCollection()
self.testcol._collection.append(self.obj1)
self.testcol._collection.append(self.obj2)
self.testcol._collection.append(self.obj3)
def test_initCollection(self):
        '''addToCollection() should build a collection equal to one assembled by direct appends.'''
# Create a dummy collection
obj1 = do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3)
obj2 = do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3)
obj3 = do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3)
localcol = rdc.RowDataCollection()
localcol._collection.append(obj1)
localcol._collection.append(obj2)
localcol._collection.append(obj3)
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
# Check that they're the same
col_eq, msg = self.checkCollectionEqual(localcol, col)
self.assertTrue(col_eq, 'rdc.RowDataCollection initialisation fail - ' + msg)
def test_bulkInitCollection(self):
objs = [
do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3),
do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3),
do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3),
]
col = rdc.RowDataCollection.bulkInitCollection(objs)
localcol = rdc.RowDataCollection()
localcol._collection.append(objs[0])
localcol._collection.append(objs[1])
localcol._collection.append(objs[2])
# Check they're the same
col_eq, msg = self.checkCollectionEqual(localcol, col)
self.assertTrue(col_eq, 'rdc.RowDataCollection initialisation fail - ' + msg)
def checkCollectionEqual(self, c1, c2):
'''Check the two given collections to make sure that they contain the same data.
@param c1: First rdc.RowDataCollection object
@param c2: Second rdc.RowDataCollection object
        @return: Tuple of (True, '') if equal, or (False, reason) if not.
'''
if not len(c1._collection) == len(c2._collection):
return False, 'Collections are different lengths'
for i in range(0, len(c1._collection)):
if not c1._collection[i].data_type == c2._collection[i].data_type:
return False, 'Collections have different data_types'
if not c1._collection[i].format_str == c2._collection[i].format_str:
return False, 'Collections have different format_str'
if not c1._collection[i].default == c2._collection[i].default:
return False, 'Collections have different default'
for j in range(0, len(c1._collection[i].data_collection)):
                if not c1._collection[i].data_collection[j] == c2._collection[i].data_collection[j]:
return False, 'Collections have different data'
return True, ''
def test_indexOfDataObject(self):
"""Should return the corrent index of a particular ADataObject in colleciton."""
index1 = self.testcol.indexOfDataObject(rdt.CHAINAGE)
index2 = self.testcol.indexOfDataObject(rdt.ELEVATION)
index3 = self.testcol.indexOfDataObject(rdt.ROUGHNESS)
        self.assertEqual(index1, 0)
        self.assertEqual(index2, 1)
        self.assertEqual(index3, 2)
def test_iterateRows(self):
"""Test generator for complete row as a list"""
testrows = [
[0.00, 32.345, 0.035],
[3.65, 33.45, 0.035],
]
i = 0
for row in self.testcol.iterateRows():
self.assertListEqual(row, testrows[i])
i += 1
def test_iterateRowsWithKey(self):
"""Test generator for a single DataObject"""
testrows = [
32.345,
33.45,
]
i = 0
for row in self.testcol.iterateRows(rdt.ELEVATION):
self.assertEqual(row, testrows[i])
i += 1
def test_rowAsDict(self):
"""Shoud return a row as a dict of single values."""
test_dict = {rdt.CHAINAGE: 0.00, rdt.ELEVATION: 32.345, rdt.ROUGHNESS: 0.035}
row = self.testcol.rowAsDict(0)
self.assertDictEqual(row, test_dict)
def test_rowAsList(self):
test_list = [0.00, 32.345, 0.035]
row = self.testcol.rowAsList(0)
self.assertListEqual(row, test_list)
def test_dataObject(self):
"""Should return the correct ADataObject."""
test_vals = [0.00, 3.65]
obj = self.testcol.dataObject(rdt.CHAINAGE)
self.assertEqual(obj.data_type, rdt.CHAINAGE)
for i, o in enumerate(obj):
self.assertEqual(o, test_vals[i])
def test_dataObjectAsList(self):
"""Should return the contents of a DataObject as a list."""
test_list = [0.00, 3.65]
obj_list = self.testcol.dataObjectAsList(rdt.CHAINAGE)
self.assertListEqual(obj_list, test_list)
def test_toList(self):
test_list = [
[0.00, 3.65],
[32.345, 33.45],
[0.035, 0.035]
]
row_list = self.testcol.toList()
self.assertListEqual(row_list, test_list)
def test_toDict(self):
test_dict = {
rdt.CHAINAGE: [0.00, 3.65],
rdt.ELEVATION: [32.345, 33.45],
rdt.ROUGHNESS: [0.035, 0.035],
}
row_dict = self.testcol.toDict()
self.assertDictEqual(row_dict, test_dict)
def test_addValue(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
col._addValue(rdt.CHAINAGE, 2.5)
self.assertEqual(col._collection[0][0], 2.5)
def test_setValue(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
col._collection[0].addValue(2.5)
self.assertEqual(col._collection[0][0], 2.5)
col._setValue(rdt.CHAINAGE, 3.5, 0)
self.assertEqual(col._collection[0][0], 3.5)
def test_getPrintableRow(self):
test_row = ' 0.000 32.345 0.035'
row = self.testcol.getPrintableRow(0)
self.assertEqual(row, test_row)
def test_updateRow(self):
new_row = {rdt.CHAINAGE: 0.1, rdt.ELEVATION: 40, rdt.ROUGHNESS: 0.06}
self.testcol.updateRow(new_row, 0)
row = self.testcol.rowAsDict(0)
self.assertDictEqual(row, new_row)
with self.assertRaises(IndexError):
self.testcol.updateRow(new_row, 3)
fake_row = {'fakekey': 4.3, 'andagain': 3454}
with self.assertRaises(KeyError):
self.testcol.updateRow(fake_row, 0)
def test_addRow(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
new_row = {rdt.CHAINAGE: 3.0, rdt.ELEVATION: 41, rdt.ROUGHNESS: 0.06}
new_row2 = {rdt.CHAINAGE: 6.0, rdt.ELEVATION: 42, rdt.ROUGHNESS: 0.07}
new_row3 = {rdt.CHAINAGE: 10.0, rdt.ELEVATION: 43, rdt.ROUGHNESS: 0.08}
new_row4 = {rdt.CHAINAGE: 20.0, rdt.ELEVATION: 44, rdt.ROUGHNESS: 0.09}
# append and insert rows
col.addRow(new_row2)
col.addRow(new_row, 0)
# append and insert again
col.addRow(new_row4)
col.addRow(new_row3, 2)
row = col.rowAsDict(0)
row2 = col.rowAsDict(1)
row3 = col.rowAsDict(2)
row4 = col.rowAsDict(3)
        self.assertDictEqual(row, new_row)
        self.assertDictEqual(row2, new_row2)
        self.assertDictEqual(row3, new_row3)
        self.assertDictEqual(row4, new_row4)
fake_row = {59: 4.3}
with self.assertRaises(KeyError):
col.addRow(fake_row)
def test_numberOfRows(self):
self.assertEqual(self.testcol.numberOfRows(), 2)
def test_deleteRow(self):
test_list = [3.65, 33.45, 0.035]
self.testcol.deleteRow(0)
self.assertEqual(self.testcol.numberOfRows(), 1)
row = self.testcol.rowAsList(0)
self.assertListEqual(row, test_list)
|
"""
Django settings for busquecursos project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'hb&=!izzysndvyjd_i@2pdx^d&px8ty%1g3#&%l$k))lpo(dvf'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'busquecursos.urls'
WSGI_APPLICATION = 'busquecursos.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
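# Local development sketch: with these settings, "python manage.py migrate"
# creates db.sqlite3 inside BASE_DIR and "python manage.py runserver" serves
# the 'website' app with DEBUG enabled.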
|
class PortalMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
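# To activate this middleware, add its dotted path to the MIDDLEWARE setting.
# The module path below is an assumption about where this file lives:
#
#     MIDDLEWARE = [
#         ...,
#         'portal.middleware.PortalMiddleware',
#     ]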
|
from snovault import upgrade_step
@upgrade_step('gene', '1', '2')
def gene_1_2(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5005
# go_annotations are replaced by a link on UI to GO
value.pop('go_annotations', None)
@upgrade_step('gene', '2', '3')
def gene_2_3(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-6228
if value.get('locations') == []:
value.pop('locations', None)
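# Rough illustration of what these steps do to a document (hypothetical
# values; snovault normally invokes upgrade steps itself via the registry):
#
#     value = {'go_annotations': ['...'], 'symbol': 'ABC'}
#     gene_1_2(value, system=None)   # value is now {'symbol': 'ABC'}
#
#     value = {'locations': [], 'symbol': 'ABC'}
#     gene_2_3(value, system=None)   # the empty locations list is dropped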
|
import numpy as np
import os,sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-vis') # 1 plot cropped point cloud
ap.add_argument('-refine') # 1 refine mesh
ap.add_argument('-clean') # 1 remove tmp files
args = ap.parse_args()
# each flag defaults to 0 when not given on the command line
vis = int(args.vis) if args.vis is not None else 0
refine = int(args.refine) if args.refine is not None else 0
clean = int(args.clean) if args.clean is not None else 0
z=np.linspace(.2, -.8, num=100)
y=np.linspace(-.625,.625, num=120)
grid=np.meshgrid(y,z)
x=np.zeros((len(z)*len(y),1),dtype=np.float)
dat_vert=np.hstack((x,grid[0].reshape(x.shape),grid[1].reshape(x.shape)))
wl=np.linspace(.12,.18,num=8); amp=.03125*np.sqrt(wl)
e=1.025; r=-.2
dip=70.; zcnt=-.35
omg=[ 0.82976173, 0.89624834, 0.03829284, -0.50016345, -1.06606012, 1.40505898, -1.24256034, 1.28623393]
L=dat_vert[1,:].max()-dat_vert[1,:].min()
zmax=z.max(); zmin=z.min()
for i in range(len(wl)):
phs=dat_vert[:,1]/wl[i]*np.pi+omg[i]
dat_vert[:,0]=dat_vert[:,0]+amp[i]*np.cos(phs)*(e*zmax-dat_vert[:,2])/(e*zmax-zmin)*np.exp(r*abs(phs)/np.pi)
dat_vert[:,0]=dat_vert[:,0]+(zcnt-dat_vert[:,2])*np.tan((90.-dip)/180.*np.pi)
def flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup):
b1=-slope1*trunc1-.7
b2=-slope2*trunc2-.7
in_id=np.where(np.logical_and(dat_vert[:,2]-slope1*dat_vert[:,1]<b1, dat_vert[:,2]-slope2*dat_vert[:,1]<b2))[0]
out_id=np.setdiff1d(np.array(range(len(dat_vert)),dtype=np.int32),in_id)
x_shift=dat_vert[in_id,0]
# ridge patch
k=0
zup=dat_vert[:,2].max()
zlw=dat_vert[:,2].min()
for i in in_id:
r=abs(dat_vert[i,1]-.5*(trunc1+trunc2))
R=.5*((dat_vert[i,2]-b2)/slope2-(dat_vert[i,2]-b1)/slope1)
h=hlw+(dat_vert[i,2]-zlw)/(zup-zlw)*(hup-hlw)
x_shift[k]=x_shift[k]+np.cos(r/R*np.pi/2.)*h
k+=1
dat_vert=np.vstack((dat_vert[out_id,:],
np.hstack((x_shift.reshape(len(in_id),1),
dat_vert[in_id,1].reshape(len(in_id),1),
dat_vert[in_id,2].reshape(len(in_id),1)))))
return dat_vert
slope1=10.;slope2=-10.
trunc1=.1;trunc2=.6
hup=0.;hlw=.08
print omg
fout='F3D_syn.xyz'
f=open(fout,'w+')
np.savetxt(f,dat_vert,delimiter=' ', fmt='%.6f '*3)
f.close()
from subprocess import call
fin=fout
fout=fout.rsplit('.')[0]+'.stl'
mxl='xyz2stl.mlx'
call(['meshlabserver', '-i',fin,'-o',fout,'-s',mxl])
if clean==1: os.remove(fin)
fin=fout
if refine==1:
fout=fout.rsplit('.')[0]+'_dns.exo'
else:
fout=fout.rsplit('.')[0]+'.exo'
jou='F3D_tet.jou'
txt_jou=open(jou,'r')
txt_jou_tmp=open('tmp.jou','w+')
hf=0.0025 # fault grid length (0.0025 for ~100 m tet model, 0.003 for ~40 m)
hm=0.0075 # matrix grid length (0.0075 for ~100 m tet model, 0.010 for ~40 m)
for line in txt_jou:
line=line.strip('\r\n')
if 'import' in line.lower():
line='import stl "'+fin+'"'
if 'export' in line.lower():
line='export mesh "'+fout+'" dimension 3 overwrite'
if 'surface 46 94 95 97 size' in line.lower():
line='surface 46 94 95 97 size %0.6f' %(2*hf)
if 'volume all size' in line.lower():
line='volume all size %0.6f' %(2*hm)
txt_jou_tmp.write(line+'\n')
if 'mesh volume all' in line.lower() and refine==1:
txt_jou_tmp.write('refine volume all\n')
txt_jou.close();txt_jou_tmp.close()
call(['trelis','-nojournal','-nographics','tmp.jou'])
if clean==1: os.remove('tmp.jou')
dt_dyn=2E-5 #1E-5 for dns 100 m tet model, 8E-5 for 40 m tet, 8E-4 for ~1 m tet
import F3D_msh2inp
_=F3D_msh2inp.msh2inp(fout,dt_dyn)
if vis==1:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(dat_vert[:,0], dat_vert[:,1], dat_vert[:,2], c='b', marker='.')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([np.max(dat_vert[:,0])-np.min(dat_vert[:,0]),np.max(dat_vert[:,1])\
-np.min(dat_vert[:,1]), np.max(dat_vert[:,2])-np.min(dat_vert[:,2])]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten()
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten()
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten()
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w',)
plt.title('fault [km]')
plt.grid()
plt.show()
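# Typical invocation (sketch; the script filename is an assumption based on
# the 'F3D_' output prefix used above):
#
#     python F3D_syn_mesh.py -vis 1 -refine 0 -clean 1
#
# -vis 1 plots the cropped point cloud, -refine 1 refines the tet mesh and
# -clean 1 removes the intermediate files, matching the flags parsed above.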
|
"""oclubs filters."""
from __future__ import absolute_import
from oclubs.filters.resfilter import ResFilter, ResFilterConverter
from oclubs.filters.clubfilter import ClubFilter, ClubFilterConverter
from oclubs.filters.roomfilter import RoomFilter, RoomFilterConverter
__all__ = ['ResFilter', 'ResFilterConverter',
'ClubFilter', 'ClubFilterConverter',
'RoomFilter', 'RoomFilterConverter']
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('benchmark', '0008_benchmarkdefinition_commit_keyword_updated'),
]
operations = [
migrations.AlterField(
model_name='benchmarkexecutionentry',
name='status',
field=models.IntegerField(choices=[(0, b'Ready'), (1, b'In_Progress'), (2, b'Finished'), (3, b'Finished_With_Errors')], default=0),
),
]
|
"""Creates beautiful visualizations of the publication database."""
import datetime
import sqlite3 as sql
import numpy as np
from astropy import log
from matplotlib import pyplot as plt
import matplotlib.patheffects as path_effects
import matplotlib as mpl
from matplotlib import style
import seaborn as sns
from kpub import PublicationDB
MISSIONS = ['k2']
SCIENCES = ['exoplanets', 'astrophysics']
output_fn = 'publications-per-year-k2.png'
db = PublicationDB()
first_year = 2014
barwidth = 0.75
extrapolate = True
current_year = datetime.datetime.now().year
palette = sns.color_palette(['#f1c40f', '#2980b9'])
style.use('../styles/black.mplstyle')
plt.rc('xtick.major', size=0)
plt.rc('ytick.major', size=0)
counts = {}
for mission in MISSIONS:
counts[mission] = {}
for year in range(first_year, current_year + 1):
counts[mission][year] = 0
cur = db.con.execute("SELECT year, COUNT(*) FROM pubs "
"WHERE mission = ? "
"AND year >= '2014' "
"GROUP BY year;",
[mission])
rows = list(cur.fetchall())
for row in rows:
counts[mission][int(row[0])] = row[1]
fig = plt.figure(figsize=(8, 4.5))
ax = fig.add_subplot(111)
plt.bar(np.array(list(counts['k2'].keys())) - 0.5*barwidth,
counts['k2'].values(),
label='K2',
facecolor=palette[0],
edgecolor='black',
width=barwidth)
if extrapolate:
now = datetime.datetime.now()
fraction_of_year_passed = float(now.strftime("%-j")) / 365.2425
    current_total = counts['k2'][current_year]
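    # With a fraction f of the year elapsed, the projected full-year total is
    # current_total / f, so the remaining part is (1/f - 1) * current_total.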
expected = (1/fraction_of_year_passed - 1) * current_total
plt.bar(current_year - 0.5*barwidth,
expected,
bottom=current_total,
label='Extrapolation',
facecolor='#34495e',
edgecolor='black',
width=barwidth)
plt.ylabel("Publications per year", fontsize=18)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.xticks(range(first_year - 1, current_year + 1), fontsize=18)
plt.yticks(range(0, 151, 50), fontsize=18)
plt.xlim([first_year - 0.75*barwidth, current_year + 0.75*barwidth])
"""
plt.legend(bbox_to_anchor=(0.1, 1),
loc='upper left',
ncol=3,
borderaxespad=0.,
handlelength=0.8,
frameon=False,
fontsize=18)
"""
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.grid(axis='y')
n_pub = sum(counts['k2'].values())
plt.suptitle("K2 Contributed to "
"{} Publications So Far".format(n_pub),
fontsize=22)
plt.tight_layout(rect=(0, 0, 1, 0.92), h_pad=1.5)
log.info("Writing {}".format(output_fn))
plt.savefig(output_fn)
plt.close()
|
import sys, os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
if ON_RTD:
from unittest.mock import MagicMock
MOCK_MODULES = ['h5py']
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
import bayespy as bp
html_theme = 'sphinx_rtd_theme'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'numpydoc',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autosummary',
'sphinxcontrib.tikz',
'sphinxcontrib.bayesnet',
'sphinxcontrib.bibtex',
'nbsphinx',
]
imgmath_image_format = 'svg'
if ON_RTD:
tikz_proc_suite = 'GhostScript'
else:
tikz_proc_suite = 'pdf2svg'
if ON_RTD:
# For some reason, RTD needs these to be set explicitly although they
# should have default values
math_number_all = False
numpydoc_show_class_members = False
todo_include_todos = True
import glob
autosummary_generate = glob.glob("*.rst") + glob.glob("*/*.rst") + glob.glob("*/*/*.rst") + glob.glob("*/*/*/*.rst")
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "BayesPy"
copyright = bp.__copyright__
version = bp.__version__
release = bp.__version__
exclude_patterns = [
'**.ipynb_checkpoints'
]
pygments_style = 'sphinx'
tikz_latex_preamble = r"""
\usepackage{amsmath}
"""
html_title = "BayesPy v%s Documentation" % (version)
html_static_path = ['_static']
htmlhelp_basename = 'BayesPydoc'
latex_elements = {
'preamble': r'''
\usepackage{tikz}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{svg}
\usetikzlibrary{shapes}
\usetikzlibrary{fit}
\usetikzlibrary{chains}
\usetikzlibrary{arrows}
''',
'fontenc': ''
}
latex_documents = [
('index', 'BayesPy.tex', u'BayesPy Documentation',
u'Jaakko Luttinen', 'manual'),
]
man_pages = [
('index', 'bayespy', u'BayesPy Documentation',
[u'Jaakko Luttinen'], 1)
]
texinfo_documents = [
('index', 'BayesPy', u'BayesPy Documentation',
u'Jaakko Luttinen', 'BayesPy', 'One line description of project.',
'Miscellaneous'),
]
epub_title = u'BayesPy'
epub_author = bp.__author__
epub_publisher = bp.__author__
epub_copyright = bp.__copyright__
import matplotlib
matplotlib.use('agg')
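# Build sketch: run "sphinx-build -b html . _build/html" from the directory
# containing this conf.py; on Read the Docs the mocked h5py import above
# keeps autodoc from failing on the missing C extension.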
|
import logging
from office365.sharepoint.helpers.utils import to_camel
logger = logging.getLogger(__name__)
class QueryStringBuilder:
"""class to map web-querystring dictionary to sharepoint-querystring"""
date_operators = ['ge', 'gt', 'le', 'lt']
mapping_operator = {
'gte': 'ge',
'gt': 'gt',
'lte': 'le',
'lt': 'lt',
'not': 'ne',
'contains': 'substringof'
}
search = []
filters = {}
def __init__(self, filters):
super().__init__()
if filters:
self.filters = filters
def get_filter_querystring(self):
filter_queries = []
for filter_name, filter_value in self.filters.items():
# operator
querystring_operator = filter_name.split('__')[-1]
operator = self.mapping_operator.get(querystring_operator, 'eq')
# filter
filter_name = to_camel(filter_name.split('__')[0])
if operator in self.date_operators:
values = ["{}T00:00:00Z".format(filter_value)] # 2016-03-26
query = ' or '.join([f"{filter_name} {operator} datetime'{value}'" for value in values])
elif operator == 'substringof':
values = filter_value.split(',')
query = ' or '.join([f"{operator}('{value}', {filter_name})" for value in values])
else:
values = filter_value.split(',')
query = ' or '.join([f"{filter_name} {operator} '{value}'" for value in values])
if len(values) > 1:
query = f'({query})'
filter_queries.append(query)
logger.info(query)
return str(" and ".join(filter_queries))
def get_querystring(self):
return self.get_filter_querystring() or ''
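# Usage sketch (illustrative filter keys; the exact casing of the mapped
# field names depends on to_camel()):
#
#     qs = QueryStringBuilder({'created__gte': '2016-03-26',
#                              'title__contains': 'foo,bar'})
#     qs.get_querystring()
#     # -> "Created ge datetime'2016-03-26T00:00:00Z' and
#     #     (substringof('foo', Title) or substringof('bar', Title))"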
|
import unittest
import json
import time
from celery import current_app
from django.conf import settings
from django.utils import timezone
from ep.models import DPMeasurements, DeviceParameter
from ep.tasks import send_msg
from django.test import TestCase, modify_settings, override_settings
from ep.tests.static_factories import SiteFactory
from ep_secure_importer.controllers.secure_client import secure_site_name
__author__ = 'schien'
@override_settings(IODICUS_MESSAGING_HOST='messaging.iodicus.net')
class TaskTest(TestCase):
def test_messaging(self):
print(settings.IODICUS_MESSAGING_HOST)
# print(settings.BROKER_URL)
self.assertTrue(send_msg.delay(json.dumps({'test': 1})))
class LocalTaskTest(TestCase):
def test_messaging(self):
print(settings.IODICUS_MESSAGING_HOST)
print(settings.BROKER_URL)
self.assertTrue(send_msg.delay(json.dumps({'test': 1})))
class InfluxDBTest(TestCase):
@classmethod
def setUpTestData(cls):
SiteFactory.create(name=secure_site_name)
# @unittest.skip
def test_simple_add(self):
print(settings.INFLUXDB_HOST)
m = DPMeasurements(device_parameter=DeviceParameter.objects.first())
before = len(list(m.all()))
print(before)
m.add(time=timezone.now(), value=255)
m.add(time=timezone.now(), value=0)
m.add(time=timezone.now(), value=20.5)
time.sleep(5)
after = len(list(m.all()))
print(after)
self.assertTrue(before + 3 == after)
if __name__ == '__main__':
unittest.main()
|
from django.http import Http404, HttpResponse
from django.template.context_processors import csrf
from rest_framework.authentication import TokenAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.views import APIView
from .models import FrontendDeployment
dev = """
<!doctype html>
<html lang="en">
<head>
<title>Loading | Falmer</title>
</head>
<body class="FalmerSite">
<script type="text/javascript">window.CSRF = "{csrf_token}";</script>
<div class="FalmerAppRoot"></div>
<script type="text/javascript" src="http://localhost:8080/vendor.js"></script>
<script type="text/javascript" src="http://localhost:8080/devFonts.js"></script>
<script type="text/javascript" src="http://localhost:8080/main.js"></script>
<script type="text/javascript" src="http://localhost:8080/productionFonts.js"></script>
</body>
</html>
"""
def application_serve(request):
    if not request.is_ajax():
try:
deployment = FrontendDeployment.objects.filter(enabled=True).latest('created_at')
except FrontendDeployment.DoesNotExist:
return HttpResponse(dev.format(csrf_token=csrf(request)['csrf_token']))
return HttpResponse(deployment.content.format(csrf_token=csrf(request)['csrf_token']))
raise Http404()
class FrontendAPI(APIView):
authentication_classes = [TokenAuthentication, ]
permission_classes = [DjangoModelPermissions, ]
queryset = FrontendDeployment.objects.none()
def post(self, request):
FrontendDeployment.objects.create(
content=request.data['contents'],
)
return HttpResponse(status=200)
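# Deployment sketch (the endpoint path is an assumption; it depends on a
# urls.py that is not shown here):
#
#     curl -X POST https://example.com/frontend/ \
#          -H "Authorization: Token <token>" \
#          -H "Content-Type: application/json" \
#          -d '{"contents": "<!doctype html>..."}'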
|
import imp
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
VERBOSE = False
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
print '1..%i' % len(self.cases)
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
print 'not ok %i - %s' % (self._done, command)
for l in output.output.stderr.splitlines():
print '#' + l
for l in output.output.stdout.splitlines():
print '#' + l
else:
print 'ok %i - %s' % (self._done, command)
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
print ' ---'
print ' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000)
print ' ...'
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand())
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
    return outcome not in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
    return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
    prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
  prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
      error_mode = SEM_NOGPFAULTERRORBOX
      prev_error_mode = Win32SetErrorMode(error_mode)
      Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
if mode == 'debug':
name = 'out/Debug/node'
else:
name = 'out/Release/node'
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/node.exe')
else:
name = os.path.abspath('Release/node.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
    left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
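# Illustrative use of the status-file grammar (outcome names are lowercased
# by the parser, so PASS/TIMEOUT map onto the constants defined above):
#
#     ast = ParseCondition("PASS || TIMEOUT if $mode == debug")
#     ast.GetOutcomes({'mode': 'debug'}, {})  # yields the pass and timeout outcomes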
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--use-http1", help="Pass --use-http1 switch to node",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
  def IsFlaky(o):
    return (PASS in o) and (FAIL in o) and (CRASH not in o) and (OKAY not in o)
  def IsFailOk(o):
    return (len(o) == 2) and (FAIL in o) and (OKAY in o)
  unskipped = [c for c in cases if SKIP not in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
  def match(self, s):
    if not self.compiled:
      pattern = "^" + self.pattern.replace('*', '.*') + "$"
      self.compiled = re.compile(pattern)
    return self.compiled.match(s)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
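# A hedged illustration of Pattern/SplitPath (hypothetical inputs): '*' is
# translated to the regex '.*' and anchored at both ends, so
#   [str(p) for p in SplitPath("simple/test-*")]  =>  ['simple', 'test-*']
#   SplitPath("simple/test-*")[1].match("test-foo")  # truthy
#   SplitPath("simple/test-*")[1].match("other")     # None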
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
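# A hedged sketch of the '@' placeholder (hypothetical flags): given
# --special-command "valgrind @ --leak-check=full", the returned processor
# splices each test command into the '@' position:
#   ExpandCommand(['shell', 'test.js'])
#     => ['valgrind', 'shell', 'test.js', '--leak-check=full']
# (prefix and suffix are URL-unquoted first, so flags may be %-encoded).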
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet', 'gc']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
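# For example, FormatTime(83.456) => "01:23.456" (minutes and seconds from
# time.gmtime, plus zero-padded milliseconds).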
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
if options.use_http1:
def wrap(processor):
return lambda args: processor(args[:1] + ['--use-http1'] + args[1:])
processor = wrap(processor)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
    timed_tests = [ t.case for t in cases_to_run if t.case.duration is not None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
import getopt
import json
import locale
import os
import re
import sys
from urllib import request, parse
import platform
import threading
from .version import __version__
from .util import log, sogou_proxy_server, get_filename, unescape_html
dry_run = False
force = False
player = None
sogou_proxy = None
sogou_env = None
cookies_txt = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def tr(s):
    try:
        s.encode(default_encoding)
        return s
    except UnicodeEncodeError:
        return str(s.encode('utf-8'))[2:-1]
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
When more than one pattern are given, returns a list of strings ([] if no match found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
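# Examples of the two return modes (hypothetical inputs):
#   match1('v1.2.3', r'v(\d+)')              => '1'
#   match1('a=1&b=2', r'a=(\d)', r'b=(\d)')  => ['1', '2']
#   match1('abc', r'(\d+)')                  => None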
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
def unicodize(text):
return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text)
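# e.g. unicodize(r'\u4f60\u597d') => '你好': literal \uXXXX escapes, as found
# in JSON-style responses, are replaced by the code points they name.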
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def filenameable(text):
"""Converts a string to a legal filename through various OSes.
"""
# All POSIX systems
text = text.translate({
0: None,
ord('/'): '-',
})
if platform.system() == 'Windows': # For Windows
text = text.translate({
ord(':'): '-',
ord('*'): '-',
ord('?'): '-',
ord('\\'): '-',
ord('\"'): '\'',
ord('<'): '-',
ord('>'): '-',
ord('|'): '-',
ord('+'): '-',
ord('['): '(',
ord(']'): ')',
})
else:
if text.startswith("."):
text = text[1:]
if platform.system() == 'Darwin': # For Mac OS
text = text.translate({
ord(':'): '-',
})
return text
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
    (A raw deflate stream, decompressed via zlib.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
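# A minimal round-trip sanity check (standard library only; raw deflate
# streams take a negative wbits on both the compress and decompress side):
#   import zlib
#   co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
#   raw = co.compress(b'hello') + co.flush()
#   assert undeflate(raw) == b'hello'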
def get_response(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
def get_html(url, encoding = None, faker = False):
    content = get_response(url, faker).data
    return str(content, encoding or 'utf-8', 'ignore')
def get_decoded_html(url, faker = False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
req = request.Request(url, headers=headers)
if cookies_txt:
cookies_txt.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
response = request.urlopen(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)')
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
size = int(response.headers['content-length'])
return size
def urls_size(urls):
return sum(map(url_size, urls))
def url_info(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(request.Request(url))
headers = response.headers
type = headers['content-type']
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mpeg': 'mp3'
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = int(headers['content-length'])
else:
size = None
return type, ext, size
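# e.g. a direct MP4 link (hypothetical URL) might yield
#   url_info('http://example.com/clip.mp4') => ('video/mp4', 'mp4', 1048576)
# where the extension comes from the content-type mapping (or the
# content-disposition filename) and the size from content-length.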
def url_locations(urls, faker = False):
locations = []
for url in urls:
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(request.Request(url))
locations.append(response.url)
return locations
def url_save(url, filepath, bar, refer = None, is_part = False, faker = False):
file_size = url_size(url, faker = faker)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
            end_length = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
range_length = int(response.headers['content-length'])
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size: # Download finished
break
else: # Unexpected termination. Retry request
headers['Range'] = 'bytes=' + str(received) + '-'
response = request.urlopen(request.Request(url, headers = headers), None)
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
def url_save_chunked(url, filepath, bar, refer = None, is_part = False, faker = False):
if os.path.exists(filepath):
if not force:
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(os.path.getsize(filepath))
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if faker:
headers = fake_headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
break
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
    assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar_size = 40
percent = round(self.received * 100 / self.total_size, 1)
if percent > 100:
percent = 100
        dots = bar_size * int(percent) // 100
        # true division keeps a fractional remainder, so the 0.8/0.4
        # thresholds below actually distinguish partial cells
        plus = int(percent) - dots / bar_size * 100
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '=' * dots + plus
bar = '{0:>5}% ({1:>5}/{2:<5}MB) [{3:<40}] {4}/{5}'.format(percent, round(self.received / 1048576, 1), round(self.total_size / 1048576, 1), bar, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('?', '?' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls)
        except Exception:
            import traceback
            traceback.print_exc(file = sys.stdout)
title = get_filename(title)
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size:
if not force and os.path.exists(filepath) and os.path.getsize(filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % tr(filepath))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(filename))
url_save(url, filepath, bar, refer = refer, faker = faker)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'flv':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, os.path.join(output_dir, title + '.mp4'))
else:
from .processor.join_flv import concat_flv
concat_flv(parts, os.path.join(output_dir, title + '.flv'))
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, os.path.join(output_dir, title + '.mp4'))
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, os.path.join(output_dir, title + '.mp4'))
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
    assert ext in ('ts',)
title = get_filename(title)
filename = '%s.%s' % (title, 'ts')
filepath = os.path.join(output_dir, filename)
if total_size:
if not force and os.path.exists(filepath[:-3] + '.mkv'):
print('Skipping %s: file already exists' % tr(filepath[:-3] + '.mkv'))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
parts = []
url = urls[0]
print('Downloading %s ...' % tr(filename))
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
url_save_chunked(url, filepath, bar, refer = refer, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_convert_ts_to_mkv
if ffmpeg_convert_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
                print('ffmpeg not found; conversion aborted.')
else:
print("Can't convert %s files" % ext)
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
if ffmpeg_concat_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
                print('ffmpeg not found; merging aborted.')
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(url, playpath, title, ext, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
print('Real Playpath:\n%s\n' % [playpath])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, playpath)
return
from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream
assert has_rtmpdump_installed(), "RTMPDump not installed."
download_rtmpdump_stream(url, playpath, title, ext, output_dir)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size):
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
if type in ['video/3gpp']:
type_info = "3GPP multimedia file (%s)" % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = "Flash video (%s)" % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = "MPEG-4 video (%s)" % type
elif type in ['video/MP2T']:
type_info = "MPEG-2 transport stream (%s)" % type
elif type in ['video/webm']:
type_info = "WebM video (%s)" % type
#elif type in ['video/ogg']:
# type_info = "Ogg video (%s)" % type
elif type in ['video/quicktime']:
type_info = "QuickTime video (%s)" % type
elif type in ['video/x-matroska']:
type_info = "Matroska video (%s)" % type
#elif type in ['video/x-ms-wmv']:
# type_info = "Windows Media video (%s)" % type
elif type in ['video/x-ms-asf']:
type_info = "Advanced Systems Format (%s)" % type
#elif type in ['video/mpeg']:
# type_info = "MPEG video (%s)" % type
elif type in ['audio/mpeg']:
type_info = "MP3 (%s)" % type
else:
type_info = "Unknown type (%s)" % type
print("Video Site:", site_info)
print("Title: ", unescape_html(tr(title)))
print("Type: ", type_info)
print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)")
print()
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
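# Examples:
#   parse_host('8080')           => ('0.0.0.0', 8080)
#   parse_host('localhost:1080') => ('localhost', 1080)
#   parse_host('http://0.0.0.0') => ('0.0.0.0', 0)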
def get_sogou_proxy():
return sogou_proxy
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def set_http_proxy(proxy):
    if proxy is None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def download_main(download, download_playlist, urls, playlist, output_dir, merge, info_only):
for url in urls:
if url.startswith('https://'):
url = url[8:]
if not url.startswith('http://'):
url = 'http://' + url
if playlist:
download_playlist(url, output_dir = output_dir, merge = merge, info_only = info_only)
else:
download(url, output_dir = output_dir, merge = merge, info_only = info_only)
def get_version():
try:
import subprocess
real_dir = os.path.dirname(os.path.realpath(__file__))
git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=real_dir, stderr=subprocess.DEVNULL).decode('utf-8').strip()
assert git_hash
return '%s-%s' % (__version__, git_hash)
except:
return __version__
def script_main(script_name, download, download_playlist = None):
version = 'You-Get %s, a video downloader.' % get_version()
help = 'Usage: %s [OPTION]... [URL]...\n' % script_name
help += '''\nStartup options:
-V | --version Display the version and exit.
-h | --help Print this help and exit.
'''
help += '''\nDownload options (use with URLs):
    -f | --force Force overwriting existing files.
-i | --info Display the information of videos without downloading.
-u | --url Display the real URLs of videos without downloading.
    -c | --cookies Load Netscape's cookies.txt file.
-n | --no-merge Don't merge video parts.
-o | --output-dir <PATH> Set the output directory for downloaded videos.
-p | --player <PLAYER [options]> Directly play the video with PLAYER like vlc/smplayer.
-x | --http-proxy <HOST:PORT> Use specific HTTP proxy for downloading.
--no-proxy Don't use any proxy. (ignore $http_proxy)
-S | --sogou Use a Sogou proxy server for downloading.
--sogou-proxy <HOST:PORT> Run a standalone Sogou proxy server.
--debug Show traceback on KeyboardInterrupt.
'''
short_opts = 'Vhfiuc:nSo:p:x:'
opts = ['version', 'help', 'force', 'info', 'url', 'cookies', 'no-merge', 'no-proxy', 'debug', 'sogou', 'output-dir=', 'player=', 'http-proxy=', 'sogou-proxy=', 'sogou-env=']
if download_playlist:
short_opts = 'l' + short_opts
opts = ['playlist'] + opts
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError as err:
log.e(err)
log.e("try 'you-get --help' for more options")
sys.exit(2)
global force
global dry_run
global player
global sogou_proxy
global sogou_env
global cookies_txt
cookies_txt = None
info_only = False
playlist = False
merge = True
output_dir = '.'
proxy = None
traceback = False
for o, a in opts:
if o in ('-V', '--version'):
print(version)
sys.exit()
elif o in ('-h', '--help'):
print(version)
print(help)
sys.exit()
elif o in ('-f', '--force'):
force = True
elif o in ('-i', '--info'):
info_only = True
elif o in ('-u', '--url'):
dry_run = True
elif o in ('-c', '--cookies'):
from http import cookiejar
cookies_txt = cookiejar.MozillaCookieJar(a)
cookies_txt.load()
elif o in ('-l', '--playlist'):
playlist = True
elif o in ('-n', '--no-merge'):
merge = False
elif o in ('--no-proxy',):
proxy = ''
elif o in ('--debug',):
traceback = True
elif o in ('-o', '--output-dir'):
output_dir = a
elif o in ('-p', '--player'):
player = a
elif o in ('-x', '--http-proxy'):
proxy = a
elif o in ('-S', '--sogou'):
sogou_proxy = ("0.0.0.0", 0)
elif o in ('--sogou-proxy',):
sogou_proxy = parse_host(a)
elif o in ('--sogou-env',):
sogou_env = a
else:
log.e("try 'you-get --help' for more options")
sys.exit(2)
if not args:
if sogou_proxy is not None:
try:
if sogou_env is not None:
server = sogou_proxy_server(sogou_proxy, network_env=sogou_env)
else:
server = sogou_proxy_server(sogou_proxy)
server.serve_forever()
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit()
else:
print(help)
sys.exit()
set_http_proxy(proxy)
try:
download_main(download, download_playlist, args, playlist, output_dir, merge, info_only)
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit(1)
|
import math
import scipy.optimize as opt
log = math.log
exp = math.exp
small = 1e-20 # unitless
T0 = 1 # K
Tcrit = 650 # K
zero_C = 273.15 # K
p0 = 1 # Pa
atm = 101325 # Pa
bar = 100000 # Pa
R = 8.314510 # J / mol / K 1-54
Mw_ = 0.01801528 # kg / mol 6-4 molecular weight of water
vap_T = zero_C # K 6-3
vap_enthalpy = 45054 # J / mol 6-3
air_T = 300 # K 6-1
air_cp = 1007 # J / kg / K 6-1
air_rho = 1.161 # kg / m^3 6-1
lw_T = 10 + zero_C # K 6-3
lw_cp = 4192.1 # J / kg / K 6-3
sat_T = 10 + zero_C # K 6-10
sat_p_star = 1228.1 # Pa 6-10
Md_ = air_rho * R * air_T / bar # kg / mol molecular weight of air
cd_ = air_cp * Md_ # J / mol / K heat capacity of air, constant pressure
cv_ = 1870 * Mw_ # J / mol / K heat capacity of water vapor, constant p
cl_ = lw_cp * Mw_ # J / mol / K heat capacity of liquid water, constant p
cd = cd_ / R # unitless
cv = cv_ / R # unitless
cl = cl_ / R # unitless
Md = Md_ / R # kg K / J
Mw = Mw_ / R # kg K / J
epsilon = Mw_ / Md_ # unitless
Lc = vap_enthalpy / R + (cl - cv) * vap_T # K
Tc = sat_T # K
pc = sat_p_star * exp(Lc / Tc) # Pa
def compute_p_star(T):
return pc * exp((cv - cl) * log(T / Tc) - Lc / T)
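# By construction this reproduces the tabulated saturation point used to
# calibrate pc: the (cv - cl) * log(T / Tc) term vanishes at T == Tc, so
#   compute_p_star(sat_T) == sat_p_star  (1228.1 Pa at 283.15 K)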
def compute_y_s(p, p_star):
return p_star / (p - p_star)
def compute_y_s_from_T(p, T):
return compute_y_s(p, compute_p_star(T))
def compute_ell(T):
return cv - cl + Lc / T
def compute_issat_ypT(y, p, T):
y_s = compute_y_s_from_T(p, T)
return (y_s > 0) and (y > y_s)
def compute_issat_yps(y, p, s):
return compute_issat_ypT(y, p, compute_T_unsat(y, p, s))
def compute_M(y):
return Md * (1 + epsilon * y)
def compute_Ms_unsat(y, p, T):
if y < small:
return cd * log(T / T0) - log(p / p0)
else:
return ((cd + y * cv) * log(T / T0)
- (1 + y) * log(p / p0)
+ (1 + y) * log(1 + y)
- y * log(y))
def compute_Ms_sat(y, p, T):
p_star = compute_p_star(T)
y_s = compute_y_s(p, p_star)
ell = compute_ell(T)
if y < small:
# Unlikely to represent a physical situation,
# since y > y_s for saturated parcels.
return cd * log(T / T0) - log(p_star / p0) + log(y_s) + y_s * ell
else:
return ((cd + y * cv) * log(T / T0)
- (1 + y) * log(p_star / p0)
+ log (y_s)
+ (y_s - y) * ell)
def compute_T_unsat(y, p, s):
Ms = compute_M(y) * s
if y < small:
return T0 * exp((Md * s + log(p / p0)) / cd)
else:
return T0 * exp(
(Ms + (1 + y) * log(p / p0) - (1 + y) * log(1 + y) + y * log(y))
/ (cd + y * cv)
)
def compute_T_sat(y, p, s):
    if y < 1e-10 or p > 1e10:
        raise ValueError("compute_T_sat requires y >= 1e-10 and p <= 1e10")
#
# Equation we wish to solve:
# M * s = (cd + y*cv) * log(T / T0) - (1 + y)*log(p_star / p0) + log(y_s) + (y_s - y) * ell
# where
# p_star is a function of T
# y_s = p_star / (p - p_star)
# ell = cv - cl + Lc / T
#
# Note that for T < Tcrit, ell > 0 and d p_star/dT > 0.
#
# Let
# f(T) = c0 * log(T) - (1 + y) * log(p_star) + log(y_s) + (y_s - y) * ell + c1
# = c0 * log(T) - y * log(p_star) - log(p - p_star) + (y_s - y) * ell + c1
# = c0 * log(T) - y * ((cv - cl) log(T / Tc) - Lc / T) - log(p - p_star)
# + y_s * ell - y * (cv - cl) - y * Lc / T + c1 - y * log(pc)
# = c0 * log(T) - y * (cv - cl) * log(T) - log(p - p_star)
# + y_s * ell + c2
# = c3 * log(T) - log(p - p_star) + y_s * ell + c2
# where
# c0 = cd + y * cv
# c1 = - (cd + y * cv) * log(T0) + (1 + y) * log(p0) - compute_M(y) * s
# c2 = c1 - y * log(pc) - y * (cv - cl) + y * (cv - cl) * log(Tc)
# c3 = cd + y * cl
#
# Note that f(T) is increasing in T for reasonable values of p and T. We want to find
# where f(T) = 0.
#
c1 = - (cd + y * cv) * log(T0) + (1 + y) * log(p0) - compute_M(y) * s
c2 = c1 - y * log(pc) - y * (cv - cl) + y * (cv - cl) * log(Tc)
c3 = cd + y * cl
#
# Since the parcel is saturated we know that y_s < y, so
# p_star = p (y_s / (1 + y_s)) = p (1 - 1 / (1 + y_s)) < p (1 - 1 / (1 + y))
    # so we have an upper bound on the value of p_star. Furthermore, since cv - cl < 0,
# p_star = pc exp((cv - cl) log(T / Tc) - Lc / T)
# > pc exp((cv - cl) log(Tcrit / Tc) - Lc / T)
# so
# -Lc / T < log(p_star / pc) + (cl - cv) log(Tcrit / Tc)
# Lc / T > -log(p_star / pc) + (cv - cl) log(Tcrit / Tc) [1]
# T < Lc / (-log(p_star / pc) + (cv - cl) log(Tcrit / Tc))
# T < Lc / (-log(p / pc) - log(y / (1 + y)) + (cv - cl) log(Tcrit / Tc))
# where we have used that the right side of [1] is positive for p_star smaller than 1e11 Pa
# or so.
#
c4 = (cv - cl) * log(Tcrit / Tc)
p_star_max = p * y / (1 + y)
Tmax = Lc / (c4 - log(p_star_max / pc))
Tmax = min(Tmax, Tcrit)
# Couldn't figure out a good way to lower bound it. 100 K is pretty safe.
Tmin = 100
def f(T):
p_star = compute_p_star(T)
if p_star >= p_star_max:
return T * 1.0e200
y_s = p_star / (p - p_star)
ell = cv - cl + Lc / T
return c3 * log(T) - log(p - p_star) + y_s * ell + c2
if f(Tmin) >= 0:
return Tmin
if f(Tmax) <= 0:
return Tmax
return opt.brentq(f, Tmin, Tmax)
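# A hedged round-trip check (values chosen only for illustration): for a
# saturated parcel, compute_T_sat inverts compute_Ms_sat at fixed (y, p):
#   y, p, T = 0.02, 9.0e4, 280.0   # saturated here, since y > y_s(p, T)
#   s = compute_Ms_sat(y, p, T) / compute_M(y)
#   abs(compute_T_sat(y, p, s) - T) < 1e-6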
def compute_Tv_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
return T * (1 + y_s) / (1 + y * epsilon)
def compute_Tv_unsat(y, p, s):
return compute_T_unsat(y, p, s) * (1 + y) / (1 + y * epsilon)
def compute_Mh_unsat(y, p, s):
return (cd + y * cv) * compute_T_unsat(y, p, s)
def compute_Mh_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
ell = compute_ell(T)
return (cd + y * cv + (y_s - y) * ell) * T
def compute_Mh_dp_unsat(y, p, s):
return (1 + y) * compute_T_unsat(y, p, s) / p
def compute_Mh_dp_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
return (1 + y_s) * T / p
def compute_w(y):
return y * epsilon
def compute_y(w):
return w / epsilon
def molecular_weight_water():
return Mw_
def molecular_weight_dry_air():
return Md_
def molecular_weight_moist_air(y):
return (Md_ + y * Mw_) / (1 + y)
def saturation_vapor_pressure(T):
    return compute_p_star(T)
def relative_humidity(y, p, T):
y_s = compute_y_s_from_T(p, T)
if y > y_s:
return 1
else:
return y / y_s
def latent_heat_condensation(T):
return compute_ell(T) * R * T
def is_saturated(y, p, T):
return compute_issat_ypT(y, p, T)
def entropy(y, p, T):
if compute_issat_ypT(y, p, T):
return compute_Ms_sat(y, p, T) / compute_M(y)
else:
return compute_Ms_unsat(y, p, T) / compute_M(y)
def temperature(y, p, s):
if compute_issat_yps(y, p, s):
return compute_T_sat(y, p, s)
else:
return compute_T_unsat(y, p, s)
def virtual_temperature(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Tv_sat(y, p, s)
else:
return compute_Tv_unsat(y, p, s)
def enthalpy(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Mh_sat(y, p, s) / compute_M(y)
else:
return compute_Mh_unsat(y, p, s) / compute_M(y)
def enthalpy_dp(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Mh_dp_sat(y, p, s) / compute_M(y)
else:
return compute_Mh_dp_unsat(y, p, s) / compute_M(y)
def new_temperature(y, Told, pold, pnew):
return temperature(y, pnew, entropy(y, pold, Told))
def change_in_enthalpy(y, Told, pold, pnew):
    s = entropy(y, pold, Told)
return enthalpy(y, pnew, s) - enthalpy(y, pold, s)
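# Illustrative use (hedged, dry air so y = 0): adiabatic compression or
# expansion follows the Poisson relation T2 = T1 * (p2 / p1)**(1 / cd),
# with 1 / cd ~ R / cp ~ 0.285 for dry air, so
#   new_temperature(0, 300, 1.0e5, 0.8e5)   # roughly 281.5 K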
|
""" Example usage of DbC (design by contract)
In this example we show you how to use pre- and post-condition checkers
decorating the same function.
"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import DbC
DbC.ASSERTION_LEVEL = DbC.ASSERTION_ALL
from DbC import pre, post
def check_pre(*args):
'Pre-condition checker.'
# must have an even number of args
assert ( len(args) & 1 ) == 0, 'Expected an even number of arguments'
# all numbers must be non-negative ints
assert all(i>=0 and isinstance(i,int) for i in args), \
'Numbers must be positive integers'
# all second numbers must be < 10
assert all(i<10 for i in args[1::2]), 'Numbers must be < 10'
def check_post(*args):
'Post-condition checker.'
# return value from decorated function is always the last positional
# parameter
rval = args[-1]
# simple check of the number of items in the return
assert 2 * len(rval) == len(args) - 1
# check units
units_out = [i%10 for i in rval]
units_in = [i for i in args[1:-1:2]]
assert units_out == units_in
# check tens
tens_out = [i//10 for i in rval]
tens_in = [i for i in args[0:-1:2]]
assert tens_out == tens_in
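# Per the convention noted above (return value appended as the last
# positional parameter), after pairoff(1,2, 3,4) returns [12, 34] the
# post-checker is invoked roughly as
#   check_post(1, 2, 3, 4, [12, 34])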
@pre(check_pre)
@post(check_post)
def pairoff(*args):
'Make tens+units from pairs of numbers.'
it = iter(args)
return [10*a+b for a,b in zip(it,it)]
print( pairoff(*range(8)) )
print( pairoff(4,2, 10,1) )
try: # odd number of args
pairoff(1,2,3,4,5)
except AssertionError as e:
print(e)
try: # unit >= 10
pairoff(4,2, 9,10)
except AssertionError as e:
print(e)
try: # negative
pairoff(4,2, -1,2)
except AssertionError as e:
print(e)
try: # non-integer
pairoff(1.25,0.6)
except AssertionError as e:
print(e)
|
import django.db.models.deletion
import django.utils.timezone
import django_fsm
from django.conf import settings
from django.db import migrations, models
import apps.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Meeting",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"date_created",
apps.core.models.DateTimeCreatedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
(
"date_modified",
apps.core.models.DateTimeModifiedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
("format", models.CharField(blank=True, max_length=50)),
("message", models.TextField(blank=True)),
("datetime", models.DateTimeField()),
("state", django_fsm.FSMField(default="available", max_length=50)),
(
"cancelled_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"mentor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="mentors",
to=settings.AUTH_USER_MODEL,
),
),
(
"protege",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="proteges",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ("-datetime",)},
)
]
|
from datetime import date
from openpyxl import load_workbook
if __name__ == '__main__':
wb = load_workbook('FixedCouponBond.xlsx')
ws = wb.active
# Take the input parameters
today = ws['C2'].value.date()
# OIS Data
ois_startdate = today
ois_maturities = []
ois_mktquotes = []
for cell in list(ws.iter_rows('B15:C44')):
ois_maturities.append(cell[0].value)
ois_mktquotes.append(cell[1].value)
# Credit Curve Data
ndps = []
ndpdates = []
for cell in list(ws.iter_rows('B6:C11')):
ndpdates.append(cell[0].value.date())
ndps.append(cell[1].value)
# Bond data
nominals = []
start_dates = []
end_dates = []
cpn_frequency = []
coupons = []
recovery_rates = []
for cell in list(ws.iter_rows('E5:J19')):
nominals.append(cell[0].value)
start_dates.append(cell[1].value.date())
end_dates.append(cell[2].value.date())
cpn_frequency.append(cell[3].value)
coupons.append(cell[4].value)
recovery_rates.append(cell[5].value)
# YOUR CODE HERE ....
# In the coupon calculation use 30e360 convention to compute the accrual period (i.e. tau)
    # The result of your code must be a variable of type list named
    # output_npv. The length of this list has to be equal to the number of bonds,
    # i.e. len(nominals).
# END OF YOUR CODE
# Write results
    # A variable named output_npv of type list, with the same length as nominals,
    # is expected. If it is not present, a placeholder message is written instead.
if 'output_npv' not in locals():
output_npv = ["Not Successful" for x in range(len(nominals))]
out_list = list(ws.iter_rows('K5:K19'))
for i in range(len(output_npv)):
out_list[i][0].value = output_npv[i]
# A new file with the results is created
wb.save("FixedCouponBond_output.xlsx")
|