#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
from __future__ import print_function

import os
import sys
import code
import platform
import subprocess
import logging
from time import sleep
import invoke
from invoke import run, Collection
from website import settings
from admin import tasks as admin_tasks
from utils import pip_install, bin_prefix
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
ns = Collection()
ns.add_collection(Collection.from_module(admin_tasks), name='admin')
def task(*args, **kwargs):
"""Behaves the same way as invoke.task. Adds the task
to the root namespace.
"""
if len(args) == 1 and callable(args[0]):
new_task = invoke.task(args[0])
ns.add_task(new_task)
return new_task
def decorator(f):
new_task = invoke.task(f, *args, **kwargs)
ns.add_task(new_task)
return new_task
return decorator
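# A minimal usage sketch (hypothetical task names, for illustration): both the
# bare and the parametrized forms register the task on the root namespace.
#
#     @task
#     def hello():
#         print('hello')
#
#     @task(aliases=['hi'])
#     def greet():
#         print('hi')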
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, wait=True):
"""Run the API server."""
    env = {"DJANGO_SETTINGS_MODULE": "api.base.settings"}
    cmd = 'DJANGO_SETTINGS_MODULE={} exec {} manage.py runserver {} --nothreading'.format(
        env['DJANGO_SETTINGS_MODULE'], sys.executable, port)
if wait:
return run(cmd, echo=True, pty=True)
from subprocess import Popen
return Popen(cmd, shell=True, env=env)
@task
def adminserver(port=8001):
"""Run the Admin server."""
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
cmd = '{} python manage.py runserver {} --nothreading'.format(env, port)
run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
{transaction}
Available variables:
{context}
"""
TRANSACTION_WARNING = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag.
"""
def make_shell_context(auto_transact=True):
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
from framework.transactions import commands
from framework.transactions import context as tcontext
app = init_app()
def commit():
commands.commit()
print('Transaction committed.')
if auto_transact:
commands.begin()
print('New transaction opened.')
def rollback():
commands.rollback()
print('Transaction rolled back.')
if auto_transact:
commands.begin()
print('New transaction opened.')
context = {
'transaction': tcontext.TokuTransaction,
'start_transaction': commands.begin,
'commit': commit,
'rollback': rollback,
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
if auto_transact:
commands.begin()
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell(transaction=True):
context = make_shell_context(auto_transact=transaction)
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context),
transaction=TRANSACTION_WARNING if transaction else ''
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
        platform_name = str(sys.platform).lower()
        config = platform_configs.get(platform_name)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
`invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_url=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_url:
os.environ['SHAREJS_DB_URL'] = db_url
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug", hostname=None, beat=False):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(level="debug", schedule=None):
"""Run the Celery process."""
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.tasks -l {0}'.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
run(bin_prefix(cmd), pty=True)
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
inv requirements --metrics
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
    elif metrics:  # then metrics requirements
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_api():
"""Run the API test suite."""
test_module(module="api_tests/")
@task
def test_admin():
"""Run the Admin test suite."""
# test_module(module="admin_tests/")
module = "admin_tests/"
verbosity = 0
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings" '
# Use pty so the process buffers "correctly"
run(env + bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_varnish():
"""Run the Varnish test suite."""
proc = apiserver(wait=False)
sleep(5)
test_module(module="api/caching/tests/test_caching.py")
proc.kill()
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
test_api()
test_admin()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_osf():
"""
Run half of the tests to help travis go faster
"""
flake()
jshint()
test_osf()
@task
def test_travis_else():
"""
Run other half of the tests to help travis go faster
"""
test_addons()
test_api()
test_admin()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_varnish():
test_varnish()
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False, metrics=False):
"""Install python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
inv wheelhouse --metrics
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics:
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
            requirements_file = os.path.join(path, 'requirements.txt')
            if os.path.isfile(requirements_file):
                print('Installing requirements for {0}'.format(directory))
                cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
                if WHEELHOUSE_PATH:
                    cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
                run(bin_prefix(cmd))
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
            if os.path.isfile(os.path.join(path, 'local-travis.py')):
                run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            if os.path.isfile(os.path.join(path, 'local-dist.py')):
                run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
    print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
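# ``git describe --dirty --tags --long --abbrev=40`` prints something like
# ``v0.45.2-31-g<40-hex-sha>[-dirty]``; splitting on "-" yields the fields
# that latest_tag_info() pops off below (version, distance, sha, dirty flag).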
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
    except subprocess.CalledProcessError:
        # Nothing sensible to return if ``git describe`` fails; re-raise.
        raise
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def build_js_config_files():
from website import settings
from website.app import build_js_config_files as _build_js_config_files
print('Building JS config files...')
_build_js_config_files(settings)
print("...Done.")
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
build_js_config_files()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
@task
def clean(verbose=False):
run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage():
run('invoke --list')
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import getpass
import os
import uuid
from cgroupspy import trees
import psutil
from airflow.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.helpers import reap_process_group
class CgroupTaskRunner(BaseTaskRunner):
"""
Runs the raw Airflow task in a cgroup that has containment for memory and
cpu. It uses the resource requirements defined in the task to construct
the settings for the cgroup.
    Note that this task runner will only work if the Airflow user has root privileges,
    e.g. if the airflow user is called `airflow` then the following entries (or even
    less restrictive ones) are needed in the sudoers file (replacing
    /CGROUPS_FOLDER with your system's cgroups folder, e.g. '/sys/fs/cgroup/'):
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/* *
"""
def __init__(self, local_task_job):
super(CgroupTaskRunner, self).__init__(local_task_job)
self.process = None
self._finished_running = False
self._cpu_shares = None
self._mem_mb_limit = None
self._created_cpu_cgroup = False
self._created_mem_cgroup = False
self._cur_user = getpass.getuser()
def _create_cgroup(self, path):
"""
Create the specified cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
:rtype: cgroupspy.nodes.Node
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node
def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name)
def start(self):
        # If this process is already running inside a cgroup, run the command
        # directly rather than creating a nested cgroup
cgroups = self._get_cgroup_names()
if cgroups["cpu"] != "/" or cgroups["memory"] != "/":
self.log.debug(
"Already running in a cgroup (cpu: %s memory: %s) so not "
"creating another one",
cgroups.get("cpu"), cgroups.get("memory")
)
self.process = self.run_command()
return
# Create a unique cgroup name
        cgroup_name = "airflow/{}/{}".format(
            datetime.datetime.utcnow().strftime("%Y-%m-%d"),
            str(uuid.uuid4()))
self.mem_cgroup_name = "memory/{}".format(cgroup_name)
self.cpu_cgroup_name = "cpu/{}".format(cgroup_name)
# Get the resource requirements from the task
task = self._task_instance.task
resources = task.resources
cpus = resources.cpus.qty
self._cpu_shares = cpus * 1024
self._mem_mb_limit = resources.ram.qty
# Create the memory cgroup
mem_cgroup_node = self._create_cgroup(self.mem_cgroup_name)
self._created_mem_cgroup = True
if self._mem_mb_limit > 0:
self.log.debug(
"Setting %s with %s MB of memory",
self.mem_cgroup_name, self._mem_mb_limit
)
mem_cgroup_node.controller.limit_in_bytes = self._mem_mb_limit * 1024 * 1024
# Create the CPU cgroup
cpu_cgroup_node = self._create_cgroup(self.cpu_cgroup_name)
self._created_cpu_cgroup = True
if self._cpu_shares > 0:
self.log.debug(
"Setting %s with %s CPU shares",
self.cpu_cgroup_name, self._cpu_shares
)
cpu_cgroup_node.controller.shares = self._cpu_shares
# Start the process w/ cgroups
self.log.debug(
"Starting task process with cgroups cpu,memory: %s",
cgroup_name
)
self.process = self.run_command(
['cgexec', '-g', 'cpu,memory:{}'.format(cgroup_name)]
)
def return_code(self):
return_code = self.process.poll()
        # TODO(plypaul) Monitoring the control file in the cgroup fs is better than
# checking the return code here. The PR to use this is here:
# https://github.com/plypaul/airflow/blob/e144e4d41996300ffa93947f136eab7785b114ed/airflow/contrib/task_runner/cgroup_task_runner.py#L43
# but there were some issues installing the python butter package and
# libseccomp-dev on some hosts for some reason.
# I wasn't able to track down the root cause of the package install failures, but
# we might want to revisit that approach at some other point.
if return_code == 137:
self.log.warning("Task failed with return code of 137. This may indicate "
"that it was killed due to excessive memory usage. "
"Please consider optimizing your task or using the "
"resources argument to reserve more memory for your task")
return return_code
def terminate(self):
if self.process and psutil.pid_exists(self.process.pid):
reap_process_group(self.process.pid, self.log)
def on_finish(self):
# Let the OOM watcher thread know we're done to avoid false OOM alarms
self._finished_running = True
# Clean up the cgroups
if self._created_mem_cgroup:
self._delete_cgroup(self.mem_cgroup_name)
if self._created_cpu_cgroup:
self._delete_cgroup(self.cpu_cgroup_name)
@staticmethod
def _get_cgroup_names():
"""
:return: a mapping between the subsystem name to the cgroup name
:rtype: dict[str, str]
"""
with open("/proc/self/cgroup") as f:
lines = f.readlines()
d = {}
for line in lines:
line_split = line.rstrip().split(":")
subsystem = line_split[1]
group_name = line_split[2]
d[subsystem] = group_name
return d
#!/usr/bin/python
import argparse
import logging
import time
import sys
import operator
import random
import getpass
import os.path
import POGOProtos.Enums.PokemonMove_pb2 as PokemonMove_pb2
from collections import Counter
from custom_exceptions import GeneralPogoException
from api import PokeAuthSession
from location import Location
from pokedex import pokedex
from inventory import items
def setupLogger():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
## Mass remove pokemon. First displays the pokemon that can be "safely" released, then confirms before releasing them
def massRemove(session):
party = session.checkInventory().party
myParty = []
# Open the config file to create the exception list to NEVER transfer Pokemon
rf = open(os.path.dirname(__file__) + '/../exceptions.config')
exceptionList = rf.read().splitlines()
rf.close()
# Get the stats for all the pokemon in the party. Easier to store and nicer to display.
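    # Each IV stat (attack/defense/stamina) ranges from 0 to 15, so the three
    # sum to at most 45; the percentage below normalizes against that maximum.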
for pokemon in party:
IvPercent = ((pokemon.individual_attack + pokemon.individual_defense + pokemon.individual_stamina)*100)/45
L = [pokedex[pokemon.pokemon_id],pokemon.cp,pokemon.individual_attack,pokemon.individual_defense,pokemon.individual_stamina,IvPercent,pokemon]
myParty.append(L)
# Sort the list by name and then IV percent
myParty.sort(key = operator.itemgetter(0, 5))
safeIV = int(raw_input('\nWhat is your IV cut off? (Pokemon above this will be safe from transfer): '))
safeCP = int(raw_input('What is your CP cut off? (Pokemon above this will be safe from transfer): '))
# Create a "safe" party by removing good IVs and high CPs
safeParty = [item for item in myParty if item[5] < safeIV and item[1] < safeCP]
    # Ask the user which pokemon to transfer (input is upper-cased to match pokedex names)
userPokemon = raw_input("\nWhich pokemon do you want to transfer? (ALL will transfer everything below the safe zones): ").upper()
# If they choose ALL, then sort by IV, not by name
if userPokemon == 'ALL':
safeParty.sort(key = operator.itemgetter(5))
# Show user all the "safe to remove" pokemon
refinedMonsters = []
print '\n'
print ' NAME | CP | ATK | DEF | STA | IV% '
print '---------------- | ----- | --- | --- | --- | ----'
for monster in safeParty:
        if (monster[0] == userPokemon or userPokemon == 'ALL') and monster[0] not in exceptionList:
if monster[5] > 74:
logging.info('\033[1;32;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
elif monster[5] > 49:
logging.info('\033[1;33;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
else:
logging.info('\033[1;37;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
refinedMonsters.append(monster)
# If they can't "safely" remove any pokemon, then send them to the main menu again
if len(refinedMonsters) < 1:
print "\nCannot safely transfer any Pokemon of this type. IVs or CP are too high."
mainMenu(session)
if userPokemon == 'ALL':
logging.info('\nCan safely remove %s Pokemon',len(refinedMonsters))
else:
logging.info('\nCan safely remove %s of this Pokemon',len(refinedMonsters))
# Ask how many they want to remove
userNumber = int(raw_input("How many do you want to remove?: "))
if userNumber == 0:
mainMenu(session)
# Show the pokemon that are going to be removed to confirm to user
print '\n'
i = 0
monstersToRelease = []
print ' NAME | CP | ATK | DEF | STA | IV% '
print '---------------- | ----- | --- | --- | --- | ----'
for monster in refinedMonsters:
if i < int(userNumber):
i = i + 1
if monster[5] > 74:
logging.info('\033[1;32;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
elif monster[5] > 49:
logging.info('\033[1;33;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
else:
logging.info('\033[1;37;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5])
monstersToRelease.append(monster)
# Double check they are okay to remove
if userPokemon == 'ALL':
if int(userNumber) > len(refinedMonsters):
logging.info('\nThis will transfer %s Pokemon',len(refinedMonsters))
else:
logging.info('\nThis will transfer %s Pokemon',userNumber)
else:
if int(userNumber) > len(refinedMonsters):
logging.info('\nThis will transfer %s of this Pokemon',len(refinedMonsters))
else:
logging.info('\nThis will transfer %s of this Pokemon',userNumber)
okayToProceed = raw_input('Do you want to transfer these Pokemon? (y/n): ').lower()
# Remove the pokemon! Use randomness to reduce chance of bot detection
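    # Every 8-12 transfers (the "outlier"), the delay below is tripled to
    # mimic a human pausing between batches.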
outlier = random.randint(8,12)
index = 0
counter = 0
if okayToProceed == 'y':
for monster in monstersToRelease:
index = index + 1
counter = counter + 1
session.releasePokemon(monster[6])
logging.info('Transferring Pokemon %s of %s...',counter,len(monstersToRelease))
t = random.uniform(2.0, 5.0)
if index == outlier:
t = t * 3
outlier = random.randint(8,12)
index = 0
time.sleep(t)
# Go back to the main menu
mainMenu(session)
def massRemoveNonUnique(session):
party = session.checkInventory().party
pokemon_party = {}
iv_max_trade = int(raw_input('\nWhat is your IV cut off? (Pokemon above this will be safe from transfer): '))
cp_max_trade = int(raw_input('What is your CP cut off? (Pokemon above this will be safe from transfer): '))
rf = open(os.path.dirname(__file__) + '/../exceptions.config')
except_pokemon = rf.read().splitlines()
rf.close()
# Build the party into a dictionary
for p in party:
iv_percent = ((p.individual_attack + p.individual_defense + p.individual_stamina) * 100) / 45
pokemon_name = pokedex[p.pokemon_id]
if pokemon_name in except_pokemon:
continue
if pokemon_name not in pokemon_party:
pokemon_party[pokemon_name] = []
pokemon_party[pokemon_name].append((iv_percent, p))
# Start printing the pokemon to remove
print 'Removing the following pokemon...\n'
print ' NAME | CP | ATK | DEF | STA | IV% '
print '---------------- | ----- | --- | --- | --- | ----'
trade_pokemon = []
for k, pokemons in pokemon_party.iteritems():
if len(pokemons) <= 1:
continue
# Sort Pokemon by Highest IV first
pokemons.sort(key=operator.itemgetter(0), reverse=True)
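        # After the sort, index 0 is the best copy of each species; it and any
        # favorites are always kept, regardless of the IV/CP cut-offs.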
for index, (iv_percent, pokemon) in enumerate(pokemons):
if index == 0 or pokemon.favorite:
continue
if iv_percent >= iv_max_trade:
continue
if pokemon.cp >= cp_max_trade:
continue
trade_pokemon.append(pokemon)
color = 37
if iv_percent > 74:
color = 32
elif iv_percent > 49:
color = 33
logging.info('\033[1;%d;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s \033[0m',
color, pokedex[pokemon.pokemon_id], pokemon.cp, pokemon.individual_attack,
pokemon.individual_defense, pokemon.individual_stamina, iv_percent)
time.sleep(0.1)
# Start removing the pokemon
    if not trade_pokemon:
logging.info("\nNo Pokemon to be removed.")
else:
logging.info('\nCan safely remove %s Pokemon',len(trade_pokemon))
okayToProceed = raw_input('Do you want to transfer these Pokemon? (y/n): ').lower()
if okayToProceed == 'y':
outlier = 1
for index, pokemon in enumerate(trade_pokemon):
t = random.uniform(5.0, 7.0)
if index % outlier == 0:
outlier = random.randint(8, 12)
if index > 0:
t *= 3
print "Removed '%s'" % (pokedex[pokemon.pokemon_id].capitalize())
result = session.releasePokemon(pokemon)
time.sleep(t)
else:
        logging.info('Aborting mass transfer of pokemon.')
mainMenu(session)
def massRename(session):
party = session.checkInventory().party
myParty = []
# Get the party and put it into a nicer list
for pokemon in party:
IvPercent = ((pokemon.individual_attack + pokemon.individual_defense + pokemon.individual_stamina)*100)/45
L = [pokedex[pokemon.pokemon_id],pokemon.cp,pokemon.individual_attack,pokemon.individual_defense,pokemon.individual_stamina,IvPercent,pokemon]
myParty.append(L)
# Sort party by name and then IV percentage
myParty.sort(key = operator.itemgetter(0, 5))
# Ask the user to enter an IV threshold (to only rename good pokemon)
userThreshold = int(raw_input('Enter an IV% threshold to rename Pokemon (0 will rename all): '))
# Refine a party with the IV threshold
print '\n NAME | CP | ATK | DEF | STA | IV% '
print '---------------- | ----- | --- | --- | --- | ----'
refinedParty = []
for monster in myParty:
if monster[5] > userThreshold and monster[6].nickname != str(monster[5]) + '-' + str(monster[2]) + '/' + str(monster[3]) + '/' + str(monster[4]):
logging.info(' %-15s | %-5s | %-3s | %-3s | %-3s | %-3s | %s',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5],monster[6].nickname)
refinedParty.append(monster)
# Show how many it will rename and if they want to continue
if len(refinedParty) == 0:
logging.info('\nNo Pokemon to be renamed')
mainMenu(session)
logging.info('\nThis will rename %s Pokemon.',len(refinedParty))
okayToProceed = raw_input('Do you want to rename these Pokemon? (y/n): ').lower()
# Rename the pokemon! Use randomness to reduce chance of bot detection
outlier = random.randint(8,12)
index = 0
if okayToProceed == 'y':
for monster in refinedParty:
index = index + 1
session.nicknamePokemon(monster[6],str(monster[0]) + ' ' + str(monster[5]))
logging.info('Renamed ' + monster[0] + ' to ' + (monster[0]) + ' ' + str(monster[5]))
t = random.uniform(4.0, 8.0)
if index == outlier:
t = t * 2
outlier = random.randint(8,12)
index = 0
time.sleep(t)
mainMenu(session)
def viewCounts(session):
party = session.checkInventory().party
myParty = []
# Get the party and put it into a nicer list
for pokemon in party:
L = pokedex[pokemon.pokemon_id]
myParty.append(L)
# Count the number of pokemon, put them in a list, and sort alphabetically
countRepeats = Counter(myParty)
countListTmp = countRepeats.items()
countList = []
for entry in countListTmp:
item = list(entry)
pokedexNum = getattr(pokedex, item[0])
item.append(pokedexNum)
countList.append(item)
# logging.info(countList)
sortBy = int(raw_input('How to sort the list? (1 = Alphabetically, 2 = Total Numbers, 3 = Pokedex): '))
countList.sort(key = operator.itemgetter(sortBy - 1))
# Ask if they want to save to CSV
saveCSV = raw_input('Do you want to export to CSV file? (y/n): ').lower()
if saveCSV == 'y':
f = open('My_Pokemon_Counts.csv', 'w')
# Total number of Pokemon that can be evolved
# Number of evolutions per Pokemon
countEvolutions = 0
evolutions = 0
# Print the list of pokemon in a nicer format
if saveCSV == 'y':
f.write('NAME,COUNT,CANDIES,EVOLVE\n')
print '\n NAME | COUNT | CANDIES | EVOLVE '
print '---------------- | ----- | ------- | ------ '
for monster in countList:
evolutions = ''
skipCount = 0
pokedexNum = getattr(pokedex, monster[0])
try:
candies = session.checkInventory().candies[pokedexNum]
except:
skipCount = 1
try:
candies = session.checkInventory().candies[pokedexNum - 1]
except:
try:
candies = session.checkInventory().candies[pokedexNum - 2]
except:
candies = 0
if(pokedex.evolves[pokedexNum]):
evolutions = min(monster[1],int((candies-1)/pokedex.evolves[pokedexNum]))
if evolutions > 0 and skipCount == 0:
countEvolutions += evolutions
if evolutions == 0:
evolutions = ''
print ' %-15s | %-5d | %-7d | %s ' % (monster[0], monster[1], candies, evolutions)
# Write to the CSV
if saveCSV == 'y':
f.write(monster[0] + ',' + str(monster[1]) + ',' + str(candies) + ',' + str(evolutions) + '\n')
logging.info('\nYou can evolve a total of %s Base Pokemon.', countEvolutions)
# Close the CSV
if saveCSV == 'y':
logging.info('Saved to My_Pokemon_Counts.csv')
f.close()
mainMenu(session)
def viewPokemon(session):
party = session.checkInventory().party
myParty = []
# Get the party and put it into a nicer list
for pokemon in party:
IvPercent = ((pokemon.individual_attack + pokemon.individual_defense + pokemon.individual_stamina)*100)/45
# Get the names of the moves and remove the _FAST part of move 1
move_1 = PokemonMove_pb2.PokemonMove.Name(pokemon.move_1)
move_1 = move_1[:-5]
move_2 = PokemonMove_pb2.PokemonMove.Name(pokemon.move_2)
L = [pokedex[pokemon.pokemon_id],pokemon.cp,pokemon.individual_attack,pokemon.individual_defense,pokemon.individual_stamina,IvPercent,pokemon,move_1,move_2]
myParty.append(L)
# Sort party by name and then IV percentage
myParty.sort(key = operator.itemgetter(0, 5))
# Ask if they want to save to CSV
saveCSV = raw_input('Do you want to export to CSV file? (y/n): ').lower()
if saveCSV == 'y':
f = open('My_Pokemon.csv', 'w')
# Display the pokemon, with color coding for IVs and separation between types of pokemon
i = 0
# Write headings to the CSV
if saveCSV == 'y':
f.write('NAME,CP,ATK,DEF,STA,IV%,MOVE 1,MOVE 2\n')
print '\n NAME | CP | ATK | DEF | STA | IV% | MOVE 1 | MOVE 2 '
print '---------------- | ----- | --- | --- | --- | --- | --------------- | --------------- '
for monster in myParty:
# Write to the CSV
if saveCSV == 'y':
f.write(monster[0] + ',' + str(monster[1]) + ',' + str(monster[2]) + ',' + str(monster[3]) + ',' + str(monster[4]) + ',' + str(monster[5]) + ',' + monster[7] + ',' + monster[8] + '\n')
if i > 0:
if myParty[i][0] != myParty[i-1][0]:
print '---------------- | ----- | --- | --- | --- | --- | --------------- | --------------- '
if monster[5] > 74:
logging.info('\033[1;32;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s | %-15s | %s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5],monster[7],monster[8])
elif monster[5] > 49:
logging.info('\033[1;33;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s | %-15s | %s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5],monster[7],monster[8])
else:
logging.info('\033[1;37;40m %-15s | %-5s | %-3s | %-3s | %-3s | %-3s | %-15s | %s \033[0m',monster[0],monster[1],monster[2],monster[3],monster[4],monster[5],monster[7],monster[8])
i = i+1
# Close the CSV
if saveCSV == 'y':
logging.info('\nSaved to My_Pokemon.csv')
f.close()
mainMenu(session)
def mainMenu(session):
print '\n\n MAIN MENU'
print ' ---------'
print ' 1: View Pokemon'
print ' 2: View Counts'
print ' 3: Transfer Pokemon'
print ' 4: Transfer Duplicate Pokemon'
print ' 5: Rename Pokemon'
print ' 6: Exit'
menuChoice = int(raw_input("\nEnter choice: "))
if menuChoice == 1: viewPokemon(session)
elif menuChoice == 2: viewCounts(session)
elif menuChoice == 3: massRemove(session)
elif menuChoice == 4: massRemoveNonUnique(session)
elif menuChoice == 5: massRename(session)
elif menuChoice == 6: quit()
else: quit()
# Entry point
# Start off authentication and demo
if __name__ == '__main__':
setupLogger()
logging.debug('Logger set up')
# Read in args
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--auth", help="Auth Service", required=True)
parser.add_argument("-u", "--username", help="Username", required=True)
parser.add_argument("-p", "--password", help="Password", required=False)
parser.add_argument("-l", "--location", help="Location", required=False)
parser.add_argument("-g", "--geo_key", help="GEO API Secret")
args = parser.parse_args()
# Check service
if args.auth not in ['ptc', 'google']:
logging.error('Invalid auth service {}'.format(args.auth))
sys.exit(-1)
# Check password
if args.password == None:
args.password = getpass.getpass()
# Create PokoAuthObject
poko_session = PokeAuthSession(
args.username,
args.password,
args.auth,
geo_key=args.geo_key
)
# Authenticate with a given location
# Location is not inherent in authentication
# But is important to session
if args.location != '':
session = poko_session.authenticate(args.location)
else:
session = poko_session.authenticate()
# Time to show off what we can do
if session:
mainMenu(session)
else:
logging.critical('Session not created successfully')
from globals import *
import alife
import logging
import random
import os
def prettify_string_array(array, max_length):
"""Returns a human readable string from an array of strings."""
_string = ''
_i = 0
for entry in array:
if len(_string) > max_length:
			_string += ', and %s more.' % (len(array)-_i)
break
if _i == 0:
_string += entry
elif 0<_i<len(array)-1:
_string += ', %s' % entry
elif _i == len(array)-1:
_string += ' and %s.' % entry
_i += 1
return _string
def get_name(life):
return ' '.join(life['name'])
def get_real_direction(direction, short=False):
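	# Headings are bucketed into 45-degree compass sectors with a +/-22 degree
	# tolerance: 0/360 = east, 90 = north, 180 = west, 270 = south.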
if abs(direction)<22 or abs(direction-360)<22:
if short:
return 'e'
return 'east'
elif abs(direction-45)<22:
if short:
return 'ne'
return 'northeast'
elif abs(direction-90)<22:
if short:
return 'n'
return 'north'
elif abs(direction-135)<22:
if short:
return 'nw'
return 'northwest'
elif abs(direction-180)<22:
if short:
return 'w'
return 'west'
elif abs(direction-225)<22:
if short:
return 'sw'
return 'southwest'
elif abs(direction-270)<22:
if short:
return 's'
return 'south'
elif abs(direction-315)<22:
if short:
return 'se'
return 'southeast'
else:
if short:
return 'e'
return 'east'
def get_real_distance(distance):
"""Returns the real-life representation of a distance."""
if SETTINGS['distance unit'] == 'Yards':
return distance*YARDS
else:
return distance*METERS
def get_real_distance_string(distance, round_up=False):
_distance = get_real_distance(distance)
_mods = ''
if round_up:
_distance = int(round(_distance))
if not _distance == 1:
_mods = 's'
if SETTINGS['distance unit'] == 'Yards':
return '%s yd%s' % (_distance, _mods)
return '%s m%s' % (_distance, _mods)
def get_name_ownership(life, pronoun=False):
if pronoun:
if life['type'] == 'humanoid':
return 'his'
else:
return 'its'
return '%s\'s' % ' '.join(life['name'])
def get_introduction(life, posession=False):
if 'player' in life:
if posession:
return 'Your'
return 'You'
if life['type'] == 'humanoid':
if posession:
return '%s\'s' % get_name(life)
else:
return get_name(life)
else:
#TODO: Check limb conditions
if posession:
return 'The %s\'s' % life['species']
else:
return 'The %s' % life['species']
def _load_strings(a, directory, filenames):
for filename in [f for f in filenames if f.count('.txt')]:
_map_name = filename.strip('.txt')
TEXT_MAP[_map_name] = []
with open(os.path.join(directory, filename), 'r') as e:
TEXT_MAP[_map_name].extend([line.strip() for line in e.readlines()])
def load_strings():
#TODO: Use better walk, like one in profiles.py
	os.path.walk(TEXT_DIR, _load_strings, None)
	load_dialog()
def load_dialog():
with open(os.path.join(TEXT_DIR, 'dialog.txt')) as f:
for line in f.readlines():
line = line.rstrip()
if not line or line.startswith('#'):
continue
try:
_gist, _requirements, _text, _result = line.split(':')
		except ValueError:
			raise Exception('Error in dialog (wrong number of arguments): %s' % line)
_dialog = {'gist': _gist,
'requirements': _requirements.split(','),
'text': _text,
'result': _result}
if _gist in DIALOG_TOPICS:
DIALOG_TOPICS[_gist].append(_dialog)
else:
DIALOG_TOPICS[_gist] = [_dialog]
logging.debug('Loaded dialog.')
def generate_place_name():
if not TEXT_MAP['places']:
return 'Zoolandia %s' % WORLD_INFO['ticks']
return TEXT_MAP['places'].pop(random.randint(0, len(TEXT_MAP['places'])-1))
def generate_scheme_title():
return TEXT_MAP['nouns'][random.randint(0, len(TEXT_MAP['nouns'])-1)]
def generate_first_and_last_name_from_species(species):
_map_first_names = '%s_first_names' % species
_map_last_names = '%s_last_names' % species
if not TEXT_MAP[_map_first_names] or not TEXT_MAP[_map_last_names]:
return ('Wayne', 'Brady')
_first_name = TEXT_MAP[_map_first_names].pop(random.randint(0, len(TEXT_MAP[_map_first_names])-1))
_last_name = TEXT_MAP[_map_last_names].pop(random.randint(0, len(TEXT_MAP[_map_last_names])-1))
return (_first_name, _last_name)
def format_injury(injury):
if injury['lodged_item']:
return 'a %s lodged in the %s' % (ITEMS[injury['lodged_item']]['name'], injury['limb'])
elif injury['artery_ruptured']:
return 'a ruptured artery in the %s' % injury['limb']
elif injury['cut']:
return 'a cut to the %s' % injury['limb']
return 'nothing in particular.'
def generate_memory_phrase(memory):
_details = [key for key in memory.keys() if not key == 'text']
_memory_age = WORLD_INFO['ticks']-memory['time_created']
_topic = memory['text']
if _topic == 'friendly':
return '%s seems like a good guy.' % (' '.join(LIFE[memory['target']]['name']))
else:
print 'DIDNT HAVE A PHRASE FOR',_topic
#! /usr/bin/env python
# $Id: unicode2rstsubs.py 5618 2008-07-28 08:37:32Z strank $
# Author: David Goodger <goodger@python.org>
# Copyright: This program has been placed in the public domain.
"""
unicode2rstsubs.py -- produce character entity files (reStructuredText
substitutions) from the W3C master unicode.xml file.
This program extracts character entity and entity set information from a
unicode.xml file and produces multiple reStructuredText files (in the current
directory) containing substitutions. Entity sets are from ISO 8879 & ISO
9573-13 (combined), MathML, and HTML4. One or two files are produced for each
entity set; a second file with a "-wide.txt" suffix is produced if there are
wide-Unicode characters in the set.
The input file, unicode.xml, is maintained as part of the MathML 2
Recommendation XML source, and is available from
<http://www.w3.org/2003/entities/xml/>.
"""
import sys
import os
import optparse
import re
from xml.parsers.expat import ParserCreate
usage_msg = """Usage: %s [unicode.xml]"""
def usage(prog, status=0, msg=None):
print >>sys.stderr, usage_msg % prog
if msg:
print >>sys.stderr, msg
sys.exit(status)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 2:
inpath = argv[1]
elif len(argv) > 2:
usage(argv[0], 2,
'Too many arguments (%s): only 1 expected.' % (len(argv) - 1))
else:
inpath = 'unicode.xml'
if not os.path.isfile(inpath):
usage(argv[0], 1, 'No such file: "%s".' % inpath)
infile = open(inpath)
process(infile)
def process(infile):
grouper = CharacterEntitySetExtractor(infile)
grouper.group()
grouper.write_sets()
class CharacterEntitySetExtractor:
"""
Extracts character entity information from unicode.xml file, groups it by
entity set, and writes out reStructuredText substitution files.
"""
unwanted_entity_sets = ['stix', # unknown, buggy set
'predefined']
header = """\
.. This data file has been placed in the public domain.
.. Derived from the Unicode character mappings available from
<http://www.w3.org/2003/entities/xml/>.
Processed by unicode2rstsubs.py, part of Docutils:
<http://docutils.sourceforge.net>.
"""
def __init__(self, infile):
self.infile = infile
"""Input unicode.xml file."""
self.parser = self.setup_parser()
"""XML parser."""
self.elements = []
"""Stack of element names. Last is current element."""
self.sets = {}
"""Mapping of charent set name to set dict."""
self.charid = None
"""Current character's "id" attribute value."""
self.descriptions = {}
"""Mapping of character ID to description."""
def setup_parser(self):
parser = ParserCreate()
parser.StartElementHandler = self.StartElementHandler
parser.EndElementHandler = self.EndElementHandler
parser.CharacterDataHandler = self.CharacterDataHandler
return parser
def group(self):
self.parser.ParseFile(self.infile)
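    # The expat handlers below dispatch dynamically by element name: an
    # element such as "character" is routed to character_start / character_end
    # / character_data when a method of that name exists, so supporting a new
    # element only requires defining the matching method.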
def StartElementHandler(self, name, attributes):
self.elements.append(name)
handler = name + '_start'
if hasattr(self, handler):
getattr(self, handler)(name, attributes)
def EndElementHandler(self, name):
assert self.elements[-1] == name, \
            'unknown end-tag %r (%r)' % (name, self.elements[-1])
self.elements.pop()
handler = name + '_end'
if hasattr(self, handler):
getattr(self, handler)(name)
def CharacterDataHandler(self, data):
handler = self.elements[-1] + '_data'
if hasattr(self, handler):
getattr(self, handler)(data)
def character_start(self, name, attributes):
self.charid = attributes['id']
def entity_start(self, name, attributes):
set = self.entity_set_name(attributes['set'])
if not set:
return
if set not in self.sets:
print 'bad set: %r' % set
return
entity = attributes['id']
assert (entity not in self.sets[set]
or self.sets[set][entity] == self.charid), \
('sets[%r][%r] == %r (!= %r)'
% (set, entity, self.sets[set][entity], self.charid))
self.sets[set][entity] = self.charid
def description_data(self, data):
self.descriptions.setdefault(self.charid, '')
self.descriptions[self.charid] += data
entity_set_name_pat = re.compile(r'[0-9-]*(.+)$')
"""Pattern to strip ISO numbers off the beginning of set names."""
def entity_set_name(self, name):
"""
Return lowcased and standard-number-free entity set name.
Return ``None`` for unwanted entity sets.
"""
match = self.entity_set_name_pat.match(name)
name = match.group(1).lower()
if name in self.unwanted_entity_sets:
return None
self.sets.setdefault(name, {})
return name
def write_sets(self):
sets = self.sets.keys()
sets.sort()
for set_name in sets:
self.write_set(set_name)
def write_set(self, set_name, wide=None):
if wide:
outname = set_name + '-wide.txt'
else:
outname = set_name + '.txt'
outfile = open(outname, 'w')
print 'writing file "%s"' % outname
print >>outfile, self.header
set = self.sets[set_name]
entities = [(e.lower(), e) for e in set.keys()]
entities.sort()
longest = 0
for _, entity_name in entities:
longest = max(longest, len(entity_name))
has_wide = None
for _, entity_name in entities:
has_wide = self.write_entity(
set, set_name, entity_name, outfile, longest, wide) or has_wide
if has_wide and not wide:
self.write_set(set_name, 1)
def write_entity(self, set, set_name, entity_name, outfile, longest,
wide=None):
charid = set[entity_name]
if not wide:
for code in charid[1:].split('-'):
if int(code, 16) > 0xFFFF:
return 1 # wide-Unicode character
codes = ' '.join(['U+%s' % code for code in charid[1:].split('-')])
print >>outfile, ('.. %-*s unicode:: %s .. %s'
% (longest + 2, '|' + entity_name + '|',
codes, self.descriptions[charid]))
if __name__ == '__main__':
sys.exit(main())
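# Illustrative note (a sketch, not part of the original script): each file
# written by write_set() contains substitution definitions of the form
#
#   .. |amp| unicode:: U+00026 .. AMPERSAND
#
# which a reStructuredText document can pull in with the standard "include"
# directive, after which |amp| expands to the mapped character.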
|
|
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from Selenium2Library import utils
from Selenium2Library.locators import ElementFinder
from keywordgroup import KeywordGroup
class _ElementKeywords(KeywordGroup):
def __init__(self):
self._element_finder = ElementFinder()
# Public, element lookups
def current_frame_contains(self, text, loglevel='INFO'):
"""Verifies that current frame contains `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
"""
if not self._is_text_present(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def current_frame_should_not_contain(self, text, loglevel='INFO'):
"""Verifies that current frame does not contain `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
"""
if self._is_text_present(text):
self.log_source(loglevel)
raise AssertionError("Page should not have contained text '%s' "
"but it did" % text)
self._info("Current frame does not contain text '%s'." % text)
def element_should_contain(self, locator, expected, message=''):
"""Verifies element identified by `locator` contains text `expected`.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains text '%s'."
% (locator, expected))
actual = self._get_text(locator)
if not expected in actual:
if not message:
message = "Element '%s' should have contained text '%s' but "\
"its text was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def frame_should_contain(self, locator, text, loglevel='INFO'):
"""Verifies frame identified by `locator` contains `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for frames are `id` and `name`. See `introduction` for
details about locating elements.
"""
if not self._frame_contains(locator, text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain(self, text, loglevel='INFO'):
"""Verifies that current page contains `text`.
If this keyword fails, it automatically logs the page source
using the log level specified with the optional `loglevel` argument.
Giving `NONE` as level disables logging.
"""
if not self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is found on the current page.
`message` can be used to override default error message.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, None, message, loglevel)
def page_should_not_contain(self, text, loglevel='INFO'):
"""Verifies the current page does not contain `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
"""
if self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should not have contained text '%s'" % text)
self._info("Current page does not contain text '%s'." % text)
def page_should_not_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is not found on the current page.
`message` can be used to override the default error message.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, None, message, loglevel)
# Public, attributes
def assign_id_to_element(self, locator, id):
"""Assigns a temporary identifier to element specified by `locator`.
This is mainly useful if the locator is a complicated or slow XPath expression.
Identifier expires when the page is reloaded.
Example:
| Assign ID to Element | xpath=//div[@id="first_div"] | my id |
| Page Should Contain Element | my id |
"""
self._info("Assigning temporary id '%s' to element '%s'" % (id, locator))
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].id = '%s';" % id, element)
def element_should_be_disabled(self, locator):
"""Verifies that element identified with `locator` is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if self._is_enabled(locator):
raise AssertionError("Element '%s' is enabled." % (locator))
def element_should_be_enabled(self, locator):
"""Verifies that element identified with `locator` is enabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if not self._is_enabled(locator):
raise AssertionError("Element '%s' is disabled." % (locator))
def element_should_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is visible.
Herein, visible means that the element is logically visible, not optically
visible in the current browser viewport. For example, an element that carries
display:none is not logically visible, so using this keyword on that element
would fail.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is visible." % locator)
visible = self._is_visible(locator)
if not visible:
if not message:
message = "The element '%s' should be visible, but it "\
"is not." % locator
raise AssertionError(message)
def element_should_not_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is NOT visible.
This is the opposite of `Element Should Be Visible`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is not visible." % locator)
visible = self._is_visible(locator)
if visible:
if not message:
message = "The element '%s' should not be visible, "\
"but it is." % locator
raise AssertionError(message)
def element_text_should_be(self, locator, expected, message=''):
"""Verifies element identified by `locator` exactly contains text `expected`.
In contrast to `Element Should Contain`, this keyword does not try
a substring match but an exact match on the element identified by `locator`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains exactly text '%s'."
% (locator, expected))
element = self._element_find(locator, True, True)
actual = element.text
if expected != actual:
if not message:
message = "The text of element '%s' should have been '%s' but "\
"in fact it was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def get_element_attribute(self, attribute_locator):
"""Return value of element attribute.
`attribute_locator` consists of element locator followed by an @ sign
and attribute name, for example "element_id@class".
"""
locator, attribute_name = self._parse_attribute_locator(attribute_locator)
element = self._element_find(locator, True, False)
if element is None:
raise ValueError("Element '%s' not found." % (locator))
return element.get_attribute(attribute_name)
def get_horizontal_position(self, locator):
"""Returns horizontal position of element identified by `locator`.
The position is returned in pixels off the left side of the page,
as an integer. Fails if a matching element is not found.
See also `Get Vertical Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['x']
def get_value(self, locator):
"""Returns the value attribute of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_value(locator)
def get_text(self, locator):
"""Returns the text value of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_text(locator)
def get_vertical_position(self, locator):
"""Returns vertical position of element identified by `locator`.
The position is returned in pixels off the top of the page,
as an integer. Fails if a matching element is not found.
See also `Get Horizontal Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['y']
# Public, mouse input/events
def click_element(self, locator):
"""Click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Clicking element '%s'." % locator)
self._element_find(locator, True, True).click()
def click_element_at_coordinates(self, locator, xoffset, yoffset):
"""Click element identified by `locator` at x/y coordinates of the element.
Cursor is moved to the center of the element and the x/y coordinates are
calculated from that point.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Click clicking element '%s' in coordinates '%s', '%s'." % (locator, xoffset, yoffset))
element = self._element_find(locator, True, True)
#self._element_find(locator, True, True).click()
#ActionChains(self._current_browser()).move_to_element_with_offset(element, xoffset, yoffset).click().perform()
ActionChains(self._current_browser()).move_to_element(element).move_by_offset(xoffset, yoffset).click().perform()
def double_click_element(self, locator):
"""Double click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Double clicking element '%s'." % locator)
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).double_click(element).perform()
def focus(self, locator):
"""Sets focus to element identified by `locator`."""
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].focus();", element)
def drag_and_drop(self, source, target):
"""Drags element identified with `source` which is a locator.
Element can be moved on top of another element with `target`
argument.
`target` is a locator of the element where the dragged object is
dropped.
Examples:
| Drag And Drop | elem1 | elem2 | # Move elem1 over elem2. |
"""
src_elem = self._element_find(source, True, True)
trg_elem = self._element_find(target, True, True)
ActionChains(self._current_browser()).drag_and_drop(src_elem, trg_elem).perform()
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""Drags element identified with `source` which is a locator.
Element will be moved by `xoffset` and `yoffset`, each of which is a
positive or negative number specifying the offset.
Examples:
| Drag And Drop By Offset | myElem | 50 | -35 | # Move myElem 50px right and 35px up. |
"""
src_elem = self._element_find(source, True, True)
ActionChains(self._current_browser()).drag_and_drop_by_offset(src_elem, xoffset, yoffset).perform()
def mouse_down(self, locator):
"""Simulates pressing the left mouse button on the element specified by `locator`.
The element is pressed without releasing the mouse button.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
See also the more specific keywords `Mouse Down On Image` and
`Mouse Down On Link`.
"""
self._info("Simulating Mouse Down on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).click_and_hold(element).perform()
def mouse_out(self, locator):
"""Simulates moving mouse away from the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Out on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
size = element.size
offsetx = (size['width'] / 2) + 1
offsety = (size['height'] / 2) + 1
ActionChains(self._current_browser()).move_to_element(element).move_by_offset(offsetx, offsety).perform()
def mouse_over(self, locator):
"""Simulates hovering mouse over the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Over on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).move_to_element(element).perform()
def mouse_up(self, locator):
"""Simulates releasing the left mouse button on the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Up on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).click_and_hold(element).release(element).perform()
def open_context_menu(self, locator):
"""Opens context menu on element identified by `locator`."""
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).context_click(element).perform()
def simulate(self, locator, event):
"""Simulates `event` on element identified by `locator`.
This keyword is useful if the element has an OnEvent handler that needs to be
explicitly invoked.
See `introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True)
script = """
element = arguments[0];
eventName = arguments[1];
if (document.createEventObject) { // IE
return element.fireEvent('on' + eventName, document.createEventObject());
}
var evt = document.createEvent("HTMLEvents");
evt.initEvent(eventName, true, true);
return !element.dispatchEvent(evt);
"""
self._current_browser().execute_script(script, element, event)
def press_key(self, locator, key):
"""Simulates user pressing key on element identified by `locator`.
`key` is either a single character, or a numerical ASCII code of the key
preceded by '\\'.
Examples:
| Press Key | text_field | q |
| Press Key | login_button | \\13 | # ASCII code for enter key |
"""
if key.startswith('\\') and len(key) > 1:
key = self._map_ascii_key_code_to_key(int(key[1:]))
#if len(key) > 1:
# raise ValueError("Key value '%s' is invalid.", key)
element = self._element_find(locator, True, True)
#select it
element.send_keys(key)
# Public, links
def click_link(self, locator):
"""Clicks a link identified by locator.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._info("Clicking link '%s'." % locator)
link = self._element_find(locator, True, True, tag='a')
link.click()
def get_all_links(self):
"""Returns a list containing ids of all links found in current page.
If a link has no id, an empty string will be in the list instead.
"""
links = []
for anchor in self._element_find("tag=a", False, False, 'a'):
links.append(anchor.get_attribute('id'))
return links
def mouse_down_on_link(self, locator):
"""Simulates a mouse down event on a link.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'link')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies link identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'link', message, loglevel)
def page_should_not_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is not found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'link', message, loglevel)
# Public, images
def click_image(self, locator):
"""Clicks an image found by `locator`.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._info("Clicking image '%s'." % locator)
element = self._element_find(locator, True, False, 'image')
if element is None:
# A form may have an image as its submit trigger.
element = self._element_find(locator, True, True, 'input')
element.click()
def mouse_down_on_image(self, locator):
"""Simulates a mouse down event on an image.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'image')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'image', message, loglevel)
def page_should_not_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'image', message, loglevel)
# Public, xpath
def get_matching_xpath_count(self, xpath):
"""Returns number of elements matching `xpath`
If you wish to assert the number of matching elements, use
`Xpath Should Match X Times`.
"""
count = len(self._element_find("xpath=" + xpath, False, False))
return str(count)
def xpath_should_match_x_times(self, xpath, expected_xpath_count, message='', loglevel='INFO'):
"""Verifies that the page contains the given number of elements located by the given `xpath`.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
"""
actual_xpath_count = len(self._element_find("xpath=" + xpath, False, False))
if int(actual_xpath_count) != int(expected_xpath_count):
if not message:
message = "Xpath %s should have matched %s times but matched %s times"\
% (xpath, expected_xpath_count, actual_xpath_count)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s elements matching '%s'."
% (actual_xpath_count, xpath))
# Private
def _element_find(self, locator, first_only, required, tag=None):
browser = self._current_browser()
elements = self._element_finder.find(browser, locator, tag)
if required and len(elements) == 0:
raise ValueError("Element locator '" + locator + "' did not match any elements.")
if first_only:
if len(elements) == 0: return None
return elements[0]
return elements
def _frame_contains(self, locator, text):
browser = self._current_browser()
element = self._element_find(locator, True, True)
browser.switch_to_frame(element)
self._info("Searching for text from frame '%s'." % locator)
found = self._is_text_present(text)
browser.switch_to_default_content()
return found
def _get_text(self, locator):
element = self._element_find(locator, True, True)
if element is not None:
return element.text
return None
def _get_value(self, locator, tag=None):
element = self._element_find(locator, True, False, tag=tag)
return element.get_attribute('value') if element is not None else None
def _is_enabled(self, locator):
element = self._element_find(locator, True, True)
if not self._is_form_element(element):
raise AssertionError("ERROR: Element %s is not an input." % (locator))
if not element.is_enabled():
return False
read_only = element.get_attribute('readonly')
if read_only == 'readonly' or read_only == 'true':
return False
return True
def _is_text_present(self, text):
locator = "xpath=//*[contains(., %s)]" % utils.escape_xpath_value(text);
return self._is_element_present(locator)
def _is_visible(self, locator):
element = self._element_find(locator, True, False)
if element is not None:
return element.is_displayed()
return None
def _map_ascii_key_code_to_key(self, key_code):
key_map = {
0: Keys.NULL,
8: Keys.BACK_SPACE,
9: Keys.TAB,
10: Keys.RETURN,
13: Keys.ENTER,
24: Keys.CANCEL,
27: Keys.ESCAPE,
32: Keys.SPACE,
42: Keys.MULTIPLY,
43: Keys.ADD,
44: Keys.SEPARATOR,
45: Keys.SUBTRACT,
56: Keys.DECIMAL,
57: Keys.DIVIDE,
59: Keys.SEMICOLON,
61: Keys.EQUALS,
127: Keys.DELETE
}
key = key_map.get(key_code)
if key is None:
key = chr(key_code)
return key
def _parse_attribute_locator(self, attribute_locator):
parts = attribute_locator.rpartition('@')
if len(parts[0]) == 0:
raise ValueError("Attribute locator '%s' does not contain an element locator." % (attribute_locator))
if len(parts[2]) == 0:
raise ValueError("Attribute locator '%s' does not contain an attribute name." % (attribute_locator))
return (parts[0], parts[2])
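# Illustrative example: _parse_attribute_locator('css=div.item@class') returns
# ('css=div.item', 'class'); rpartition('@') splits at the last '@', so any
# earlier '@' characters stay inside the element locator part.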
def _is_element_present(self, locator, tag=None):
return (self._element_find(locator, True, False, tag=tag) is not None)
def _page_contains(self, text):
browser = self._current_browser()
browser.switch_to_default_content()
if self._is_text_present(text):
return True
subframes = self._element_find("xpath=//frame|//iframe", False, False)
self._debug('Current frame has %d subframes' % len(subframes))
for frame in subframes:
browser.switch_to_frame(frame)
found_text = self._is_text_present(text)
browser.switch_to_default_content()
if found_text:
return True
return False
def _page_should_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if not self._is_element_present(locator, tag):
if not message:
message = "Page should have contained %s '%s' but did not"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s '%s'." % (element_name, locator))
def _page_should_not_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if self._is_element_present(locator, tag):
if not message:
message = "Page should not have contained %s '%s'"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page does not contain %s '%s'."
% (element_name, locator))
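# Usage sketch (an illustration, not part of the library): these keywords are
# mixed into the Selenium2Library class and called from Robot Framework test
# data, e.g.
#
#   | Click Element               | xpath=//button[@type='submit'] |
#   | Page Should Contain Element | id=result | loglevel=DEBUG |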
|
|
# Custom rate is calculated for unique events in the simulation
# Author = Thomas Davis, email = txd283@bham.ac.uk / University of Birmingham
# This rate calculator has a full implementation of the pair interaction method
# for up to second nearest neighbours
# Clustering of Cu atoms and vacancies is supported
# Only first-neighbour hops are included in processes.py, and thus no code
# here has been incorporated for second-neighbour hops
from KMCLib import *
from math import floor
import numpy
import math
# values required for vacancy diffusion: energy in eV, T in K, v is the attempt frequency in s^-1
E_m_Fe = 0.722
E_m_Cu = 0.50
k = 0.862e-4
T = 563
v_Fe = 9.79e12
v_Cu = 7.16e12
kT = k*T
# pair interaction values
e_FeFe1 = -0.778
e_FeFe2 = -0.389
e_VaFe1 = -0.191
e_VaFe2 = -0.096
e_VaVa1 = 0.225
e_VaVa2 = -0.047
e_VaCu1 = -0.247
e_VaCu2 = -0.206
e_FeCu1 = -0.585
e_FeCu2 = -0.326
e_CuCu1 = -0.627
e_CuCu2 = -0.314
# The first nearest neighbours for all atoms in the lattice in types_before and types_after.
# Used to find local configurations for energy calculations
NN1 = [[ 1, 2, 3, 4, 5, 6, 7, 8,],
[ 0, 9, 10, 11, 15, 16, 19, 51,],
[ 0, 9, 10, 12, 15, 17, 20, 52,],
[ 0, 9, 11, 13, 16, 18, 21, 53,],
[ 0, 9, 12, 13, 17, 18, 22, 54,],
[ 0, 10, 11, 14, 19, 23, 24, 55,],
[ 0, 10, 12, 14, 20, 23, 25, 56,],
[ 0, 11, 13, 14, 21, 24, 26, 57,],
[ 0, 12, 13, 14, 22, 25, 26, 58,],
[ 1, 2, 3, 4, 27, 28, 29, 30,],
[ 1, 2, 5, 6, 31, 32, 39, 40,],
[ 1, 3, 5, 7, 33, 35, 41, 43,],
[ 2, 4, 6, 8, 34, 36, 42, 44,],
[ 3, 4, 7, 8, 37, 38, 45, 46,],
[ 5, 6, 7, 8, 47, 48, 49, 50,]]
# The second nearest neighbours for all atoms in the lattice in types_before and types_after.
# Used to find local configurations for energy calculations
NN2 = [[ 9, 10, 11, 12, 13, 14,],
[ 2, 3, 5, 27, 31, 33,],
[ 1, 4, 6, 28, 32, 34,],
[ 1, 4, 7, 29, 35, 37,],
[ 2, 3, 8, 30, 36, 38,],
[ 1, 6, 7, 39, 41, 47,],
[ 2, 5, 8, 40, 42, 48,],
[ 3, 5, 8, 43, 45, 49,],
[ 4, 6, 7, 44, 46, 50,],
[ 0, 15, 16, 17, 18, 59,],
[ 0, 15, 19, 20, 23, 60,],
[ 0, 16, 19, 21, 24, 61,],
[ 0, 17, 20, 22, 25, 62,],
[ 0, 18, 21, 22, 26, 63,],
[ 0, 23, 24, 25, 26, 64,]]
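# Worked example of the bond-counting trick used in rate() below (a sketch,
# assuming the type encoding Fe = 1, Cu = 0.1, Va = 0): a site with 6 Fe and
# 2 Cu first neighbours gives count = 6*1 + 2*0.1 = 6.2, so
#   floor(count)                = 6  -> number of X-Fe bonds
#   (count - floor(count))*10.0 = 2  -> number of X-Cu bonds
#   8 - 6 - 2                   = 0  -> number of X-Va bonds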
class CustomRateCalculator(KMCRateCalculatorPlugin):
# uncomment if you want a counter for the number of times the function rate() is called -- for diagnostics.
# def initialize(self):
# used for counting how many times the rate function is called.
#self._times_called = 0
def rate(self, geometry, types_before, types_after, rate_constant, process_number, coordinate):
# see above -- diagnostics
#self._times_called += 1
# find the new position of the moved atom
for i in range(1,len(types_before)):
if (float(types_before[i])-float(types_after[i])) != 0.0:
new_position = i
break
# define variables for the pair interaction. N_FeFe1_b is the number of Fe-Fe bonds before the move; 'a' stands for after the move.
N_FeFe1_b = 0.0
N_FeFe1_a = 0.0
N_FeFe2_b = 0.0
N_FeFe2_a = 0.0
N_CuCu1_b = 0.0
N_CuCu1_a = 0.0
N_CuCu2_b = 0.0
N_CuCu2_a = 0.0
N_VaVa1_b = 0.0
N_VaVa1_a = 0.0
N_VaVa2_b = 0.0
N_VaVa2_a = 0.0
N_VaFe1_b = 0.0
N_VaFe1_a = 0.0
N_VaFe2_b = 0.0
N_VaFe2_a = 0.0
N_FeVa1_b = 0.0
N_FeVa1_a = 0.0
N_FeVa2_b = 0.0
N_FeVa2_a = 0.0
N_VaCu1_b = 0.0
N_VaCu1_a = 0.0
N_VaCu2_b = 0.0
N_VaCu2_a = 0.0
N_CuVa1_b = 0.0
N_CuVa1_a = 0.0
N_CuVa2_b = 0.0
N_CuVa2_a = 0.0
N_FeCu1_b = 0.0
N_FeCu1_a = 0.0
N_FeCu2_b = 0.0
N_FeCu2_a = 0.0
N_CuFe1_b = 0.0
N_CuFe1_a = 0.0
N_CuFe2_b = 0.0
N_CuFe2_a = 0.0
# find first neighbours of Va before move
count = 0.0
# count the number of bonds at position 0 at all the possible nearest neighbours. Uses NN1 array.
for i in range(8):
count += float(types_before[int(NN1[0][i])])
# floor will reveal the number of 1st nearest neighbour Va-Fe bonds
N_VaFe1_b = floor(count)
# will reveal the number of 1st nearest neighbour Va-Cu bonds
N_VaCu1_b = (count - floor(count))*10.0
# remaining values will be 1st nearest neighbour Va-Va bonds
N_VaVa1_b = abs(8.0 - N_VaFe1_b - N_VaCu1_b)
# same method above, but now for 2nd nearest neighbours. uses NN2 array.
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[0][i])])
N_VaFe2_b = floor(count)
N_VaCu2_b = (count - floor(count))*10.0
N_VaVa2_b = abs(6.0 - N_VaFe2_b - N_VaCu2_b)
# find first neighbours of Va after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[new_position][i])])
N_VaFe1_a = floor(count)
N_VaCu1_a = (count - floor(count))*10.0
N_VaVa1_a = abs(8.0 - N_VaFe1_a - N_VaCu1_a)
# find second neighbours of Va after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[new_position][i])])
N_VaFe2_a = floor(count)
N_VaCu2_a = (count - floor(count))*10.0
N_VaVa2_a = abs(6.0 - N_VaFe2_a - N_VaCu2_a)
# Find what atom the Va is swapping with - either an Fe (1) or a Cu (0.1)
if types_after[0] == "1":
# find first neighbours of Fe before move
count = 0.0
for i in range(8):
count += float(types_before[int(NN1[new_position][i])])
N_FeFe1_b = floor(count)
N_FeCu1_b = (count - floor(count))*10.0
N_FeVa1_b = abs(8.0 - N_FeFe1_b - N_FeCu1_b)
# find second neighbours of Fe before move
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[new_position][i])])
N_FeFe2_b = floor(count)
N_FeCu2_b = (count - floor(count))*10.0
N_FeVa2_b = abs(6.0 - N_FeFe2_b - N_FeCu2_b)
# find first neighbours of Fe after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[0][i])])
N_FeFe1_a = floor(count)
N_FeCu1_a = (count - floor(count))*10.0
N_FeVa1_a = abs(8.0 - N_FeFe1_a - N_FeCu1_a)
# find second neighbours of Fe after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[0][i])])
N_FeFe2_a = floor(count)
N_FeCu2_a = (count - floor(count))*10.0
N_FeVa2_a = abs(6.0 - N_FeFe2_a - N_FeCu2_a)
else:
# find first neighbours of Cu before move
count = 0.0
for i in range(8):
count += float(types_before[int(NN1[new_position][i])])
N_CuFe1_b = floor(count)
N_CuCu1_b = (count - floor(count))*10.0
N_CuVa1_b = abs(8.0 - N_CuFe1_b - N_CuCu1_b)
# find second neighbours of Cu before move
count = 0.0
for i in range(6):
count += float(types_before[int(NN2[new_position][i])])
N_CuFe2_b = floor(count)
N_CuCu2_b = (count - floor(count))*10.0
N_CuVa2_b = abs(6.0 - N_CuFe2_b - N_CuCu2_b)
# find first neighbours of Cu after move
count = 0.0
for i in range(8):
count += float(types_after[int(NN1[0][i])])
N_CuFe1_a = floor(count)
N_CuCu1_a = (count - floor(count))*10.0
N_CuVa1_a = abs(8.0 - N_CuFe1_a - N_CuCu1_a)
# find second neighbours of Cu after move
count = 0.0
for i in range(6):
count += float(types_after[int(NN2[0][i])])
N_CuFe2_a = floor(count)
N_CuCu2_a = (count - floor(count))*10.0
N_CuVa2_a = abs(6.0 - N_CuFe2_a - N_CuCu2_a)
# find the difference before and after the jump bonds.
D_N_FeFe1 = N_FeFe1_a - N_FeFe1_b
D_N_FeFe2 = N_FeFe2_a - N_FeFe2_b
D_N_CuCu1 = N_CuCu1_a - N_CuCu1_b
D_N_CuCu2 = N_CuCu2_a - N_CuCu2_b
D_N_VaVa1 = N_VaVa1_a - N_VaVa1_b
D_N_VaVa2 = N_VaVa2_a - N_VaVa2_b
D_N_VaFe1 = N_VaFe1_a + N_FeVa1_a - N_VaFe1_b - N_FeVa1_b
D_N_VaFe2 = N_VaFe2_a + N_FeVa2_a - N_VaFe2_b - N_FeVa2_b
D_N_VaCu1 = N_VaCu1_a + N_CuVa1_a - N_VaCu1_b - N_CuVa1_b
D_N_VaCu2 = N_VaCu2_a + N_CuVa2_a - N_VaCu2_b - N_CuVa2_b
D_N_FeCu1 = N_FeCu1_a + N_CuFe1_a - N_FeCu1_b - N_CuFe1_b
D_N_FeCu2 = N_FeCu2_a + N_CuFe2_a - N_FeCu2_b - N_CuFe2_b
# binding energy calculation
E_b = (D_N_VaVa1*e_VaVa1 +
D_N_VaFe1*e_VaFe1 +
D_N_FeFe1*e_FeFe1 +
D_N_VaVa2*e_VaVa2 +
D_N_VaFe2*e_VaFe2 +
D_N_FeFe2*e_FeFe2 +
D_N_VaCu1*e_VaCu1 +
D_N_VaCu2*e_VaCu2 +
D_N_FeCu1*e_FeCu1 +
D_N_FeCu2*e_FeCu2 +
D_N_CuCu1*e_CuCu1 +
D_N_CuCu2*e_CuCu2)/2
# check whether the moving atom is Fe (1) or Cu (0.1) and calculate the rate
if types_after[0] == '1':
E = E_m_Fe + E_b
rate = v_Fe*math.exp(-E/kT)
else:
E = E_m_Cu + E_b
rate = v_Cu*math.exp(-E/kT)
# the block below is commented out -- it is the implementation of second-nearest-neighbour hops. New E_m and v_Fe/Cu values need to be defined, as they differ between first- and second-neighbour hops.
"""
distance_sq = (geometry[new_position][0] - geometry[0][0] )**2 + (geometry[new_position][1] - geometry[0][1] )**2 + (geometry[new_position][2] - geometry[0][2] )**2
if types_after[0] == '1' and distance_sq == 0.75:
E = E_m_Fe_1 + E_b
rate = v_Fe_1*math.exp(-E/kT)
print("types_after[0] == '1' and distance_sq == 0.75:")
elif types_after[0] == '1' and distance_sq == 1.0:
E = E_m_Fe_2 + E_b
rate = v_Fe_2*math.exp(-E/kT)
print("types_after[0] == '1' and distance_sq == 1.0:")
elif types_after[0] == '0.1' and distance_sq == 0.75:
E = E_m_Cu_1 + E_b
rate = v_Cu_1*math.exp(-E/kT)
print("types_after[0] == '0.1' and distance_sq == 0.75:")
elif types_after[0] == '0.1' and distance_sq == 1.0:
E = E_m_Cu_2 + E_b
rate = v_Cu_2*math.exp(-E/kT)
print("types_after[0] == '0.1' and distance_sq == 1.0:")
# print out useful variables for diagnostics
print("----------------------- CHECK -----------------------")
print("Iteration = %i"%(self._times_called))
print("Vacancy moved to %i\n"%(new_position))
print("D_N_FeFe1 = %.0f"%(D_N_FeFe1))
print("D_N_VaFe1 = %.0f"%(D_N_VaFe1))
print("D_N_VaVa1 = %.0f"%(D_N_VaVa1))
print("D_N_VaCu1 = %.0f"%(D_N_VaCu1))
print("D_N_FeCu1 = %.0f"%(D_N_FeCu1))
print("D_N_CuCu1 = %.0f\n"%(D_N_CuCu1))
print("D_N_FeFe2 = %.0f"%(D_N_FeFe2))
print("D_N_VaFe2 = %.0f"%(D_N_VaFe2))
print("D_N_VaVa2 = %.0f"%(D_N_VaVa2))
print("D_N_VaCu2 = %.0f"%(D_N_VaCu2))
print("D_N_FeCu2 = %.0f"%(D_N_FeCu2))
print("D_N_CuCu2 = %.0f\n"%(D_N_CuCu2))
print("E_b = %.2f eV"%(E_b))
print("E = %.4f eV"%(E))
print ("Rate = %f\n"%(rate))
"""
# return the new rate value
return rate
def cutoff(self):
# cutoff value for types_before and types_after lattice points. 2.0 = two supercells.
return 2.0
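# Minimal standalone sanity check (a sketch; assumes KMCLib is importable,
# since it is imported at the top of this module):
if __name__ == '__main__':
    # a jump with zero binding-energy change reduces to a plain Arrhenius rate
    print("Fe attempt rate at %.0f K: %e s^-1" % (T, v_Fe*math.exp(-E_m_Fe/kT)))
    print("Cu attempt rate at %.0f K: %e s^-1" % (T, v_Cu*math.exp(-E_m_Cu/kT)))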
|
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for template engine.
"""
from _common import unittest
from beets.util import functemplate
def _normexpr(expr):
"""Normalize an Expression object's parts, collapsing multiple
adjacent text blocks and removing empty text blocks. Generates a
sequence of parts.
"""
textbuf = []
for part in expr.parts:
if isinstance(part, basestring):
textbuf.append(part)
else:
if textbuf:
text = u''.join(textbuf)
if text:
yield text
textbuf = []
yield part
if textbuf:
text = u''.join(textbuf)
if text:
yield text
def _normparse(text):
"""Parse a template and then normalize the resulting Expression."""
return _normexpr(functemplate._parse(text))
class ParseTest(unittest.TestCase):
def test_empty_string(self):
self.assertEqual(list(_normparse(u'')), [])
def _assert_symbol(self, obj, ident):
"""Assert that an object is a Symbol with the given identifier.
"""
self.assertTrue(isinstance(obj, functemplate.Symbol),
u"not a Symbol: %s" % repr(obj))
self.assertEqual(obj.ident, ident,
u"wrong identifier: %s vs. %s" %
(repr(obj.ident), repr(ident)))
def _assert_call(self, obj, ident, numargs):
"""Assert that an object is a Call with the given identifier and
argument count.
"""
self.assertTrue(isinstance(obj, functemplate.Call),
u"not a Call: %s" % repr(obj))
self.assertEqual(obj.ident, ident,
u"wrong identifier: %s vs. %s" %
(repr(obj.ident), repr(ident)))
self.assertEqual(len(obj.args), numargs,
u"wrong argument count in %s: %i vs. %i" %
(repr(obj.ident), len(obj.args), numargs))
def test_plain_text(self):
self.assertEqual(list(_normparse(u'hello world')), [u'hello world'])
def test_escaped_character_only(self):
self.assertEqual(list(_normparse(u'$$')), [u'$'])
def test_escaped_character_in_text(self):
self.assertEqual(list(_normparse(u'a $$ b')), [u'a $ b'])
def test_escaped_character_at_start(self):
self.assertEqual(list(_normparse(u'$$ hello')), [u'$ hello'])
def test_escaped_character_at_end(self):
self.assertEqual(list(_normparse(u'hello $$')), [u'hello $'])
def test_escaped_function_delim(self):
self.assertEqual(list(_normparse(u'a $% b')), [u'a % b'])
def test_escaped_sep(self):
self.assertEqual(list(_normparse(u'a $, b')), [u'a , b'])
def test_escaped_close_brace(self):
self.assertEqual(list(_normparse(u'a $} b')), [u'a } b'])
def test_bare_value_delim_kept_intact(self):
self.assertEqual(list(_normparse(u'a $ b')), [u'a $ b'])
def test_bare_function_delim_kept_intact(self):
self.assertEqual(list(_normparse(u'a % b')), [u'a % b'])
def test_bare_opener_kept_intact(self):
self.assertEqual(list(_normparse(u'a { b')), [u'a { b'])
def test_bare_closer_kept_intact(self):
self.assertEqual(list(_normparse(u'a } b')), [u'a } b'])
def test_bare_sep_kept_intact(self):
self.assertEqual(list(_normparse(u'a , b')), [u'a , b'])
def test_symbol_alone(self):
parts = list(_normparse(u'$foo'))
self.assertEqual(len(parts), 1)
self._assert_symbol(parts[0], u"foo")
def test_symbol_in_text(self):
parts = list(_normparse(u'hello $foo world'))
self.assertEqual(len(parts), 3)
self.assertEqual(parts[0], u'hello ')
self._assert_symbol(parts[1], u"foo")
self.assertEqual(parts[2], u' world')
def test_symbol_with_braces(self):
parts = list(_normparse(u'hello${foo}world'))
self.assertEqual(len(parts), 3)
self.assertEqual(parts[0], u'hello')
self._assert_symbol(parts[1], u"foo")
self.assertEqual(parts[2], u'world')
def test_unclosed_braces_symbol(self):
self.assertEqual(list(_normparse(u'a ${ b')), [u'a ${ b'])
def test_empty_braces_symbol(self):
self.assertEqual(list(_normparse(u'a ${} b')), [u'a ${} b'])
def test_call_without_args_at_end(self):
self.assertEqual(list(_normparse(u'foo %bar')), [u'foo %bar'])
def test_call_without_args(self):
self.assertEqual(list(_normparse(u'foo %bar baz')), [u'foo %bar baz'])
def test_call_with_unclosed_args(self):
self.assertEqual(list(_normparse(u'foo %bar{ baz')),
[u'foo %bar{ baz'])
def test_call_with_unclosed_multiple_args(self):
self.assertEqual(list(_normparse(u'foo %bar{bar,bar baz')),
[u'foo %bar{bar,bar baz'])
def test_call_empty_arg(self):
parts = list(_normparse(u'%foo{}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 1)
self.assertEqual(list(_normexpr(parts[0].args[0])), [])
def test_call_single_arg(self):
parts = list(_normparse(u'%foo{bar}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 1)
self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar'])
def test_call_two_args(self):
parts = list(_normparse(u'%foo{bar,baz}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 2)
self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar'])
self.assertEqual(list(_normexpr(parts[0].args[1])), [u'baz'])
def test_call_with_escaped_sep(self):
parts = list(_normparse(u'%foo{bar$,baz}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 1)
self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar,baz'])
def test_call_with_escaped_close(self):
parts = list(_normparse(u'%foo{bar$}baz}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 1)
self.assertEqual(list(_normexpr(parts[0].args[0])), [u'bar}baz'])
def test_call_with_symbol_argument(self):
parts = list(_normparse(u'%foo{$bar,baz}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 2)
arg_parts = list(_normexpr(parts[0].args[0]))
self.assertEqual(len(arg_parts), 1)
self._assert_symbol(arg_parts[0], u"bar")
self.assertEqual(list(_normexpr(parts[0].args[1])), [u"baz"])
def test_call_with_nested_call_argument(self):
parts = list(_normparse(u'%foo{%bar{},baz}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 2)
arg_parts = list(_normexpr(parts[0].args[0]))
self.assertEqual(len(arg_parts), 1)
self._assert_call(arg_parts[0], u"bar", 1)
self.assertEqual(list(_normexpr(parts[0].args[1])), [u"baz"])
def test_nested_call_with_argument(self):
parts = list(_normparse(u'%foo{%bar{baz}}'))
self.assertEqual(len(parts), 1)
self._assert_call(parts[0], u"foo", 1)
arg_parts = list(_normexpr(parts[0].args[0]))
self.assertEqual(len(arg_parts), 1)
self._assert_call(arg_parts[0], u"bar", 1)
self.assertEqual(list(_normexpr(arg_parts[0].args[0])), [u'baz'])
class EvalTest(unittest.TestCase):
def _eval(self, template):
values = {
u'foo': u'bar',
u'baz': u'BaR',
}
functions = {
u'lower': unicode.lower,
u'len': len,
}
return functemplate.Template(template).substitute(values, functions)
def test_plain_text(self):
self.assertEqual(self._eval(u"foo"), u"foo")
def test_substitute_value(self):
self.assertEqual(self._eval(u"$foo"), u"bar")
def test_substitute_value_in_text(self):
self.assertEqual(self._eval(u"hello $foo world"), u"hello bar world")
def test_not_substitute_undefined_value(self):
self.assertEqual(self._eval(u"$bar"), u"$bar")
def test_function_call(self):
self.assertEqual(self._eval(u"%lower{FOO}"), u"foo")
def test_function_call_with_text(self):
self.assertEqual(self._eval(u"A %lower{FOO} B"), u"A foo B")
def test_nested_function_call(self):
self.assertEqual(self._eval(u"%lower{%lower{FOO}}"), u"foo")
def test_symbol_in_argument(self):
self.assertEqual(self._eval(u"%lower{$baz}"), u"bar")
def test_function_call_exception(self):
res = self._eval(u"%lower{a,b,c,d,e}")
self.assertTrue(isinstance(res, basestring))
def test_function_returning_integer(self):
self.assertEqual(self._eval(u"%len{foo}"), u"3")
def test_not_substitute_undefined_func(self):
self.assertEqual(self._eval(u"%bar{}"), u"%bar{}")
def test_not_substitute_func_with_no_args(self):
self.assertEqual(self._eval(u"%lower"), u"%lower")
def test_function_call_with_empty_arg(self):
self.assertEqual(self._eval(u"%len{}"), u"0")
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
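# Usage sketch (an illustration of the API exercised above):
#
#   template = functemplate.Template(u'%lower{$foo} world')
#   template.substitute({u'foo': u'BAR'}, {u'lower': unicode.lower})
#   # -> u'bar world'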
|
|
# pylint: disable=invalid-name
#####################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
#copyright 2010, University of Tennessee
######################################################################
"""
This module implements invariant and its related computations.
:author: Gervaise B. Alina/UTK
:author: Mathieu Doucet/UTK
:author: Jae Cho/UTK
"""
import math
import numpy as np
from sas.sascalc.dataloader.data_info import Data1D as LoaderData1D
# The minimum q-value to be used when extrapolating
Q_MINIMUM = 1e-5
# The maximum q-value to be used when extrapolating
Q_MAXIMUM = 10
# Number of steps in the extrapolation
INTEGRATION_NSTEPS = 1000
class Transform(object):
"""
Define the interface needed to compute a function or an inverse
function given some x, y
"""
def linearize_data(self, data):
"""
Linearize data so that a linear fit can be performed.
Filter out the data that can't be transformed.
:param data: LoaderData1D instance
"""
# Check that the vector lengths are equal
assert len(data.x) == len(data.y)
if data.dy is not None:
assert len(data.x) == len(data.dy)
dy = data.dy
else:
dy = np.ones(len(data.y))
# Transform the data
data_points = zip(data.x, data.y, dy)
output_points = [(self.linearize_q_value(p[0]),
math.log(p[1]),
p[2] / p[1]) for p in data_points if p[0] > 0 and \
p[1] > 0 and p[2] > 0]
x_out, y_out, dy_out = zip(*output_points)
# Create Data1D object
x_out = np.asarray(x_out)
y_out = np.asarray(y_out)
dy_out = np.asarray(dy_out)
linear_data = LoaderData1D(x=x_out, y=y_out, dy=dy_out)
return linear_data
def get_allowed_bins(self, data):
"""
Goes through the data points and returns a list of boolean values
to indicate whether each points is allowed by the model or not.
:param data: Data1D object
"""
return [p[0] > 0 and p[1] > 0 and p[2] > 0 for p in zip(data.x, data.y,
data.dy)]
def linearize_q_value(self, value):
"""
Transform the input q-value for linearization
"""
return NotImplemented
def extract_model_parameters(self, constant, slope, dconstant=0, dslope=0):
"""
extract the model parameters from the fitted constant and slope
"""
return NotImplemented
def evaluate_model(self, x):
"""
Returns an array f(x) values where f is the Transform function.
"""
return NotImplemented
def evaluate_model_errors(self, x):
"""
Returns an array of I(q) errors
"""
return NotImplemented
class Guinier(Transform):
"""
class of type Transform that performs operations related to the Guinier
function
"""
def __init__(self, scale=1, radius=60):
Transform.__init__(self)
self.scale = scale
self.radius = radius
## Uncertainty of scale parameter
self.dscale = 0
## Uncertainty of radius parameter
self.dradius = 0
def linearize_q_value(self, value):
"""
Transform the input q-value for linearization
:param value: q-value
:return: q*q
"""
return value * value
def extract_model_parameters(self, constant, slope, dconstant=0, dslope=0):
"""
assign new value to the scale and the radius
"""
self.scale = math.exp(constant)
if slope > 0:
slope = 0.0
self.radius = math.sqrt(-3 * slope)
# Errors
self.dscale = math.exp(constant) * dconstant
if slope == 0.0:
n_zero = -1.0e-24
self.dradius = -3.0 / 2.0 / math.sqrt(-3 * n_zero) * dslope
else:
self.dradius = -3.0 / 2.0 / math.sqrt(-3 * slope) * dslope
return [self.radius, self.scale], [self.dradius, self.dscale]
def evaluate_model(self, x):
"""
return F(x) = scale * exp(-(radius*x)**2 / 3)
"""
return self._guinier(x)
def evaluate_model_errors(self, x):
"""
Returns the error on I(q) for the given array of q-values
:param x: array of q-values
"""
p1 = np.array([self.dscale * math.exp(-((self.radius * q) ** 2 / 3)) \
for q in x])
p2 = np.array([self.scale * math.exp(-((self.radius * q) ** 2 / 3))\
* (-(q ** 2 / 3)) * 2 * self.radius * self.dradius for q in x])
diq2 = p1 * p1 + p2 * p2
return np.array([math.sqrt(err) for err in diq2])
def _guinier(self, x):
"""
Compute the Guinier function at x, using the radius recovered from
the linearized (inverse Guinier) fit:
F(x) = scale * exp(-(radius*x)**2 / 3)
:param x: a vector of q values
:param scale: the scale value
:param radius: the guinier radius value
:return: F(x)
"""
# the radius used here comes from the inverse (linearized) Guinier fit
# and is applied directly as the radius of the Guinier function
if self.radius <= 0:
msg = "Rg expected positive value, but got %s" % self.radius
raise ValueError(msg)
value = np.array([math.exp(-((self.radius * i) ** 2 / 3)) for i in x])
return self.scale * value
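# Note on the Guinier linearization above (a sketch of the algebra): with
# I(q) = scale * exp(-(radius*q)**2 / 3), plotting ln(I) against q**2 gives a
# straight line ln(I) = ln(scale) - (radius**2/3)*q**2. That is why
# linearize_q_value() returns q*q and extract_model_parameters() recovers
# radius = sqrt(-3*slope) and scale = exp(constant).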
class PowerLaw(Transform):
"""
class of type Transform that performs operations related to the power_law
function
"""
def __init__(self, scale=1, power=4):
Transform.__init__(self)
self.scale = scale
self.power = power
self.dscale = 0.0
self.dpower = 0.0
def linearize_q_value(self, value):
"""
Transform the input q-value for linearization
:param value: q-value
:return: log(q)
"""
return math.log(value)
def extract_model_parameters(self, constant, slope, dconstant=0, dslope=0):
"""
Assign new value to the scale and the power
"""
self.power = -slope
self.scale = math.exp(constant)
# Errors
self.dscale = math.exp(constant) * dconstant
self.dpower = -dslope
return [self.power, self.scale], [self.dpower, self.dscale]
def evaluate_model(self, x):
"""
given a scale and a power, evaluate the power_law
function at x
"""
return self._power_law(x)
def evaluate_model_errors(self, x):
"""
Returns the error on I(q) for the given array of q-values
:param x: array of q-values
"""
p1 = np.array([self.dscale * math.pow(q, -self.power) for q in x])
p2 = np.array([self.scale * self.power * math.pow(q, -self.power - 1)\
* self.dpower for q in x])
diq2 = p1 * p1 + p2 * p2
return np.array([math.sqrt(err) for err in diq2])
def _power_law(self, x):
"""
F(x) = scale* (x)^(-power)
when power = 4 the model is Porod's law,
otherwise a generic power law
The model has three parameters: ::
1. x: a vector of q values
2. power: power of the function
3. scale : scale factor value
:param x: array
:return: F(x)
"""
if self.power <= 0:
msg = "Power_law function expected positive power,"
msg += " but got %s" % self.power
raise ValueError(msg)
if self.scale <= 0:
msg = "scale expected positive value, but got %s" % self.scale
raise ValueError(msg)
value = np.array([math.pow(i, -self.power) for i in x])
return self.scale * value
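# Note on the power-law linearization above (a sketch of the algebra): with
# I(q) = scale * q**(-power), plotting ln(I) against ln(q) gives the line
# ln(I) = ln(scale) - power*ln(q). That is why linearize_q_value() returns
# log(q) and extract_model_parameters() recovers power = -slope and
# scale = exp(constant).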
class Extrapolator(object):
"""
Extrapolate I(q) distribution using a given model
"""
def __init__(self, data, model=None):
"""
Determine a and b given a linear equation y = ax + b
If a model is given, it will be used to linearize the data before
the extrapolation is performed. If None,
a simple linear fit will be done.
:param data: data containing x and y such as y = ax + b
:param model: optional Transform object
"""
self.data = data
self.model = model
# Set qmin as the lowest non-zero value
self.qmin = Q_MINIMUM
for q_value in self.data.x:
if q_value > 0:
self.qmin = q_value
break
self.qmax = max(self.data.x)
def fit(self, power=None, qmin=None, qmax=None):
"""
Fit data for y = ax + b return a and b
:param power: a fixed power value, otherwise None
:param qmin: Minimum Q-value
:param qmax: Maximum Q-value
"""
if qmin is None:
qmin = self.qmin
if qmax is None:
qmax = self.qmax
# Identify the bin range for the fit
idx = (self.data.x >= qmin) & (self.data.x <= qmax)
fx = np.zeros(len(self.data.x))
# Uncertainty
if type(self.data.dy) == np.ndarray and \
len(self.data.dy) == len(self.data.x) and \
np.all(self.data.dy > 0):
sigma = self.data.dy
else:
sigma = np.ones(len(self.data.x))
# Compute theory data f(x)
fx[idx] = self.data.y[idx]
# Linearize the data
if self.model is not None:
linearized_data = self.model.linearize_data(\
LoaderData1D(self.data.x[idx],
fx[idx],
dy=sigma[idx]))
else:
linearized_data = LoaderData1D(self.data.x[idx],
fx[idx],
dy=sigma[idx])
##power is given only for function = power_law
if power is not None:
sigma2 = linearized_data.dy * linearized_data.dy
a = -(power)
b = (np.sum(linearized_data.y / sigma2) \
- a * np.sum(linearized_data.x / sigma2)) / np.sum(1.0 / sigma2)
deltas = linearized_data.x * a + \
np.ones(len(linearized_data.x)) * b - linearized_data.y
residuals = np.sum(deltas * deltas / sigma2)
err = math.fabs(residuals) / np.sum(1.0 / sigma2)
return [a, b], [0, math.sqrt(err)]
else:
A = np.vstack([linearized_data.x / linearized_data.dy, 1.0 / linearized_data.dy]).T
(p, residuals, _, _) = np.linalg.lstsq(A, linearized_data.y / linearized_data.dy)
# Get the covariance matrix, defined as inv_cov = a_transposed * a
err = np.zeros(2)
try:
inv_cov = np.dot(A.transpose(), A)
cov = np.linalg.pinv(inv_cov)
err_matrix = math.fabs(residuals) * cov
err = [math.sqrt(err_matrix[0][0]), math.sqrt(err_matrix[1][1])]
except Exception:
err = [-1.0, -1.0]
return p, err
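# Note on Extrapolator.fit above (a sketch of the algebra): dividing
# y = a*x + b through by the uncertainty dy gives y/dy = a*(x/dy) + b*(1/dy),
# which is the weighted design matrix A handed to np.linalg.lstsq in the
# free-slope branch. The fixed-power branch pins a = -power and solves for b
# alone as the weighted average of (y - a*x).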
class InvariantCalculator(object):
"""
Compute invariant if data is given.
Can provide volume fraction and surface area if the user provides
Porod constant and contrast values.
:precondition: the user must send data of type DataLoader.Data1D
and provide background and scale values.
:note: Some computations depend on each other.
"""
def __init__(self, data, background=0, scale=1):
"""
Initialize variables.
:param data: data must be of type DataLoader.Data1D
:param background: Background value. The data will be corrected
before processing
:param scale: Scaling factor for I(q). The data will be corrected
before processing
"""
# Background and scale should be private data members if the only way to
# change them is by instantiating a new object.
self._background = background
self._scale = scale
# slit height for smeared data
self._smeared = None
# The data should be private
self._data = self._get_data(data)
# get the dxl if the data is smeared: This is done only once on init.
if self._data.dxl is not None and self._data.dxl.all() > 0:
# assumes constant dxl
self._smeared = self._data.dxl[0]
# Since there are multiple variants of Q*, you should force the
# user to use the get method and keep Q* a private data member
self._qstar = None
# You should keep the error on Q* so you can reuse it without
# recomputing the whole thing.
self._qstar_err = 0
# Extrapolation parameters
self._low_extrapolation_npts = 4
self._low_extrapolation_function = Guinier()
self._low_extrapolation_power = None
self._low_extrapolation_power_fitted = None
self._high_extrapolation_npts = 4
self._high_extrapolation_function = PowerLaw()
self._high_extrapolation_power = None
self._high_extrapolation_power_fitted = None
# Extrapolation range
self._low_q_limit = Q_MINIMUM
def _get_data(self, data):
"""
:note: this function must be called before computing any type
of invariant
:return: new data = self._scale *data - self._background
"""
if not issubclass(data.__class__, LoaderData1D):
#Process only data that inherits from DataLoader.Data_info.Data1D
raise ValueError, "Data must be of type DataLoader.Data1D"
#from copy import deepcopy
new_data = (self._scale * data) - self._background
# Check that the vector lengths are equal
assert len(new_data.x) == len(new_data.y)
# Verify that the errors are set correctly
if new_data.dy is None or len(new_data.x) != len(new_data.dy) or \
(min(new_data.dy) == 0 and max(new_data.dy) == 0):
new_data.dy = np.ones(len(new_data.x))
return new_data
def _fit(self, model, qmin=Q_MINIMUM, qmax=Q_MAXIMUM, power=None):
"""
fit data with function using
data = self._get_data()
fx = Functor(data , function)
y = data.y
slope, constant = linalg.lstsq(y,fx)
:param qmin: data first q value to consider during the fit
:param qmax: data last q value to consider during the fit
:param power : power value to consider for power-law
:param function: the function to use during the fit
:return a: the scale of the function
:return b: the other parameter of the function; for guinier it will be
the radius, for power_law the power value
"""
extrapolator = Extrapolator(data=self._data, model=model)
p, dp = extrapolator.fit(power=power, qmin=qmin, qmax=qmax)
return model.extract_model_parameters(constant=p[1], slope=p[0],
dconstant=dp[1], dslope=dp[0])
def _get_qstar(self, data):
"""
Compute invariant for pinhole data.
This invariant is given by: ::
q_star = x0**2 *y0 *dx0 +x1**2 *y1 *dx1
+ ..+ xn**2 *yn *dxn for non smeared data
q_star = dxl0 *x0 *y0 *dx0 +dxl1 *x1 *y1 *dx1
+ ..+ dlxn *xn *yn *dxn for smeared data
where n = len(data.x)-1
dxl = slit height dQl
dxi = ((xi+1 - xi) + (xi - xi-1))/2
dx0 = (x1 - x0)/2
dxn = (xn - xn-1)/2
:param data: the data to use to compute invariant.
:return q_star: invariant value for pinhole data. q_star > 0
"""
if len(data.x) <= 1 or len(data.y) <= 1 or len(data.x) != len(data.y):
msg = "Length x and y must be equal"
msg += " and greater than 1; got x=%s, y=%s" % (len(data.x), len(data.y))
raise ValueError, msg
else:
# Take care of smeared data
if self._smeared is None:
gx = data.x * data.x
# assumes that len(x) == len(dxl).
else:
gx = data.dxl * data.x
n = len(data.x) - 1
#compute the first delta q
dx0 = (data.x[1] - data.x[0]) / 2
#compute the last delta q
dxn = (data.x[n] - data.x[n - 1]) / 2
total = 0
total += gx[0] * data.y[0] * dx0
total += gx[n] * data.y[n] * dxn
if len(data.x) == 2:
return total
else:
#iterate over the elements between the first and the last
for i in xrange(1, n):
dxi = (data.x[i + 1] - data.x[i - 1]) / 2
total += gx[i] * data.y[i] * dxi
return total
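# Worked example for _get_qstar above (a sketch): for pinhole data with
# x = [0.1, 0.2, 0.3] and y = [1.0, 1.0, 1.0], the weights are dx0 = 0.05,
# dx1 = (0.3 - 0.1)/2 = 0.1 and dxn = 0.05, giving
# q* = 0.1**2*0.05 + 0.2**2*0.1 + 0.3**2*0.05 = 0.009.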
def _get_qstar_uncertainty(self, data):
"""
Compute invariant uncertainty with pinhole data.
This uncertainty is given as follow: ::
dq_star = math.sqrt[(x0**2*(dy0)*dx0)**2 +
(x1**2 *(dy1)*dx1)**2 + ..+ (xn**2 *(dyn)*dxn)**2 ]
where n = len(data.x)-1
dxi = ((xi+1 - xi) + (xi - xi-1))/2
dx0 = (x1 - x0)/2
dxn = (xn - xn-1)/2
dyn: error on yn
:param data:
:note: if data doesn't contain dy assume dy= math.sqrt(data.y)
"""
if len(data.x) <= 1 or len(data.y) <= 1 or \
len(data.x) != len(data.y) or \
(data.dy is not None and (len(data.dy) != len(data.y))):
msg = "Length of data.x and data.y must be equal"
msg += " and greater than 1; got x=%s, y=%s" % (len(data.x), len(data.y))
raise ValueError, msg
else:
#Create error for data without dy error
if data.dy is None:
dy = np.sqrt(data.y)
else:
dy = data.dy
# Take care of smeared data
if self._smeared is None:
gx = data.x * data.x
# assumes that len(x) == len(dxl).
else:
gx = data.dxl * data.x
n = len(data.x) - 1
#compute the first delta
dx0 = (data.x[1] - data.x[0]) / 2
#compute the last delta
dxn = (data.x[n] - data.x[n - 1]) / 2
total = 0
total += (gx[0] * dy[0] * dx0) ** 2
total += (gx[n] * dy[n] * dxn) ** 2
if len(data.x) == 2:
return math.sqrt(total)
else:
#iterate over the elements between the first and the last
for i in xrange(1, n):
dxi = (data.x[i + 1] - data.x[i - 1]) / 2
total += (gx[i] * dy[i] * dxi) ** 2
return math.sqrt(total)
def _get_extrapolated_data(self, model, npts=INTEGRATION_NSTEPS,
q_start=Q_MINIMUM, q_end=Q_MAXIMUM):
"""
        :return: extrapolated data created from the fitted model
"""
#create new Data1D to compute the invariant
q = np.linspace(start=q_start,
stop=q_end,
num=npts,
endpoint=True)
iq = model.evaluate_model(q)
diq = model.evaluate_model_errors(q)
result_data = LoaderData1D(x=q, y=iq, dy=diq)
if self._smeared is not None:
result_data.dxl = self._smeared * np.ones(len(q))
return result_data
def get_data(self):
"""
:return: self._data
"""
return self._data
def get_extrapolation_power(self, range='high'):
"""
:return: the fitted power for power law function for a given
extrapolation range
"""
if range == 'low':
return self._low_extrapolation_power_fitted
return self._high_extrapolation_power_fitted
def get_qstar_low(self):
"""
Compute the invariant for extrapolated data at low q range.
Implementation:
data = self._get_extra_data_low()
return self._get_qstar()
:return q_star: the invariant for data extrapolated at low q.
"""
# Data boundaries for fitting
qmin = self._data.x[0]
qmax = self._data.x[self._low_extrapolation_npts - 1]
# Extrapolate the low-Q data
p, _ = self._fit(model=self._low_extrapolation_function,
qmin=qmin,
qmax=qmax,
power=self._low_extrapolation_power)
self._low_extrapolation_power_fitted = p[0]
# Distribution starting point
self._low_q_limit = Q_MINIMUM
if Q_MINIMUM >= qmin:
self._low_q_limit = qmin / 10
data = self._get_extrapolated_data(\
model=self._low_extrapolation_function,
npts=INTEGRATION_NSTEPS,
q_start=self._low_q_limit, q_end=qmin)
# Systematic error
        # If we have smearing, the shape of the I(q) distribution at low Q
        # may not be a Guinier or simple power law. The following is
        # a conservative estimate of the systematic error.
err = qmin * qmin * math.fabs((qmin - self._low_q_limit) * \
(data.y[0] - data.y[INTEGRATION_NSTEPS - 1]))
return self._get_qstar(data), self._get_qstar_uncertainty(data) + err
def get_qstar_high(self):
"""
Compute the invariant for extrapolated data at high q range.
Implementation:
data = self._get_extra_data_high()
return self._get_qstar()
:return q_star: the invariant for data extrapolated at high q.
"""
# Data boundaries for fitting
x_len = len(self._data.x) - 1
qmin = self._data.x[x_len - (self._high_extrapolation_npts - 1)]
qmax = self._data.x[x_len]
# fit the data with a model to get the appropriate parameters
p, _ = self._fit(model=self._high_extrapolation_function,
qmin=qmin,
qmax=qmax,
power=self._high_extrapolation_power)
self._high_extrapolation_power_fitted = p[0]
#create new Data1D to compute the invariant
data = self._get_extrapolated_data(\
model=self._high_extrapolation_function,
npts=INTEGRATION_NSTEPS,
q_start=qmax, q_end=Q_MAXIMUM)
return self._get_qstar(data), self._get_qstar_uncertainty(data)
def get_extra_data_low(self, npts_in=None, q_start=None, npts=20):
"""
        Returns the extrapolated data used for the low-Q invariant calculation.
By default, the distribution will cover the data points used for the
extrapolation. The number of overlap points is a parameter (npts_in).
By default, the maximum q-value of the distribution will be
the minimum q-value used when extrapolating for the purpose of the
invariant calculation.
:param npts_in: number of data points for which
the extrapolated data overlap
        :param q_start: the minimum q-value to use for the extrapolated data
:param npts: the number of points in the extrapolated distribution
"""
# Get extrapolation range
if q_start is None:
q_start = self._low_q_limit
if npts_in is None:
npts_in = self._low_extrapolation_npts
q_end = self._data.x[max(0, npts_in - 1)]
if q_start >= q_end:
return np.zeros(0), np.zeros(0)
return self._get_extrapolated_data(\
model=self._low_extrapolation_function,
npts=npts,
q_start=q_start, q_end=q_end)
def get_extra_data_high(self, npts_in=None, q_end=Q_MAXIMUM, npts=20):
"""
Returns the extrapolated data used for the high-Q invariant calculation.
By default, the distribution will cover the data points used for the
extrapolation. The number of overlap points is a parameter (npts_in).
By default, the maximum q-value of the distribution will be Q_MAXIMUM,
the maximum q-value used when extrapolating for the purpose of the
invariant calculation.
:param npts_in: number of data points for which the
extrapolated data overlap
        :param q_end: the maximum q-value to use for the extrapolated data
:param npts: the number of points in the extrapolated distribution
"""
# Get extrapolation range
if npts_in is None:
npts_in = self._high_extrapolation_npts
_npts = len(self._data.x)
        q_start = self._data.x[max(0, _npts - npts_in)]
if q_start >= q_end:
return np.zeros(0), np.zeros(0)
return self._get_extrapolated_data(\
model=self._high_extrapolation_function,
npts=npts,
q_start=q_start, q_end=q_end)
def set_extrapolation(self, range, npts=4, function=None, power=None):
"""
Set the extrapolation parameters for the high or low Q-range.
Note that this does not turn extrapolation on or off.
        :param range: a keyword (string) that sets the type of extrapolation
        :param npts: the number of q points of data to consider
            for extrapolation
        :param function: a keyword (string) that selects the function to use
            for extrapolation
        :param power: the power to use for a power_law function
"""
range = range.lower()
        if range not in ['high', 'low']:
            raise ValueError("Extrapolation range should be 'high' or 'low'")
        function = function.lower()
        if function not in ['power_law', 'guinier']:
            msg = "Extrapolation function should be 'guinier' or 'power_law'"
            raise ValueError(msg)
if range == 'high':
            if function != 'power_law':
                msg = "Extrapolation only allows a power law at high Q"
                raise ValueError(msg)
self._high_extrapolation_npts = npts
self._high_extrapolation_power = power
self._high_extrapolation_power_fitted = power
else:
if function == 'power_law':
self._low_extrapolation_function = PowerLaw()
else:
self._low_extrapolation_function = Guinier()
self._low_extrapolation_npts = npts
self._low_extrapolation_power = power
self._low_extrapolation_power_fitted = power
def get_qstar(self, extrapolation=None):
"""
Compute the invariant of the local copy of data.
:param extrapolation: string to apply optional extrapolation
:return q_star: invariant of the data within data's q range
        :warning: When setting the data to a Data1D, the user is
            responsible for checking that the scale and the background
            have been properly applied to the data.
"""
self._qstar = self._get_qstar(self._data)
self._qstar_err = self._get_qstar_uncertainty(self._data)
if extrapolation is None:
return self._qstar
# Compute invariant plus invariant of extrapolated data
extrapolation = extrapolation.lower()
if extrapolation == "low":
qs_low, dqs_low = self.get_qstar_low()
qs_hi, dqs_hi = 0, 0
elif extrapolation == "high":
qs_low, dqs_low = 0, 0
qs_hi, dqs_hi = self.get_qstar_high()
elif extrapolation == "both":
qs_low, dqs_low = self.get_qstar_low()
qs_hi, dqs_hi = self.get_qstar_high()
self._qstar += qs_low + qs_hi
self._qstar_err = math.sqrt(self._qstar_err * self._qstar_err \
+ dqs_low * dqs_low + dqs_hi * dqs_hi)
return self._qstar
def get_surface(self, contrast, porod_const, extrapolation=None):
"""
Compute the specific surface from the data.
Implementation::
V = self.get_volume_fraction(contrast, extrapolation)
Compute the surface given by:
surface = (2*pi *V(1- V)*porod_const)/ q_star
:param contrast: contrast value to compute the volume
:param porod_const: Porod constant to compute the surface
:param extrapolation: string to apply optional extrapolation
:return: specific surface
"""
# Compute the volume
volume = self.get_volume_fraction(contrast, extrapolation)
return 2 * math.pi * volume * (1 - volume) * \
float(porod_const) / self._qstar
def get_volume_fraction(self, contrast, extrapolation=None):
"""
        Compute the volume fraction, which is deduced as follows: ::
            q_star = 2*(pi*contrast)**2 * volume*(1 - volume)
            for k = 10**(-8)*q_star/(2*(pi*|contrast|)**2)
            we get 2 values of volume:
                with 1 - 4*k >= 0
                volume1 = (1 - sqrt(1 - 4*k))/2
                volume2 = (1 + sqrt(1 - 4*k))/2
            q_star: the invariant value, including extrapolation if applied,
                in units of 1/A^(3)*1/cm
                q_star = self.get_qstar()
            the result returned satisfies 0 <= volume <= 1
        :param contrast: contrast value provided by the user, of type float.
            contrast unit is 1/A^(2) = 10^(16) cm^(-2)
        :param extrapolation: string to apply optional extrapolation
        :return: volume fraction
        :note: volume fraction must have no unit
"""
        if contrast <= 0:
            raise ValueError("The contrast parameter must be greater than zero")
# Make sure Q star is up to date
self.get_qstar(extrapolation)
        if self._qstar <= 0:
            msg = "Invalid invariant: Invariant Q* must be greater than zero"
            raise RuntimeError(msg)
# Compute intermediate constant
k = 1.e-8 * self._qstar / (2 * (math.pi * math.fabs(float(contrast))) ** 2)
# Check discriminant value
discrim = 1 - 4 * k
# Compute volume fraction
        if discrim < 0:
            msg = "Could not compute the volume fraction: negative discriminant"
            raise RuntimeError(msg)
        elif discrim == 0:
            return 0.5
        else:
            volume1 = 0.5 * (1 - math.sqrt(discrim))
            volume2 = 0.5 * (1 + math.sqrt(discrim))
            if 0 <= volume1 <= 1:
                return volume1
            elif 0 <= volume2 <= 1:
                return volume2
            msg = "Could not compute the volume fraction: inconsistent results"
            raise RuntimeError(msg)
def get_qstar_with_error(self, extrapolation=None):
"""
Compute the invariant uncertainty.
This uncertainty computation depends on whether or not the data is
smeared.
:param extrapolation: string to apply optional extrapolation
:return: invariant, the invariant uncertainty
"""
self.get_qstar(extrapolation)
return self._qstar, self._qstar_err
def get_volume_fraction_with_error(self, contrast, extrapolation=None):
"""
Compute uncertainty on volume value as well as the volume fraction
This uncertainty is given by the following equation: ::
dV = 0.5 * (4*k* dq_star) /(2* math.sqrt(1-k* q_star))
for k = 10^(-8)*q_star/(2*(pi*|contrast|)**2)
q_star: the invariant value including extrapolated value if existing
dq_star: the invariant uncertainty
dV: the volume uncertainty
The uncertainty will be set to -1 if it can't be computed.
:param contrast: contrast value
:param extrapolation: string to apply optional extrapolation
:return: V, dV = volume fraction, error on volume fraction
"""
volume = self.get_volume_fraction(contrast, extrapolation)
# Compute error
k = 1.e-8 * self._qstar / (2 * (math.pi * math.fabs(float(contrast))) ** 2)
        # Check the value inside the sqrt function
        value = 1 - k * self._qstar
        if value <= 0:
            # Flag the uncertainty as not computable, as documented above
            uncertainty = -1
        else:
            # Compute the uncertainty
            uncertainty = math.fabs((0.5 * 4 * k * \
                self._qstar_err) / (2 * math.sqrt(1 - k * self._qstar)))
return volume, uncertainty
def get_surface_with_error(self, contrast, porod_const, extrapolation=None):
"""
Compute uncertainty of the surface value as well as the surface value.
        The uncertainty is given as follows: ::
            dS = porod_const * 2*pi * [(dV - 2*V*dV)/q_star
                 + dq_star*(V - V**2)]
            q_star: the invariant value
            dq_star: the invariant uncertainty
            V: the volume fraction value
            dV: the volume uncertainty
:param contrast: contrast value
:param porod_const: porod constant value
:param extrapolation: string to apply optional extrapolation
:return S, dS: the surface, with its uncertainty
"""
# We get the volume fraction, with error
# get_volume_fraction_with_error calls get_volume_fraction
# get_volume_fraction calls get_qstar
# which computes Qstar and dQstar
v, dv = self.get_volume_fraction_with_error(contrast, extrapolation)
s = self.get_surface(contrast=contrast, porod_const=porod_const,
extrapolation=extrapolation)
ds = porod_const * 2 * math.pi * ((dv - 2 * v * dv) / self._qstar\
+ self._qstar_err * (v - v ** 2))
return s, ds
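

# ---------------------------------------------------------------------------
# Illustrative sketches (not part of the original module). They restate, in
# a self-contained form, two pieces of the math used by the class above;
# the helper names and all numerical values are made up.
def _demo_qstar_quadrature():
    """The pinhole q* sum in _get_qstar is a trapezoid rule on q**2 * I(q)."""
    import numpy as np
    x = np.linspace(0.01, 0.3, 50)
    y = np.exp(-(50.0 * x) ** 2 / 3.0)  # Guinier-like intensity with Rg = 50
    gx = x * x
    n = len(x) - 1
    total = gx[0] * y[0] * (x[1] - x[0]) / 2
    total += gx[n] * y[n] * (x[n] - x[n - 1]) / 2
    for i in range(1, n):
        total += gx[i] * y[i] * (x[i + 1] - x[i - 1]) / 2
    # The end-point and mid-point weights reproduce np.trapz exactly.
    assert abs(total - np.trapz(gx * y, x)) < 1e-10
    return total

def _demo_volume_fraction_roots():
    """The quadratic solved in get_volume_fraction has two roots summing to 1."""
    import math
    q_star, contrast = 7.0e-5, 2.0e-6  # hypothetical values
    k = 1.e-8 * q_star / (2 * (math.pi * abs(contrast)) ** 2)
    discrim = 1 - 4 * k
    volume1 = 0.5 * (1 - math.sqrt(discrim))
    volume2 = 0.5 * (1 + math.sqrt(discrim))
    assert abs((volume1 + volume2) - 1) < 1e-12
    return volume1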
|
|
#/u/GoldenSights
import bot
import bot3
import datetime
import praw4 as praw
import prawcore
import random
import requests
import sqlite3
import string
import sys
import time
import traceback
import types
USERAGENT = '''
/u/GoldenSights Usernames data collection:
Gathering the creation dates of user accounts for visualization.
More at https://github.com/voussoir/reddit/tree/master/Usernames
'''.replace('\n', ' ').strip()
sql = sqlite3.connect('D:\\git\\reddit\\usernames\\un.db')
cur = sql.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS users(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
link_karma INT,
comment_karma INT,
total_karma INT,
available INT,
lastscan INT,
lowername TEXT)
''')
cur.execute('CREATE INDEX IF NOT EXISTS userindex ON users(idint)')
cur.execute('CREATE INDEX IF NOT EXISTS index_users_available ON users(available)')
cur.execute('CREATE INDEX IF NOT EXISTS nameindex ON users(lowername)')
sql.commit()
# These numbers are used for interpreting the tuples that come from SELECT
SQL_USER_COLUMNS = [
'idint',
'idstr',
'created',
'human',
'name',
'link_karma',
'comment_karma',
'total_karma',
'available',
'lastscan',
'lowername',
]
SQL_USER = {key:index for (index, key) in enumerate(SQL_USER_COLUMNS)}
AVAILABILITY = {True:'available', False:'unavailable', 'available':1, 'unavailable':0}
HEADER_FULL = ' ID CREATED NAME LINK COMMENT TOTAL LAST SCANNED'
HEADER_BRIEF = ' LAST SCANNED | NAME'
MEMBERFORMAT_FULL = '{id:>6} {created} {username:<20} {link_karma:>9} {comment_karma:>9} ({total_karma:>10}) | {lastscan}'
MEMBERFORMAT_BRIEF = '{lastscan} | {username}'
MIN_LASTSCAN_DIFF = 86400 * 2000
# Don't rescan a name if we scanned it within this window
# (in seconds: 86400 seconds per day, times 2000 days).
VALID_CHARS = string.ascii_letters + string.digits + '_-'
# If True, print the name of the user we're about to fetch.
# Good for debugging problematic users.
PREPRINT = False
print('Logging in.')
r = bot.login()
r3 = bot3.login(bot3.praw.Reddit(USERAGENT))
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an integer to a base36 string."""
    if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
    if isinstance(i, int):
        return base36encode(i)
    if isinstance(i, str):
        return base36decode(i)
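# Hedged sanity check (illustrative, not in the original script): reddit
# user IDs are base36, and base36decode is case-insensitive via int(x, 36).
assert base36encode(1234567) == 'QGLJ'
assert base36decode('qglj') == 1234567
assert b36(b36('abc123')) == 'ABC123'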
def fetchgenerator(cur):
'''
    Create a generator from cur fetches so I don't
have to use while loops for everything
'''
while True:
fetch = cur.fetchone()
if fetch is None:
break
yield fetch
def getentry(**kwargs):
if len(kwargs) != 1:
raise Exception("Only 1 argument please")
kw = list(kwargs.keys())[0]
if kw == 'idint':
cur.execute('SELECT * FROM users WHERE idint=?', [kwargs[kw]])
elif kw == 'idstr':
cur.execute('SELECT * FROM users WHERE idstr=?', [kwargs[kw]])
elif kw == 'name':
cur.execute('SELECT * FROM users WHERE lowername=?', [kwargs[kw].lower()])
else:
return None
return cur.fetchone()
def getnow(timestamp=True):
now = datetime.datetime.now(datetime.timezone.utc)
if timestamp:
return now.timestamp()
return now
def human(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
def memberformat_brief(data):
'''
Shorter version of memberformat which I'm using for the "available" list.
'''
name = data[SQL_USER['name']]
lastscan = data[SQL_USER['lastscan']]
lastscan = human(lastscan)
out = MEMBERFORMAT_BRIEF.format(lastscan=lastscan, username=name)
return out
def memberformat_full(data):
'''
Given a data list, create a string that will
become a single row in one of the show files.
'''
idstr = data[SQL_USER['idstr']]
# Usernames are maximum of 20 chars
name = data[SQL_USER['name']]
created = data[SQL_USER['human']]
created = created or ''
link_karma = data[SQL_USER['link_karma']]
comment_karma = data[SQL_USER['comment_karma']]
total_karma = data[SQL_USER['total_karma']]
if link_karma is None:
link_karma = 'None'
comment_karma = 'None'
total_karma = 'None'
else:
link_karma = '{:,}'.format(link_karma)
comment_karma = '{:,}'.format(comment_karma)
total_karma = '{:,}'.format(total_karma)
lastscan = data[SQL_USER['lastscan']]
lastscan = human(lastscan)
out = MEMBERFORMAT_FULL.format(
id=idstr,
created=created,
username=name,
link_karma=link_karma,
comment_karma=comment_karma,
total_karma=total_karma,
lastscan=lastscan,
)
return out
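# Hedged illustration (not part of the original script): build a fake row
# in SQL_USER_COLUMNS order and render it with memberformat_full. All of
# the values below are made up.
def _demo_memberformat_full():
    row = [None] * len(SQL_USER)
    row[SQL_USER['idstr']] = '3qkcn'
    row[SQL_USER['human']] = 'Jan 01 2012 00:00:00 UTC'
    row[SQL_USER['name']] = 'GoldenSights'
    row[SQL_USER['link_karma']] = 1234
    row[SQL_USER['comment_karma']] = 5678
    row[SQL_USER['total_karma']] = 6912
    row[SQL_USER['lastscan']] = 1500000000
    return memberformat_full(row)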
def print_message(data, printprefix=''):
if data[SQL_USER['human']] is not None:
print('{prefix:>5} {idstr:>6} : {human} : {name} : {link_karma} : {comment_karma}'.format(
prefix=printprefix,
idstr=data[SQL_USER['idstr']],
human=data[SQL_USER['human']],
name=data[SQL_USER['name']],
link_karma=data[SQL_USER['link_karma']],
comment_karma=data[SQL_USER['comment_karma']],
)
)
else:
        availability = 'available' if data[SQL_USER['available']] == 1 else 'unavailable'
print('{prefix:>5} {availability:>33} : {name}'.format(
prefix=printprefix,
availability=availability,
name=data[SQL_USER['name']],
)
)
def process(users, quiet=False, knownid='', noskip=False, commit=True):
'''
Fetch the /u/ page for a user or list of users
users : A list of strings, each representing a username. Since reddit
usernames must be 3 - 20 characters and only contain
alphanumeric + "_-", any improper strings will be removed.
quiet : Silences the "x old" report at the end
knownid : If you're processing a user which does not exist, but you know
what their user ID was supposed to be, this will at least allow
you to flesh out the database entry a little better.
    noskip : Do not skip usernames which are already in the database.
    commit : Commit the database after each insert / update.
    '''
olds = 0
if isinstance(users, list):
users = list(set(users))
if isinstance(users, (str, praw.models.Redditor)):
users = [users]
if isinstance(users, types.GeneratorType) or len(users) > 1:
knownid = ''
users = userify_list(users, noskip=noskip, quiet=quiet)
current = 0
for user in users:
current += 1
data = [None] * len(SQL_USER)
data[SQL_USER['lastscan']] = int(getnow())
if isinstance(user, list):
# This happens when we receive NotFound. [name, availability]
if knownid != '':
data[SQL_USER['idint']] = b36(knownid)
data[SQL_USER['idstr']] = knownid
data[SQL_USER['name']] = user[0]
data[SQL_USER['available']] = AVAILABILITY[user[1]]
else:
# We have a Redditor object.
h = human(user.created_utc)
data[SQL_USER['idint']] = b36(user.id)
data[SQL_USER['idstr']] = user.id
data[SQL_USER['created']] = user.created_utc
data[SQL_USER['human']] = h
data[SQL_USER['name']] = user.name
data[SQL_USER['link_karma']] = user.link_karma
data[SQL_USER['comment_karma']] = user.comment_karma
data[SQL_USER['total_karma']] = user.comment_karma + user.link_karma
data[SQL_USER['available']] = 0
data[SQL_USER['lowername']] = data[SQL_USER['name']].lower()
printprefix = '%04d' % current
x = smartinsert(data, printprefix, commit=commit)
if x is False:
olds += 1
if quiet is False:
print('%d old' % olds)
def process_from_database(filename, table, column, delete_original=False):
'''
Warning: if delete_original is True, the original database will lose each username
as it is processed
'''
s = sqlite3.connect(filename)
c = s.cursor()
c2 = s.cursor()
query = 'SELECT %s FROM %s ORDER BY RANDOM()' % (column, table)
c.execute(query)
i = 0
try:
for (index, item) in enumerate(fetchgenerator(c)):
i = (i + 1) % 100
if i == 0:
s.commit()
username = item[0]
if username is not None:
process(username, quiet=True, commit=index % 100 == 0)
if delete_original:
c2.execute('DELETE FROM %s WHERE %s == ?' % (table, column), [username])
except (Exception, KeyboardInterrupt) as e:
sql.commit()
if delete_original:
s.commit()
print('Committing changes...')
s.close()
raise e
sql.commit()
s.commit()
s.close()
def smartinsert(data, printprefix='', commit=True):
'''
Originally, all queries were based on idint, but this caused problems
when accounts were deleted / banned, because it wasn't possible to
sql-update without knowing the ID.
'''
print_message(data, printprefix)
exists_in_db = (getentry(name=data[SQL_USER['name']].lower()) is not None)
if exists_in_db:
isnew = False
data = [
data[SQL_USER['idint']],
data[SQL_USER['idstr']],
data[SQL_USER['created']],
data[SQL_USER['human']],
data[SQL_USER['link_karma']],
data[SQL_USER['comment_karma']],
data[SQL_USER['total_karma']],
data[SQL_USER['available']],
data[SQL_USER['lastscan']],
data[SQL_USER['name']],
data[SQL_USER['name']].lower()]
# coalesce allows us to fallback on the existing values
# if the given values are null, to avoid erasing data about users
# whose accounts are now deleted.
command = '''
UPDATE users SET
idint = coalesce(?, idint),
idstr = coalesce(?, idstr),
created = coalesce(?, created),
human = coalesce(?, human),
link_karma = coalesce(?, link_karma),
comment_karma = coalesce(?, comment_karma),
total_karma = coalesce(?, total_karma),
available = coalesce(?, available),
lastscan = coalesce(?, lastscan),
name = coalesce(?, name)
WHERE lowername == ?
'''
cur.execute(command, data)
else:
isnew = True
cur.execute('INSERT INTO users VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
if commit:
sql.commit()
return isnew
def userify_list(users, noskip=False, quiet=False):
if quiet is False:
if hasattr(users, '__len__'):
print('Processing %d unique names' % len(users))
for username in users:
if isinstance(username, str):
if len(username) < 3 or len(username) > 20:
#print('%s : Invalid length of %d' % (username, len(username)))
#continue
pass
if not all(c in VALID_CHARS for c in username):
print('%s : Contains invalid characters' % username)
continue
elif isinstance(username, praw.models.Redditor):
username = username.name.lower()
        else:
            print('Don\'t know what to do with %s' % username)
            continue
existing_entry = getentry(name=username)
if existing_entry is not None:
lastscan = existing_entry[SQL_USER['lastscan']]
should_rescan = (getnow() - lastscan) > MIN_LASTSCAN_DIFF
if should_rescan is False and noskip is False:
prefix = ' ' * 31
appendix = '(available)' if existing_entry[SQL_USER['available']] else ''
print('%sskipping : %s %s' % (prefix, username, appendix))
continue
try:
if PREPRINT:
print(username)
user = r.redditor(username)
if getattr(user, 'is_suspended', False):
# Suspended accounts provide extremely little info
# {"kind": "t2", "data": {"is_suspended": true, "name": "*****"}}
continue
yield user
except prawcore.exceptions.NotFound:
availability = r3.is_username_available(username)
availability = AVAILABILITY[availability]
yield [username, availability]
|
|
"""Entry point for the server application."""
import logging
import json
import uuid
# import time
import redis
from gevent.wsgi import WSGIServer
from flask import request, Response, jsonify
from datetime import datetime
from pytz import timezone, utc
from tzlocal import get_localzone
from .config import configure_app, app, HOSTNAME, PORT
from .utils import encode, oidc
from .models import HealthStatus, ServerTime, GuestbookEntry, GuestbookEntrySet
from flask_app import config
GUESTBOOK_BROWSE_PAGE_SIZE = 10
@app.before_first_request
def set_up():
"""Configure the application to be used by the application."""
configure_app(app)
@app.route('/api/v1/health', methods=['GET'])
def get_health():
"""Return service health information."""
ret = HealthStatus(is_up=True)
return jsonify(**(ret.to_dict()))
@app.route('/api/v1/time', methods=['GET'])
def get_time():
"""Return the current server time in the server's timezone"""
tz = get_localzone()
t = datetime.now(tz)
#t = tz.localize(t, is_dst=None)
st = ServerTime(hour=t.hour,
minute=t.minute,
second=t.second,
tz_name=tz.zone,
tz_offset=tz.utcoffset(datetime.now()).total_seconds()/3600)
return jsonify(st.to_dict())
@app.route('/api/v1/guestbook', methods=['POST'])
def sign_guestbook():
"""Accept a new guestbook entry posted to the API and return the new entry"""
print('Signing the guestbook')
payload = request.get_json(force=True)
if payload is None:
return error_response(400, 'Missing guestbook entry in POST payload')
name = payload.get('name')
message = payload.get('message')
if not name or not message:
return error_response(400, 'Missing required parameters')
entry = GuestbookEntry(
id=new_guid(),
name=name,
message=message,
timestamp=current_datetime()
)
# this is where it will get stored in redis
redis_key = get_guestbook_redis_key(entry.id)
json_str = json.dumps(entry.to_dict(), cls=encode.MyJSONEncoder)
r = get_redis()
r.set(redis_key, json_str)
r.lpush('guestbook_list', redis_key)
return success_response(entry.to_dict())
@app.route('/api/v1/guestbook', methods=['GET'])
def browse_guestbook():
"""Return the most recent guestbook entries"""
print('Browsing the guestbook')
last_id = request.args.get("last_id")
if last_id is None:
last_id = 0
else:
last_id = int(last_id)
r = get_redis()
keys = r.lrange('guestbook_list', last_id, (last_id+GUESTBOOK_BROWSE_PAGE_SIZE-1))
# map(lambda x:x.decode('utf-8'), keys)
entries = []
for key in keys:
json_str = r.get(key).decode('utf-8')
json_dic = json.loads(json_str)
entry = GuestbookEntry.from_dict(json_dic)
entries.append(entry)
entry_set = GuestbookEntrySet(
entries=entries,
count=len(entries),
last_id=str(last_id+len(entries)),
has_more=len(entries)==GUESTBOOK_BROWSE_PAGE_SIZE
)
return success_response(entry_set.to_dict())
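# Illustrative sketch (not part of the original app): exercising the two
# guestbook endpoints with Flask's built-in test client. It needs a
# reachable redis instance, so it is defined here but never called.
def _demo_guestbook_roundtrip():
    client = app.test_client()
    client.post('/api/v1/guestbook',
                data=json.dumps({'name': 'Ada', 'message': 'hello'}))
    rv = client.get('/api/v1/guestbook?last_id=0')
    # The payload mirrors GuestbookEntrySet: entries, count, last_id, has_more.
    return json.loads(rv.data.decode('utf-8'))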
@app.route('/api/v1/login', methods=['GET'])
def login_start():
client = oidc.OIDCClient(app.oidc_config)
session_id = new_guid()
redirect_uri = request.args.get('redirect_uri') or app.oidc_config.default_redirect_uri
scope = request.args.get('scope') or app.oidc_config.default_scope
state = oidc.OIDCClient.gen_nonce()
login_url = client.get_login_url(redirect_uri, scope, state)
# create session in redis and store state and redirect_uri for later use/validation
redis_key = get_login_redis_key(session_id)
r = get_redis()
r.hset(redis_key, 'state', state)
r.hset(redis_key, 'redirect_uri', redirect_uri)
return success_response(
{
'session_id': session_id,
'state': state,
'scope': scope,
'redirect_uri': redirect_uri,
'login_url': login_url
}
)
@app.route('/api/v1/login/token', methods=['POST'])
def login_token():
client = oidc.OIDCClient(app.oidc_config)
payload = request.get_json(force=True)
if payload is None:
return error_response(400, 'Missing POST body in login token request')
session_id = request.args.get('session_id') or payload.get('session_id') or request.cookies.get('wzstarter.login.session_id')
code = request.args.get('code') or payload.get('code')
state = request.args.get('state') or payload.get('state')
if not session_id:
return error_response(400, 'Missing session_id in login token request')
if not code:
return error_response(400, 'Missing code in login token request')
if not state:
return error_response(400, 'Missing state in login token request')
redis_key = get_login_redis_key(session_id)
r = get_redis()
print('final state: '+state)
cached_state = r.hget(redis_key, 'state').decode('utf-8')
print('cached state: ' + cached_state)
if state != cached_state:
return error_response(400, 'Incorrect state value provided')
    cached_redirect_uri = r.hget(redis_key, 'redirect_uri').decode('utf-8')
tokens = client.get_tokens(cached_redirect_uri, code)
r.delete(redis_key)
return success_response(tokens.to_dict())
@app.route('/api/v1/login/info', methods=['GET'])
def get_user_info():
client = oidc.OIDCClient(app.oidc_config)
id_token = get_id_token(request)
if not id_token:
return error_response(401, "Missing id token")
    try:
        token = client.validate_token(id_token)
    except Exception:
        return error_response(401, "Invalid id token")
return success_response(
{
'name': token.get('name'),
'sub': token.get('sub'),
'email': token.get('email')
}
)
def get_id_token(request):
id_token = request.headers.get("Authorization")
if id_token and id_token.startswith("Bearer "):
id_token = id_token[7:]
if not id_token:
id_token = request.cookies.get('wzstarter.oidc.id_token')
return id_token
def get_access_token(request):
access_token = request.headers.get("Authorization")
if access_token and access_token.startswith("Bearer "):
access_token = access_token[7:]
if not access_token:
access_token = request.cookies.get('wzstarter.oidc.access_token')
return access_token
def current_datetime():
return datetime.now(utc)
def error_response(status_code=400, message="Bad request"):
print('Sending error response')
ret = jsonify(message=message)
ret.status_code = status_code
ret.mimetype = 'application/json'
return ret
def success_response(response_dict=None):
    print('Sending success response')
    # Avoid a mutable default argument; fall back to an empty dict.
    ret = jsonify(response_dict if response_dict is not None else {})
    ret.status_code = 200
    ret.mimetype = 'application/json'
    return ret
@app.errorhandler(500)
def internal_server_error(error):
app.logger.error('Server Error: %s', (error))
return error_response(500, str(error))
@app.errorhandler(404)
def not_found_error(error):
    return error_response(404, "Not found")
@app.errorhandler(Exception)
def unhandled_exception(e):
app.logger.error('Unhandled Exception: %s', (e))
    print('Unhandled Exception: %s' % e)
return error_response(500, str(e))
def new_guid():
return str(uuid.uuid4())
def get_redis():
cfg = app.redis_config
return redis.StrictRedis(host=cfg.hostname, port=cfg.port)
def get_guestbook_redis_key(id):
return 'guestbook:'+id
def get_login_redis_key(id):
return 'login:'+id
def parse_redis_key(key):
return key.split(':')[1]
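# Hedged sanity check of the redis key helpers above (illustrative only):
assert get_guestbook_redis_key('abc') == 'guestbook:abc'
assert parse_redis_key(get_login_redis_key('xyz')) == 'xyz'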
def main():
"""Main entry point of the app."""
try:
http_server = WSGIServer((HOSTNAME, PORT),
app,
log=logging,
error_log=logging)
http_server.serve_forever()
except Exception as exc:
        logging.error(str(exc))
finally:
# get last entry and insert build appended if not completed
# Do something here
pass
|
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
import wx.lib.ogl as wxOgl
import math
import wx
from wh import GetBitmap
BMP_BORDER=3
ARROWMARGIN=5
wxOgl.OGLInitialize()
def round(n):
return int(n+.5)
class ExplainText(wx.TextCtrl):
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_DONTWRAP)
def SetData(self, rowset):
lst=[]
for row in rowset:
lst.append(str(row[0]))
self.SetValue("\n".join(lst))
def SetEmpty(self):
self.SetValue("")
class ExplainShape(wxOgl.BitmapShape):
def __init__(self, bmpname, str, tokenNo=-1, detailNo=-1):
wxOgl.BitmapShape.__init__(self)
self.kidCount=0
self.totalShapes=0
self.usedShapes=0
self.upperShape=None
self.description=str
self.condition=""
self.detail=""
self.SetBitmap(GetBitmap(bmpname, self))
if tokenNo < 0:
self.label=str
else:
strList=str.split(' ')
self.label = strList[tokenNo]
if detailNo < 0:
self.description = str
# if detailNo > 0:
# self.description=(self.description + " ".join(strList[0:detailNo])).lstrip()
# self.detail=strList[detailNo]
def __str__(self):
return "%s(%d) %d" % (self.label, self.level, self.totalShapes)
def GetLevel(self):
return self.level
def GetAverageCost(self):
return (self.costHigh - self.costLow) / 2 + self.costLow
def OnDraw(self, dc):
bmp=self.GetBitmap()
if not bmp.Ok():
return
x=int(self.GetX() - bmp.GetWidth()/2)
y=int(self.GetY() - bmp.GetHeight()/2)
dc.DrawBitmap(bmp, x, y, True)
dc.SetFont(self.GetCanvas().GetFont())
w,_h=dc.GetTextExtent(self.label)
x=self.GetX() - w/2
y += bmp.GetHeight() + BMP_BORDER
dc.DrawText(self.label, x, y)
def GetStartPoint(self):
pt=wx.RealPoint(self.GetX() + self.GetBitmap().GetWidth() / 2.0 + ARROWMARGIN, self.GetY())
return pt
def GetEndPoint(self, kidNo):
if self.kidCount>1:
koffs=round(self.GetBitmap().GetHeight() * 2. /3. * kidNo / (2*self.kidCount-2))
else:
            koffs = 0
_sh=self.GetHeight()
_bh=self.GetBitmap().GetHeight()
pt=wx.RealPoint(self.GetX() - self.GetBitmap().GetWidth() / 2.0 - ARROWMARGIN, self.GetY()+koffs)
return pt
def OnLeftClick(self, _x, _y, _keys, _attachment):
self.GetCanvas().ShowPopup(self)
@staticmethod
def Create(level, last, str):
        costPos = str.find("(cost=")
if costPos>0:
descr=str[0:costPos]
else:
descr=str
        strList = str.split(' ')
        token = strList[0]
        token2 = token3 = None
        if len(strList) > 1:
            token2 = strList[1]
        if len(strList) > 2:
            token3 = strList[2]
if token == "Total":
return None
bmp={ "Result": "ex_result",
"Append": "ex_append",
"Nested": "ex_nested",
"Merge": "ex_merge",
"Materialize": "ex_materialize",
"Sort": "ex_sort",
"Group": "ex_group",
"Aggregate": "ex_aggregate",
"GroupAggregate":"ex_aggregate",
"HashAggregate": "ex_aggregate",
"Unique": "ex_unique",
"SetOp": "ex_setop",
"Limit": "ex_limit",
"Seek": "ex_seek",
}.get(token)
if bmp:
s= ExplainShape(bmp, descr)
elif token == "Hash":
if token2 == "Join":
s= ExplainShape("ex_join", descr)
else:
if token3 == "Join":
s= ExplainShape("ex_join", descr)
else:
s= ExplainShape("ex_hash", descr)
elif token == "Subquery":
s= ExplainShape("ex_subplan", descr, 0, 2)
elif token == "Function" :
s= ExplainShape("ex_result", descr, 0, 2)
elif token == "Bitmap":
if token2 == "Index":
s= ExplainShape("ex_bmp_index", descr, 4, 3)
else:
s= ExplainShape("ex_bmp_heap", descr, 4, 3)
elif token2 == "Scan":
if token == "Index":
s= ExplainShape("ex_index_scan", descr, 3, 2)
elif token == "Tid":
s= ExplainShape("ex_tid_scan", descr, 3, 2)
else:
s= ExplainShape("ex_scan", descr, 3, 2);
else:
s=ExplainShape("ex_unknown", descr)
s.SetDraggable(False)
s.level = level
        if costPos > 0:
            actPos = str.find("(actual")
            if actPos > 0:
                s.actual = str[actPos:]
                s.cost = str[costPos:actPos]
            else:
                s.cost = str[costPos:]
w=50
h=20
        bmp = s.GetBitmap()
if w < bmp.GetWidth():
w = bmp.GetWidth()
s.SetHeight(bmp.GetHeight() + BMP_BORDER + h)
        s.SetWidth(w)
        s.upperShape = last
if last:
s.kidNo = last.kidCount
last.kidCount = last.kidCount+1
else:
s.kidNo = 0
if costPos > 0:
cl=str[costPos+6:-1].split(' ')
costs=cl[0].split('..')
s.costLow=float(costs[0])
s.costHigh=float(costs[1])
            # cl[1] is "rows=NNN" and cl[2] is "width=NNN"; store each
            # under its matching attribute name.
            s.rows = int(cl[1].split('=')[1])
            s.width = int(cl[2].split('=')[1])
return s
class ExplainLine(wxOgl.LineShape):
def __init__(self, fromShape, toShape):
wxOgl.LineShape.__init__(self)
self.SetCanvas(fromShape)
self.width = int(math.log(fromShape.GetAverageCost()))
if self.width > 10:
self.width = 10
self.startPoint=fromShape.GetStartPoint()
self.endPoint=toShape.GetEndPoint(fromShape.kidNo)
self.MakeLineControlPoints(2)
self._lineControlPoints[0]=fromShape.GetStartPoint()
self._lineControlPoints[1]=toShape.GetEndPoint(fromShape.kidNo)
self.name="%s -> %s" %(fromShape.label, toShape.label)
self.Initialise()
fromShape.AddLine(self, toShape)
def OnDraw(self, dc):
if self._lineControlPoints:
dc.SetPen(wx.ThePenList.FindOrCreatePen(wx.BLACK, 1, wx.SOLID))
dc.SetBrush(wx.TheBrushList.FindOrCreateBrush(wx.LIGHT_GREY, wx.SOLID))
p0x,p0y=self.startPoint
p3x,p3y=self.endPoint
xd=(p3x-p0x)/3.
p1x = p0x + xd-8
p2x = p3x - xd+8
width=self.width
phi = math.atan2(p3y - p0y, p2x - p1x)
offs = -width * math.tan(phi/2)
arrow=4
points=[]
def append(x, y):
points.append( (round(x), round(y)))
append(p0x, p0y-width)
append(p1x-offs, p0y-width)
append(p2x-offs-arrow, p3y-width)
append(p3x-width-arrow, p3y-width)
append(p3x-width-arrow, p3y-width-arrow)
append(p3x, p3y)
append(p3x-width-arrow, p3y+width+arrow)
append(p3x-width-arrow, p3y+width)
append(p2x+offs-arrow, p3y+width)
append(p1x+offs, p0y+width)
append(p0x, p0y+width)
dc.DrawPolygon(points, 0, 0)
class ExplainCanvas(wxOgl.ShapeCanvas):
def __init__(self, parent):
wxOgl.ShapeCanvas.__init__(self, parent)
self.SetDiagram(wxOgl.Diagram())
self.GetDiagram().SetCanvas(self)
self.SetBackgroundColour(wx.WHITE)
self.lastShape=None
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
def SetEmpty(self):
self.GetDiagram().DeleteAllShapes()
self.lastShape=None
self.result=[]
def GetResult(self):
return self.result
def SetData(self, rowset):
self.SetEmpty()
last=None
maxLevel=0
while rowset.HasMore():
tmp=rowset.Next()[0]
self.result.append(tmp)
line=tmp.strip()
while True:
if line.count('(') > line.count(')') and rowset.HasMore():
line = "%s %s" % (line, rowset.Next()[0])
else:
break
level = (len(tmp) - len(line) +4) / 6
            if last:
                if level:
                    if line.startswith("->"):
                        line = line[2:].lstrip()
                    else:
                        last.condition = line
                        continue
while last and level <= last.GetLevel():
last = last.upperShape
s=ExplainShape.Create(level, last, line)
if not s:
continue
s.SetCanvas(self)
self.InsertShape(s)
s.Show(True)
if level > maxLevel:
maxLevel = level
if not last:
self.rootShape = s
last=s
x0 = int(self.rootShape.GetWidth()*1.5)
y0 = int(self.rootShape.GetHeight()*0.6)
xoffs = int(self.rootShape.GetWidth()*2.6)
yoffs = int(self.rootShape.GetHeight()*1.2)
lst=self.GetDiagram().GetShapeList()[:]
for s in lst:
if not s.totalShapes:
s.totalShapes = 1
if s.upperShape:
s.upperShape.totalShapes += s.totalShapes
lst.reverse()
for s in lst:
level=s.GetLevel()
s.SetX(x0 + (maxLevel - level) * xoffs)
upper = s.upperShape
if upper:
s.SetY(upper.GetY() + upper.usedShapes * yoffs)
upper.usedShapes += s.totalShapes
l=ExplainLine(s, upper)
l.Show(True)
self.AddShape(l)
else:
s.SetY(y0)
PIXPERUNIT=20
w=(maxLevel * xoffs + x0*2 + PIXPERUNIT - 1) / PIXPERUNIT
h=(self.rootShape.totalShapes * yoffs + y0*2 + PIXPERUNIT - 1) / PIXPERUNIT
self.SetScrollbars(PIXPERUNIT, PIXPERUNIT, w, h)
def OnMouseMove(self, evt):
sx,sy=self.CalcUnscrolledPosition(evt.GetX(), evt.GetY())
shape, _=self.FindShape(sx, sy)
if shape and isinstance(shape, ExplainShape):
if shape.costHigh == shape.costLow:
cost="cost=%.2f" % shape.costLow
else:
cost="cost=%.2f .. %.2f" % (shape.costLow, shape.costHigh)
lines=[]
lines.append(shape.description)
if shape.condition:
lines.append(shape.condition)
lines.append(cost)
lines.append("rows=%d, size=%d" % (shape.width, shape.rows))
self.SetToolTipString("\n".join(lines))
else:
self.SetToolTipString("")
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemuser_nspartition_binding(base_resource) :
""" Binding class showing the nspartition that can be bound to systemuser.
"""
def __init__(self) :
self._partitionname = ""
self._username = ""
self.___count = 0
@property
def username(self) :
"""Name of the system-user entry to which to bind the command policy.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
"""Name of the system-user entry to which to bind the command policy.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def partitionname(self) :
"""Name of the Partition to bind to the system user.
"""
try :
return self._partitionname
except Exception as e:
raise e
@partitionname.setter
def partitionname(self, partitionname) :
"""Name of the Partition to bind to the system user.
"""
try :
self._partitionname = partitionname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemuser_nspartition_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemuser_nspartition_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.username) :
return str(self.username)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = systemuser_nspartition_binding()
updateresource.username = resource.username
updateresource.partitionname = resource.partitionname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [systemuser_nspartition_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].username = resource[i].username
updateresources[i].partitionname = resource[i].partitionname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = systemuser_nspartition_binding()
deleteresource.username = resource.username
deleteresource.partitionname = resource.partitionname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [systemuser_nspartition_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].username = resource[i].username
deleteresources[i].partitionname = resource[i].partitionname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, username) :
""" Use this API to fetch systemuser_nspartition_binding resources.
"""
try :
obj = systemuser_nspartition_binding()
obj.username = username
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, username, filter_) :
""" Use this API to fetch filtered set of systemuser_nspartition_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemuser_nspartition_binding()
obj.username = username
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, username) :
""" Use this API to count systemuser_nspartition_binding resources configued on NetScaler.
"""
try :
obj = systemuser_nspartition_binding()
obj.username = username
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, username, filter_) :
""" Use this API to count the filtered set of systemuser_nspartition_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = systemuser_nspartition_binding()
obj.username = username
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class systemuser_nspartition_binding_response(base_response) :
def __init__(self, length=1) :
self.systemuser_nspartition_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemuser_nspartition_binding = [systemuser_nspartition_binding() for _ in range(length)]
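# Illustrative usage sketch (not part of the generated SDK). `client` is
# assumed to be a logged-in nitro_service instance; consult the Nitro SDK
# documentation for the real connection flow.
def _demo_fetch_bindings(client, username) :
    bindings = systemuser_nspartition_binding.get(client, username)
    for binding in (bindings or []) :
        print(binding.partitionname)
    return systemuser_nspartition_binding.count(client, username)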
|
|
import csv
import decimal
from datetime import date
from decimal import Decimal
import random
import json
from django.http import QueryDict
from pytest import mark
from django.urls import reverse
from django.conf import settings
from django.utils import timezone
from freezegun import freeze_time
import responses
from assopy.models import Invoice, Order, Vat
from tests.factories import FareFactory, OrderFactory
from conference.models import Fare, Conference
from conference.invoicing import (
EPS_18,
CSV_2018_REPORT_COLUMNS,
)
from conference.currencies import (
DAILY_ECB_URL,
EXAMPLE_ECB_DAILY_XML,
EXAMPLE_ECB_DATE,
fetch_and_store_latest_ecb_exrates,
)
from conference.fares import (
pre_create_typical_fares_for_conference,
)
from email_template.models import Email
from tests.common_tools import make_user
def _prepare_invoice_for_basic_test(order_code, invoice_code):
# default password is 'password123'
user = make_user()
# FYI(artcz): Order.objects.create is overloaded method on
# OrderManager, that sets up a lot of unused stuff, going with manual
# .save().
order = Order(user=user.assopy_user, code=order_code)
order.save()
    # create some random Vat instance so that the invoice creation works
vat_10 = Vat.objects.create(value=10)
return Invoice.objects.create(
code=invoice_code,
order=order,
emit_date=date.today(),
price=Decimal(1337),
vat=vat_10,
html="<html>Here goes full html</html>",
exchange_rate_date=date.today(),
)
@mark.django_db
def test_invoice_html(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-html",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert (
response.content.decode("utf-8") == "<html>Here goes full html</html>"
)
@mark.django_db
def test_invoice_pdf(client):
# invoice_code must be validated via ASSOPY_IS_REAL_INVOICE
invoice_code, order_code = "I123", "asdf"
_prepare_invoice_for_basic_test(order_code, invoice_code)
client.login(email="joedoe@example.com", password="password123")
invoice_url = reverse(
"assopy-invoice-pdf",
kwargs={"order_code": order_code, "code": invoice_code},
)
response = client.get(invoice_url)
assert response.status_code == 200
assert response["Content-type"] == "application/pdf"
def create_order_and_invoice(assopy_user, fare):
order = OrderFactory(user=assopy_user, items=[(fare, {"qty": 1})])
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
# confirm_order by default creates placeholders, but for most of the tests
# we can upgrade them to proper invoices anyway.
invoice = Invoice.objects.get(order=order)
return invoice
@mark.django_db
def test_if_invoice_stores_information_about_the_seller(client):
"""
Testing #591
https://github.com/EuroPython/epcon/issues/591
"""
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
# need this email to generate invoices/orders
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
def invoice_url(invoice):
return reverse(
"assopy-invoice-html",
kwargs={"code": invoice.code, "order_code": invoice.order.code},
)
with freeze_time("2018-01-01"):
# We need to log in again after every time travel, just in case.
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.code == "I/18.0001"
assert invoice.emit_date == date(2018, 1, 1)
assert invoice.issuer == EPS_18
assert invoice.html.startswith("<!DOCTYPE")
response = client.get(invoice_url(invoice))
assert EPS_18 in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_vat_in_GBP_for_2018(client):
"""
https://github.com/EuroPython/epcon/issues/617
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user()
with freeze_time("2018-05-05"):
client.login(email="joedoe@example.com", password="password123")
invoice = create_order_and_invoice(user.assopy_user, fare)
assert invoice.html.startswith("<!DOCTYPE")
assert invoice.vat_value() == Decimal("1.67")
assert invoice.vat_in_local_currency == Decimal("1.49")
assert invoice.local_currency == "GBP"
assert invoice.exchange_rate == Decimal("0.89165")
assert invoice.exchange_rate_date == EXAMPLE_ECB_DATE
response = client.get(invoice.get_html_url())
content = response.content.decode("utf-8")
        # The wording used to be different, so we had both checks in one
        # line, but because of a template change we had to separate them.
assert 'local-currency="GBP"' in content
assert 'total-vat-in-local-currency="1.49"' in content
        # We use whatever date was received/cached from the ECB XML;
        # it doesn't matter what the emit date is.
assert (
"ECB rate used for VAT is 0.89165 GBP/EUR from 2018-03-06"
in content
)
response = client.get(invoice.get_absolute_url())
assert response["Content-Type"] == "application/pdf"
@mark.django_db
@responses.activate
@freeze_time("2018-05-05")
def test_create_invoice_with_many_items(client):
"""
This test is meant to be used to test invoice template design.
It creates a lot of different items on the invoice, and after that we can
use serve(content) to easily check in the browser how the Invoice looks
like.
Freezing it at 2018 so we can easily check EP2018 invoices.
"""
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
user = make_user()
vat_rate_20, _ = Vat.objects.get_or_create(value=20)
CONFERENCE = settings.CONFERENCE_CONFERENCE
pre_create_typical_fares_for_conference(CONFERENCE, vat_rate_20)
# Don't need to set dates for this test.
# set_early_bird_fare_dates(CONFERENCE, yesterday, tomorrow)
# set_regular_fare_dates(CONFERENCE, yesterday, tomorrow)
random_fares = random.sample(list(Fare.objects.all()), 3)
order = OrderFactory(
user=user.assopy_user,
items=[(fare, {"qty": i}) for i, fare in enumerate(random_fares, 1)],
)
with responses.RequestsMock() as rsps:
# mocking responses for the invoice VAT exchange rate feature
rsps.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
fetch_and_store_latest_ecb_exrates()
order.confirm_order(timezone.now())
@mark.django_db
@responses.activate
def test_export_invoice_csv(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel:invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
next(invoice_reader) # skip header
invoice = next(invoice_reader)
iter_column = iter(invoice)
assert next(iter_column) == invoice1.code
assert next(iter_column) == "2018-05-05"
assert next(iter_column) == invoice1.order.user.user.get_full_name()
assert next(iter_column) == invoice1.order.card_name
next(iter_column) # ignore the address
assert next(iter_column) == invoice1.order.country.name
assert next(iter_column) == invoice1.order.vat_number
next(iter_column) # ignore the currency
assert (
decimal.Decimal(next(iter_column))
== invoice1.net_price_in_local_currency
)
assert decimal.Decimal(next(iter_column)) == invoice1.vat_in_local_currency
assert (
decimal.Decimal(next(iter_column)) == invoice1.price_in_local_currency
)
@mark.django_db
@responses.activate
def test_export_invoice_csv_before_period(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-04-05"):
create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 5, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel:invoice_export_for_tax_report_csv")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"] == "text/csv"
invoice_reader = csv.reader(response.content.decode("utf-8").splitlines())
header = next(invoice_reader)
assert header == CSV_2018_REPORT_COLUMNS
assert next(invoice_reader, None) is None
@mark.django_db
@responses.activate
def test_export_invoice(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel:invoice_export_for_tax_report")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("text/html")
assert '<tr id="invoice_{0}">'.format(
invoice1.id
) in response.content.decode("utf-8")
@mark.django_db
@responses.activate
def test_export_invoice_accounting_json(client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
responses.add(responses.GET, DAILY_ECB_URL, body=EXAMPLE_ECB_DAILY_XML)
Email.objects.create(code="purchase-complete")
fare = FareFactory()
user = make_user(is_staff=True)
client.login(email=user.email, password="password123")
with freeze_time("2018-05-05"):
invoice1 = create_order_and_invoice(user.assopy_user, fare)
query_dict = QueryDict(mutable=True)
query_dict["start_date"] = date(2018, 1, 1)
query_dict["end_date"] = date.today()
query_string = query_dict.urlencode()
response = client.get(
reverse("debug_panel:invoice_export_for_payment_reconciliation_json")
+ "?"
+ query_string
)
assert response.status_code == 200
assert response["content-type"].startswith("application/json")
data = json.loads(response.content)["invoices"]
assert len(data) == 1
assert data[0]["ID"] == invoice1.code
assert decimal.Decimal(data[0]["net"]) == invoice1.net_price()
assert decimal.Decimal(data[0]["vat"]) == invoice1.vat_value()
assert decimal.Decimal(data[0]["gross"]) == invoice1.price
assert data[0]["order"] == invoice1.order.code
assert data[0]["stripe"] == invoice1.order.stripe_charge_id
def test_reissue_invoice(admin_client):
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE, name=settings.CONFERENCE_NAME
)
invoice_code, order_code = "I123", "asdf"
invoice = _prepare_invoice_for_basic_test(order_code, invoice_code)
NEW_CUSTOMER = "NEW CUSTOMER"
assert Invoice.objects.all().count() == 1
assert NEW_CUSTOMER not in Invoice.objects.latest("id").html
url = reverse("debug_panel:reissue_invoice", args=[invoice.id])
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {"emit_date": "2018-01-01", "customer": NEW_CUSTOMER}
)
assert response.status_code == 302
assert Invoice.objects.all().count() == 2
assert NEW_CUSTOMER in Invoice.objects.latest("id").html
|
|
#! /usr/bin/env python
"""Test script for the imageop module. This has the side
effect of partially testing the imgfile module as well.
Roger E. Masse
"""
from test.test_support import verbose, unlink, import_module, run_unittest
imageop = import_module('imageop', deprecated=True)
import uu, os, unittest
SIZES = (1, 2, 3, 4)
_VALUES = (1, 2, 2**10, 2**15-1, 2**15, 2**15+1, 2**31-2, 2**31-1)
VALUES = tuple( -x for x in reversed(_VALUES) ) + (0,) + _VALUES
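# Hedged illustration: by construction VALUES is symmetric around zero,
# covering negative, zero and positive boundary cases.
assert VALUES[0] == -(2**31-1) and VALUES[-1] == 2**31-1
assert VALUES[len(VALUES)//2] == 0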
AAAAA = "A" * 1024
MAX_LEN = 2**20
class InputValidationTests(unittest.TestCase):
def _check(self, name, size=None, *extra):
func = getattr(imageop, name)
for height in VALUES:
for width in VALUES:
strlen = abs(width * height)
if size:
strlen *= size
if strlen < MAX_LEN:
data = "A" * strlen
else:
data = AAAAA
if size:
arguments = (data, size, width, height) + extra
else:
arguments = (data, width, height) + extra
try:
func(*arguments)
except (ValueError, imageop.error):
pass
def check_size(self, name, *extra):
for size in SIZES:
self._check(name, size, *extra)
def check(self, name, *extra):
self._check(name, None, *extra)
def test_input_validation(self):
self.check_size("crop", 0, 0, 0, 0)
self.check_size("scale", 1, 0)
self.check_size("scale", -1, -1)
self.check_size("tovideo")
self.check("grey2mono", 128)
self.check("grey2grey4")
self.check("grey2grey2")
self.check("dither2mono")
self.check("dither2grey2")
self.check("mono2grey", 0, 0)
self.check("grey22grey")
self.check("rgb2rgb8") # nlen*4 == len
self.check("rgb82rgb")
self.check("rgb2grey")
self.check("grey2rgb")
def test_main():
run_unittest(InputValidationTests)
try:
import imgfile
except ImportError:
return
# Create binary test files
uu.decode(get_qualified_path('testrgb'+os.extsep+'uue'), 'test'+os.extsep+'rgb')
image, width, height = getimage('test'+os.extsep+'rgb')
    # Return the selected part of image, which should be width by height
# in size and consist of pixels of psize bytes.
if verbose:
print 'crop'
newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)
# Return image scaled to size newwidth by newheight. No interpolation
# is done, scaling is done by simple-minded pixel duplication or removal.
# Therefore, computer-generated images or dithered images will
# not look nice after scaling.
if verbose:
print 'scale'
scaleimage = imageop.scale(image, 4, width, height, 1, 1)
# Run a vertical low-pass filter over an image. It does so by computing
# each destination pixel as the average of two vertically-aligned source
# pixels. The main use of this routine is to forestall excessive flicker
    # if the image is displayed on a video device that uses interlacing, hence
    # the name.
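    # (A worked example, assuming the first row passes through unchanged and
    # each later row is averaged with the row above it: a source column of
    # byte values 0, 255 would come out as 0, 127.)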
if verbose:
print 'tovideo'
videoimage = imageop.tovideo (image, 4, width, height)
# Convert an rgb image to an 8 bit rgb
if verbose:
print 'rgb2rgb8'
greyimage = imageop.rgb2rgb8(image, width, height)
# Convert an 8 bit rgb image to a 24 bit rgb image
if verbose:
print 'rgb82rgb'
image = imageop.rgb82rgb(greyimage, width, height)
# Convert an rgb image to an 8 bit greyscale image
if verbose:
print 'rgb2grey'
greyimage = imageop.rgb2grey(image, width, height)
# Convert an 8 bit greyscale image to a 24 bit rgb image
if verbose:
print 'grey2rgb'
image = imageop.grey2rgb(greyimage, width, height)
# Convert a 8-bit deep greyscale image to a 1-bit deep image by
# thresholding all the pixels. The resulting image is tightly packed
# and is probably only useful as an argument to mono2grey.
if verbose:
print 'grey2mono'
monoimage = imageop.grey2mono (greyimage, width, height, 0)
# monoimage, width, height = getimage('monotest.rgb')
# Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
# All pixels that are zero-valued on input get value p0 on output and
# all one-value input pixels get value p1 on output. To convert a
# monochrome black-and-white image to greyscale pass the values 0 and
# 255 respectively.
if verbose:
print 'mono2grey'
greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)
# Convert an 8-bit greyscale image to a 1-bit monochrome image using a
# (simple-minded) dithering algorithm.
if verbose:
print 'dither2mono'
monoimage = imageop.dither2mono (greyimage, width, height)
# Convert an 8-bit greyscale image to a 4-bit greyscale image without
# dithering.
if verbose:
print 'grey2grey4'
grey4image = imageop.grey2grey4 (greyimage, width, height)
# Convert an 8-bit greyscale image to a 2-bit greyscale image without
# dithering.
if verbose:
print 'grey2grey2'
grey2image = imageop.grey2grey2 (greyimage, width, height)
# Convert an 8-bit greyscale image to a 2-bit greyscale image with
# dithering. As for dither2mono, the dithering algorithm is currently
# very simple.
if verbose:
print 'dither2grey2'
grey2image = imageop.dither2grey2 (greyimage, width, height)
# Convert a 4-bit greyscale image to an 8-bit greyscale image.
if verbose:
print 'grey42grey'
greyimage = imageop.grey42grey (grey4image, width, height)
# Convert a 2-bit greyscale image to an 8-bit greyscale image.
if verbose:
print 'grey22grey'
image = imageop.grey22grey (grey2image, width, height)
# Cleanup
unlink('test'+os.extsep+'rgb')
def getimage(name):
"""return a tuple consisting of
image (in 'imgfile' format) width and height
"""
import imgfile
try:
sizes = imgfile.getsizes(name)
except imgfile.error:
name = get_qualified_path(name)
sizes = imgfile.getsizes(name)
if verbose:
print 'imgfile opening test image: %s, sizes: %s' % (name, str(sizes))
image = imgfile.read(name)
return (image, sizes[0], sizes[1])
def get_qualified_path(name):
""" return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
if __name__ == '__main__':
test_main()
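# A minimal round-trip sketch of the grey2mono/mono2grey pair exercised above,
# assuming the semantics described in the comments (grey2mono thresholds each
# byte into a packed 1-bit image; mono2grey expands 0/1 bits back to p0/p1):
#
#     import imageop
#     width, height = 8, 2
#     grey = chr(0) * width + chr(255) * width   # one black row, one white row
#     mono = imageop.grey2mono(grey, width, height, 128)
#     assert imageop.mono2grey(mono, width, height, 0, 255) == grey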
|
|
"""
------------
PyMine 1.0.1
------------
PyMine lets you integrate and visualize biological data used for drug discovery using PyMOL.
------------
REQUIREMENTS
------------
1) Ubuntu 11.04 or above OR Mac OS X 10.7 or above
2) Pymol 1.7 or above
3) PyMine 1.0.1
------------
INSTALLATION
------------
1) Download and install pymol. http://sourceforge.net/projects/pymol/
2) Download and unzip PyMine. https://github.com/zhijunlilab/PyMine
3) Open PyMol. Install PyMine: Plugins -> Manage Plugins -> Install -> (locate pymine.py file).
4) Restart PyMol
Using MacPyMOL
1) Rename "MacPyMOL.app" to "PyMOLX11Hybrid.app" in the Applications folder.
2) Install XQuartz, available at http://xquartz.macosforge.org/landing/
3) Follow the plugin installation procedure described above.
-------
History
-------
- v1.0.0: Initial public release
-------------
Licence (MIT)
-------------
Copyright (c) 2015 Rajan Chaudhari and Zhijun Li
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import fileinput
import Tkinter
import tkMessageBox
import urllib2
import pymol
from Tkinter import PhotoImage as PI
import xml.etree.ElementTree as ET
import webbrowser
import tkFileDialog
# PyMOL plugin entry point: PyMOL calls this module-level __init__ with its Tk
# app instance, adding a 'PyMine' item to the Plugin menu that launches main().
def __init__(self):
self.menuBar.addmenuitem('Plugin', 'command',
'Gather information',
label = 'PyMine',
command = main)
class PyMine(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.grid()
self.createGUI()
# GLOBAL VARIABLE ARE DEFINED HERE SO THAT THEY CAN BE USED IN ANY FUNCTION
self.flag=0
self.pdb_id=''
self.pdb_chain_id=''
self.pdb_file=''
self.smiles=''
self.name=list()
self.summary=list()
self.symbol=list()
self.uniprot=list()
self.binding_sites=list()
self.ppi_bs_residues=''
self.lig_bs_residues=''
self.dna_bs_residues=''
self.rna_bs_residues=''
self.ion_bs_residues=''
self.pep_bs_residues=''
self.ki_comps=list()
self.ec50_comps=list()
self.ic50_comps=list()
self.pathways=list()
self.saps=list()
self.ligands=list()
self.ligand_chemblid=list()
self.target_chemblID=''
self.approved_drugs=list()
self.ligand_images=list()
self.kegg_info=''
self.userpdbfile=None
#self.label4.config(text=None)
        self.entryVariable5.set('')
def createGUI(self):
## Create Frame
self.frame1=Tkinter.Frame(self)
self.frame1.grid(sticky='nswe')
## INPUT
self.label1=Tkinter.Label(self.frame1, text="Enter PDB ID") #LABEL
self.label1.grid(row=1, column=0, sticky=Tkinter.W)
self.entryVariable1=Tkinter.StringVar(master=self.frame1) #INPUT Variable
self.entryVariable1.set('1RTK')
self.entry1=Tkinter.Entry(self.frame1, textvariable=self.entryVariable1, width=4) #INPUT Box
self.entry1.grid(row=1, column=1, sticky=Tkinter.W)
self.label1_1=Tkinter.Label(self.frame1, text="Enter Chain ID") #LABEL
self.label1_1.grid(row=1, column=2, sticky=Tkinter.W)
self.entryVariable2=Tkinter.StringVar(master=self.frame1) #Input Variable 2
self.entryVariable2.set('A')
self.entry2=Tkinter.Entry(self.frame1, textvariable=self.entryVariable2, width=2) #input box 2
self.entry2.grid(row=1, column=3, sticky=Tkinter.W)
self.label2_2=Tkinter.Label(self.frame1, text="OR") #LABEL
self.label2_2.grid(row=1, column=4, sticky=Tkinter.W)
#self.button1=Tkinter.Button(self.frame1, text="Submit", command=self.get_results) #Button1
#self.button1.grid(row=1, column=4, sticky=Tkinter.W)
self.button2=Tkinter.Button(self.frame1, text="Clear", command=self.clear) #Button2
self.button2.grid(row=4, column=4, sticky=Tkinter.W)
self.label3=Tkinter.Label(self.frame1, text="Select PDB File") #LABEL
self.label3.grid(row=2, column=0, sticky=Tkinter.W)
self.button1_1=Tkinter.Button(self.frame1, text="Browse", command=self.file_upload) #Button2
self.button1_1.grid(row=2, column=1, sticky=Tkinter.W)
self.label5=Tkinter.Label(self.frame1, text="Enter Uniprot ID") #LABEL
self.label5.grid(row=2, column=2, sticky=Tkinter.W)
self.entryVariable5=Tkinter.StringVar(master=self.frame1) #Input Variable 3
self.entryVariable5.set('')
self.entry5=Tkinter.Entry(self.frame1, textvariable=self.entryVariable5, width=6) #input box 3
self.entry5.grid(row=2, column=3, sticky=Tkinter.W)
self.button2_2=Tkinter.Button(self.frame1, text="Submit", command=self.get_results) #Button1
self.button2_2.grid(row=2, column=4, sticky=Tkinter.W)
self.label4=Tkinter.Label(self.frame1, width=10, anchor=Tkinter.W, justify=Tkinter.LEFT) #LABEL
self.label4.grid(row=3, column=1, sticky=Tkinter.W)
self.label2=Tkinter.Label(self.frame1, text="Enter Smile String") #LABEL
self.label2.grid(row=4, column=0, sticky=Tkinter.W)
self.entryVariable3=Tkinter.StringVar(master=self.frame1) #Input Variable 3
self.entryVariable3.set('')
self.entry3=Tkinter.Entry(self.frame1, textvariable=self.entryVariable3, width=10) #input box 3
self.entry3.grid(row=4, column=1, columnspan=2, sticky=Tkinter.W)
self.button3=Tkinter.Button(self.frame1, text="Find Similar Ligands", command=self.get_similar_ligands) #Button2
self.button3.grid(row=4, column=2, sticky=Tkinter.W)
self.button11=Tkinter.Button(self.frame1, text="?", command=self.smiles_help)
self.button11.grid(row=4, column=3, sticky=Tkinter.W)
## OUTPUT
self.rframe=Tkinter.LabelFrame(master=self.frame1, text="Data Panel")
self.rframe.grid(row=6, columnspan=6, sticky='nswe')
self.button5=Tkinter.Button(self.rframe, text="Protein", command=self.lift_prot_info)
self.button5.grid(row=0, column=0)
self.button6=Tkinter.Button(self.rframe, text="Ligands", command=self.lift_lig_info)
self.button6.grid(row=0, column=1)
self.button7=Tkinter.Button(self.rframe, text="PDB", command=self.lift_pdb_file)
self.button7.grid(row=0, column=3)
self.button8=Tkinter.Button(self.rframe, text="Uniprot", command=self.lift_uniprot_file)
self.button8.grid(row=0, column=2)
self.button9=Tkinter.Button(self.rframe, text="Pathways", command=self.lift_kegg_info)
self.button9.grid(row=0, column=4)
self.button10=Tkinter.Button(self.rframe, text="Similar Ligands", command=self.lift_ligss_info)
self.button10.grid(row=0, column=5)
        self.text1=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text1.grid(row=5, column=0, columnspan=10, sticky='ns')
        scrollbar1=Tkinter.Scrollbar(self.rframe, command=self.text1.yview)
        scrollbar1.grid(row=5, column=11, sticky='nswe')
        self.text1.configure(yscrollcommand=scrollbar1.set)
        self.text1.lower()
        self.text2=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text2.grid(row=5, column=0, columnspan=10, sticky='ns')
        self.text2.lower()
        self.text3=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text3.grid(row=5, column=0, columnspan=10, sticky='ns')
        self.text3.lower()
        self.text4=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text4.grid(row=5, column=0, columnspan=10, sticky='ns')
        self.text4.lower()
        self.text5=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text5.grid(row=5, column=0, columnspan=10, sticky='ns')
        self.text5.lower()
        self.text6=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
        self.text6.grid(row=5, column=0, columnspan=10, sticky='ns')
        self.text6.lower()
def lift_prot_info(self):
self.text1.lift()
scrollbar1=Tkinter.Scrollbar(self.rframe, command=self.text1.yview)
scrollbar1.grid(row=5, column=11, sticky='nswe')
self.text1.configure(yscrollcommand=scrollbar1.set)
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_lig_info(self):
self.text1.lower()
self.text2.lift()
scrollbar2=Tkinter.Scrollbar(self.rframe, command=self.text2.yview)
scrollbar2.grid(row=5, column=11, sticky='nswe')
self.text2.configure(yscrollcommand=scrollbar2.set)
self.text3.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_pdb_file(self):
self.text3.lift()
scrollbar3=Tkinter.Scrollbar(self.rframe, command=self.text3.yview)
scrollbar3.grid(row=5, column=11, sticky='nswe')
self.text3.configure(yscrollcommand=scrollbar3.set)
self.text1.lower()
self.text2.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_uniprot_file(self):
self.text4.lift()
scrollbar4=Tkinter.Scrollbar(self.rframe, command=self.text4.yview)
scrollbar4.grid(row=5, column=11, sticky='nswe')
self.text4.configure(yscrollcommand=scrollbar4.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text5.lower()
self.text6.lower()
def lift_kegg_info(self):
self.text5.lift()
scrollbar5=Tkinter.Scrollbar(self.rframe, command=self.text5.yview)
scrollbar5.grid(row=5, column=11, sticky='nswe')
self.text5.configure(yscrollcommand=scrollbar5.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text6.lower()
def lift_ligss_info(self):
self.text6.lift()
scrollbar6=Tkinter.Scrollbar(self.rframe, command=self.text6.yview)
scrollbar6.grid(row=5, column=11, sticky='nswe')
self.text6.configure(yscrollcommand=scrollbar6.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text5.lower()
def file_upload(self):
toplevel1 = Tkinter.Toplevel()
toplevel1.withdraw()
        self.userpdbfile = tkFileDialog.askopenfile(parent=toplevel1, mode='rb', title='Choose a file')
        # askopenfile returns None when the dialog is cancelled; guard before
        # touching .name to avoid an AttributeError.
        if self.userpdbfile is not None:
            self.userpdbfile_path = self.userpdbfile.name
            print self.userpdbfile_path
            self.userpdb_filename = os.path.basename(self.userpdbfile_path)
            self.userpdb_filename_noext = self.userpdb_filename.split('.')[0]
            self.label4.config(text=self.userpdb_filename)
def smiles_help(self):
        # Show a short how-to for the SMILES similarity search.
        tkMessageBox.showinfo(title='SMILES', message="To find similar ligands, paste your SMILES string in the entry box and hit the Find Similar Ligands button.\nOn Mac, use Command+C to copy from the Data Panel and Control+V to paste in the entry box.")
def showLink(self, event, arg):
        # Open the clicked hyperlink in the default web browser.
webbrowser.open_new(arg)
def show_pathway(self, path):
toplevel = Tkinter.Toplevel()
#toplevel.grid(sticky='nswe')
toplevel.columnconfigure(0, weight=1)
toplevel.rowconfigure(0, weight=1)
Tframe=Tkinter.Frame(toplevel)
Tframe.grid(sticky='nswe')
Tframe.columnconfigure(0, weight=1)
Tframe.rowconfigure(0, weight=1)
PathwayImage=Tkinter.PhotoImage(file=path)
PhotoImage=Tkinter.Text(Tframe)
PhotoImage.image_create(Tkinter.END, image=PathwayImage)
PhotoImage.img=PathwayImage
PhotoImage.grid(row = 0, column=0, sticky='nswe')
scrollbar1=Tkinter.Scrollbar(Tframe, command=PhotoImage.yview)
scrollbar1.grid(row=0, column=1, sticky='nswe')
scrollbar2=Tkinter.Scrollbar(Tframe, orient=Tkinter.HORIZONTAL, command=PhotoImage.xview)
scrollbar2.grid(row=1, column=0, sticky='nswe')
PhotoImage.columnconfigure(0, weight=1)
PhotoImage.rowconfigure(0, weight=1)
PhotoImage.configure(yscrollcommand=scrollbar1.set)
PhotoImage.configure(xscrollcommand=scrollbar2.set)
PhotoImage.lift()
def get_similar_ligands(self):
self.ligssdir=os.path.join(self.outdir, "Similar_Ligands")
if os.path.exists(self.ligssdir):
os.chdir(self.ligssdir)
else:
os.mkdir(self.ligssdir)
os.chdir(self.ligssdir)
#print "Aquiring similar ligands...."
self.smiles=self.entryVariable3.get()
#print self.smiles
self.lift_ligss_info()
url="https://www.ebi.ac.uk/chemblws/compounds/similarity/"+self.smiles+"/70"
#print url
try:
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
response_assay_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_assay_xml)
self.text6.insert(Tkinter.INSERT, "Similar Ligands: "+"\n\n")
self.text6.insert(Tkinter.INSERT, "ChemblID \t Similarity \t smiles \n\n")
fileh=open("Similar_Ligands.smi", "w")
idh=open("Similar_ligands.txt", "w")
idh.write("smiles \t ChemblID \t Similarity \n")
for i in root:
self.text6.insert(Tkinter.INSERT, i[1].text+"\t"+i[4].text+"\t"+i[0].text+"\n\n")
fileh.write(i[0].text+"\n")
idh.write(i[0].text+"\t"+i[1].text+"\t"+i[4].text+"\n")
fileh.close()
idh.close()
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for similar ligands!"
elif err.code == 403:
print "Access denied for similar ligands!"
else:
print "Something happened in similar ligands! Error code", err.code
def get_smiles(self, chembl_id):
print "Aquiring smiles....\n THIS COULD TAKE LONG TIME DEPENDING ON NUMBER OF MOLECULES THAT MATCHES CRITERION!!"
#tkMessageBox.showinfo(title="Aquiring smiles...", message="THIS COULD TAKE LONG TIME DEPENDING ON NUMBER OF MOLECULES THAT MATCHES CRITERION!!")
ids=chembl_id
smiles=list()
for i in ids:
url="http://www.ebi.ac.uk/chemblws/compounds/"+str(i)
try:
response_ligsmi_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_ligsmi_xml)
smiles.append(root[0].text)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for smiles!"
elif err.code == 403:
print "Access denied for smiles!"
else:
print "Something else happened in smiles! Error code", err.code
return smiles
def get_info(self):
#print "1 Aquiring uniprot id from pdb id...."
self.pdb_id=self.entryVariable1.get().upper()
self.pdb_chain_id=self.entryVariable2.get().upper()
cwd = os.path.expanduser("~/Desktop/")
self.outdir = os.path.join(cwd, 'PyMine_Output_'+str(self.pdb_id))
if not os.path.exists(self.outdir):
os.mkdir(self.outdir)
os.chdir(self.outdir)
for line in urllib2.urlopen('http://www.uniprot.org/docs/pdbtosp.txt'):
if len(line.split())>1:
if self.pdb_id == str(line.split()[0]):
self.uniprot.append(str(line.split()[5])[1:-1])
self.text1.insert(Tkinter.INSERT, "PDB ID: "+self.pdb_id+ "\n\n")
if self.uniprot:
self.text1.insert(Tkinter.END, "Uniprot: " +', '.join(map(str, self.uniprot))+"\n\n")
else:
self.text1.insert(Tkinter.END, "Uniprot id not found " +"\n\n")
def get_user_info(self):
#print "1 Aquiring uniprot id from pdb id...."
self.pdb_id=self.userpdb_filename_noext
print self.pdb_id
#self.pdb_chain_id=self.entryVariable2.get().upper()
cwd = os.path.expanduser("~/Desktop/")
self.outdir = os.path.join(cwd, 'PyMine_Output_'+str(self.pdb_id))
if not os.path.exists(self.outdir):
os.mkdir(self.outdir)
os.chdir(self.outdir)
self.uniprot.append(self.entryVariable5.get().upper())
self.text1.insert(Tkinter.INSERT, "PDB File: "+self.pdb_id+ "\n\n")
if self.uniprot:
self.text1.insert(Tkinter.END, "Uniprot: " +', '.join(map(str, self.uniprot))+"\n\n")
else:
self.text1.insert(Tkinter.END, "Uniprot id not found " +"\n\n")
def show_pdb(self):
#print "2 Importing 3d structure...."
pymol.cmd.cd(self.outdir)
#print pymol.cmd.pwd()
current_pdb=self.pdb_id
#pymol.finish_launching()
if self.flag==1:
pymol.cmd.load(self.userpdbfile_path)
else:
pymol.cmd.fetch(current_pdb) #pymol.cmd.load("/Users/rrc/Desktop/pymol_plugin/2RH1.pdb",current_pdb)
pdbfilename=str(self.pdb_id)+".pdb"
#pymol.cmd.save(pdbfilename, current_pdb)
pymol.cmd.hide('everything', current_pdb)
#pymol.cmd.select("selection", current_pdb)
pymol.cmd.show('cartoon')
pymol.cmd.select('ligand', 'organic')
def get_pdb_file(self):
#print "3 Aquiring pdb and uniprot file...."
if self.flag==1:
pdbfile=open(self.userpdbfile_path, "r")
for line in pdbfile:
self.text3.insert(Tkinter.END, line)
else:
filename=str(self.pdb_id.lower())+".pdb"
pdbfile=open(filename, "r")
for line in pdbfile:
self.text3.insert(Tkinter.END, line)
def get_uniprot_file(self):
#print self.uniprot[0]
if self.uniprot:
fh=open(self.uniprot[0]+".txt", "w")
for line in urllib2.urlopen('http://www.uniprot.org/uniprot/'+self.uniprot[0]+'.txt'):
self.text4.insert(Tkinter.END, line)
fh.write(line)
fh.close()
else:
print "Error in uniprot id"
def get_ligands(self):
#print "4 Aquiring pdb ligands...."
try:
url="http://www.rcsb.org/pdb/rest/ligandInfo?structureId="+self.pdb_id
response_xml = urllib2.urlopen(url).read()
root=ET.fromstring(response_xml)
for i in root[0]:
chemid = i.attrib['chemicalID']
for j in i:
if j.tag=="smiles":
smiles=j.text
if j.tag=="chemicalName":
chem_name=j.text
self.ligands.append([chemid, chem_name, smiles])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pdb ligands!"
elif err.code == 403:
print "Access denied for pdb ligands!"
else:
print "Something else happened in pdb ligands! Error code", err.code
if self.ligands:
self.text2.insert(Tkinter.END, "Ligands in PDB: \n\n")
for i in self.ligands:
self.text2.insert(Tkinter.END, ' '.join(map(str, i))+"\n\n")
else:
self.text2.insert(Tkinter.END, "Ligands not found\n\n")
def get_ligand_images(self):
#print "5 Aquiring pdb ligand images...."
self.ligdir=os.path.join(self.outdir, "Ligands")
if not os.path.exists(self.ligdir):
os.mkdir(self.ligdir)
os.chdir(self.ligdir)
if self.ligands:
for i in self.ligands:
chid=i[0]
#print "Working on "+ str(chid)
try:
url="http://www.ebi.ac.uk/chemblws/compounds/smiles/"+i[2]
#print url
response_xml_chemblids=urllib2.urlopen(url).read()
root=ET.fromstring(response_xml_chemblids)
if len(root)>0:
lig_chemblID=root[0].find("chemblId").text
self.ligand_chemblid.append([chid, lig_chemblID])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for ligand images!"
elif err.code == 403:
print "Access denied for ligand images!"
else:
print "Ignored smiles retrieval for ions!"
else:
print "Ligands not present"
if self.ligand_chemblid:
for i in self.ligand_chemblid:
url="http://www.ebi.ac.uk/chemblws/compounds/"+i[1]+"/image"
#print url
imgRequest = urllib2.Request(url)
imgData=urllib2.urlopen(imgRequest).read()
self.ligand_images.append(imgData)
fh=open(str(i[1])+".gif", "w")
fh.write(imgData)
fh.close()
else:
print "Ligand chembl id not found"
def get_target_chembl_id(self):
#print "6 Aquiring target chembl id...."
if self.uniprot:
url="http://www.ebi.ac.uk/chemblws/targets/uniprot/"+str(self.uniprot[0])
#print url
try:
response_assay_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_assay_xml)
for i in root:
#print i.tag
if i.tag =="preferredName":
self.common_name=str(i.text)
if i.tag =="organism":
self.organism=str(i.text)
if i.tag=="chemblId":
self.target_chemblID=i.text
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for target chembl id!"
elif err.code == 403:
print "Access denied for target chembl id!"
else:
print "Something happened in target chembl id! Error code", err.code
else:
print "Error in uniprot id!"
self.text2.insert(Tkinter.END, "Could not retrieve assay information because uniprot id missing\n\n")
if self.target_chemblID:
#print self.target_chemblID
self.get_assay_info()
if self.ec50_comps:
#print "EC50 data available"
self.text2.insert(Tkinter.END, "Compounds with EC50 values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ec50_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "EC50 data not available"+"\n\n")
if self.ic50_comps:
#print "IC50 data available"
self.text2.insert(Tkinter.END, "Compounds with IC50 values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ic50_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "IC50 data not avaialble"+"\n\n")
if self.ki_comps:
#print "KI data available"
self.text2.insert(Tkinter.END, "Compounds with Ki values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ki_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "Ki data not available"+"\n\n")
else:
self.text2.insert(Tkinter.END, "Assay data not available"+"\n\n")
def get_approved_drugs(self):
#print "7 Aquiring approved drugs...."
try:
url="http://www.ebi.ac.uk/chemblws/targets/"+self.target_chemblID+"/approvedDrug"
response_approved_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_approved_xml)
for i in root:
molecule =list()
if len(i)==0:
break
else:
for j in i:
molecule.append([j.tag, j.text])
self.approved_drugs.append(molecule)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for approved drugs!"
elif err.code == 403:
print "Access denied for approved drugs!"
else:
print "Something happened in aquiring approved_drugs! Error code", err.code
def show_lig_info(self):
#print "8 Showing approved drug information...."
os.chdir(self.ligdir)
self.agonist=list()
self.antagonist=list()
if not self.approved_drugs:
self.text2.insert(Tkinter.END, "No Approved Drugs found for this receptor"+"\n\n")
else:
self.text2.insert(Tkinter.END, "Approved Drugs: \n\n")
for i in self.approved_drugs:
#print i[2][1].split()[-1]
if i[2][1].split()[-1] == "agonist":
self.agonist.append([i])
if i[2][1].split()[-1] =="antagonist":
self.antagonist.append([i])
#self.text2.insert(Tkinter.END, ''.join(map(str, self.approved_drugs))+"\n\n")
if self.agonist:
self.text2.insert(Tkinter.END, "Agonists: \n\n")
for i in self.agonist:
for j in i:
for k in j:
self.text2.insert(Tkinter.END, ' '.join(map(str, k))+"\n")
self.text2.insert(Tkinter.END, "\n\n")
if self.antagonist:
self.text2.insert(Tkinter.END, "Antagonists: \n\n")
for i in self.antagonist:
for j in i:
for k in j:
self.text2.insert(Tkinter.END, ' '.join(map(str, k))+"\n")
self.text2.insert(Tkinter.END, "\n\n")
if self.agonist:
fh=open("Approved_agonist.txt", "w")
for i in self.agonist:
for j in i:
fh.write(str(j[0][1]+"\n"))
fh.close()
if (self.antagonist):
fh=open("Approved_antagonist.txt", "w")
for i in self.antagonist:
for j in i:
fh.write(str(j[0][1]+"\n"))
fh.close()
def get_saps(self):
#print "9 Aquiring saps...."
for line in urllib2.urlopen('http://www.uniprot.org/docs/humsavar.txt'):
if len(line.split())>1:
if str(self.uniprot[0]) == str(line.split()[1]):
gene_name=line.split()[0]
mutation=line.split()[3][2:]
origres=mutation[:3]
num=mutation[3:-3]
changedres=mutation[-3:]
disease=line.split()[6:]
self.saps.append([origres, num, changedres, disease])
#print gene_name, mutation, origres, num, changedres
if self.saps:
self.show_saps()
self.text1.insert(Tkinter.END, "Single Amino Acid Polymoprphism:\n\n"+ '\n'.join(map(str, self.saps))+"\n\n")
else:
print "SAPs not found"
self.text1.insert(Tkinter.END, "Single Amino Acid Polymoprphism not found"+"\n\n")
def show_saps(self):
#print "10 Showing SAPS...."
sap_residues=list()
sap_res_str=''
for i in self.saps:
if i[1] not in sap_residues:
sap_residues.append(i[1])
for i in sap_residues:
sap_res_str="resi " + str(i)
#print sap_res_str
pymol.cmd.select("SAPs", sap_res_str)
pymol.cmd.show("spheres", sap_res_str)
pymol.cmd.deselect()
def get_bs(self):
#print "11 Aquiring binding site information...."
lig_bs=list()
ppi_bs=list()
dna_bs=list()
rna_bs=list()
ion_bs=list()
pep_bs=list()
try:
for line in urllib2.urlopen("https://dl.dropboxusercontent.com/u/61033253/ibisdown/"+self.pdb_id[1:-1]+"/"+self.pdb_id+".txt"):
spline=line.split(":") #Query:Interaction_type:Mmdb_Residue_No:PDB_Residue_No:Binding_Site_Residues:Binding_Site_Conservation:Avg_PercentID:Singleton:PISA_validation:Biol_Chemical_validation:Site_CDD_Annotation:Interaction_Partner:PDB_Evidence:Is_Observed:Ranking_Score:Query_Domain
if spline[1]=="LIG" and spline[0][-1:]==self.pdb_chain_id:
lig_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="PPI" and spline[0][-1:]==self.pdb_chain_id:
ppi_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="DNA" and spline[0][-1:]==self.pdb_chain_id:
dna_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="RNA" and spline[0][-1:]==self.pdb_chain_id:
rna_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="ION" and spline[0][-1:]==self.pdb_chain_id:
ion_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="PEP" and spline[0][-1:]==self.pdb_chain_id:
pep_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for binding sites!"
elif err.code == 403:
print "Access denied for binding sites!"
else:
print "Something else happened in getting binding site information! Error code", err.code
self.binding_sites=[lig_bs, ppi_bs, dna_bs, rna_bs, ion_bs, pep_bs]
def show_bs(self):
#print "12 Showing binding sites...."
counter=0
for i in self.binding_sites[0]:
counter+=1
self.lig_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("lig_bs"+str(counter), self.lig_bs_residues)
pymol.cmd.deselect()
pymol.cmd.group("Ligand_Binding_Sites", "lig_bs*")
counter=0
for i in self.binding_sites[1]:
counter+=1
self.ppi_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("ppi_bs"+str(counter), self.ppi_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("PPI_Sites", "ppi_bs*")
counter=0
for i in self.binding_sites[2]:
counter+=1
self.dna_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("dna_bs"+str(counter), self.dna_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("DNA_Binding_Sites", "dna_bs*")
counter=0
for i in self.binding_sites[3]:
counter+=1
self.rna_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("rna_bs"+str(counter), self.rna_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("RNA_Binding_Sites", "rna_bs*")
counter=0
for i in self.binding_sites[4]:
counter+=1
#print counter
self.ion_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("ion_bs"+str(counter), self.ion_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("ION_Bindins_Sites", "ion_bs*")
counter=0
for i in self.binding_sites[5]:
counter+=1
self.pep_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("pep_bs"+str(counter), self.pep_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("PEP_Binding_Sites", "pep_bs*")
        if not any(self.binding_sites):
self.text1.insert(Tkinter.END, "Binding site data not found\n")
else:
self.text1.insert(Tkinter.END, "Binding Sites/Similar Binding Sites: \n\n")
for i in self.binding_sites:
for j in i:
self.text1.insert(Tkinter.END, '\t'.join(map(str, j))+"\n\n")
def get_assay_info(self):
#print "13 Aquiring assay information...."
self.ligdir=os.path.join(self.outdir, "Ligands")
if not os.path.exists(self.ligdir):
os.mkdir(self.ligdir)
os.chdir(self.ligdir)
#os.chdir(self.ligdir)
url="http://www.ebi.ac.uk/chemblws/targets/"+self.target_chemblID+"/bioactivities"
try:
response_xml_chemblids=urllib2.urlopen(url).read()
root=ET.fromstring(response_xml_chemblids)
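            # Positional fields per bioactivity element, inferred from the checks
            # below: [4]=compound ChEMBL id, [6]=bioactivity type, [9]=units,
            # [12]=value, [13]=relation operator.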
for i in root:
if i[6].text=="EC50" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ec50_comps.append(i[4].text)
elif i[6].text=="IC50" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ic50_comps.append(i[4].text)
elif i[6].text=="Ki" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ki_comps.append(i[4].text)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for assay data!"
elif err.code == 403:
print "Access denied for assay data!"
else:
print "Something else happened in get_assay_info! Error code", err.code
if self.ec50_comps:
#print self.ec50_comps
#print "EC50 data available"
ec50_fh=open("EC50.txt", "w")
for i in self.ec50_comps:
ec50_fh.write(str(i)+"\n")
ec50_fh.close()
ec50_smi=self.get_smiles(self.ec50_comps)
if ec50_smi:
ec50smi_fh=open("EC50.smi", "w")
ec50smi_fh.write('\n'.join(map(str, ec50_smi)))
ec50smi_fh.close()
else:
print "EC50 data not available"
if self.ic50_comps:
#print "IC50 data available"
ic50_fh=open("IC50.txt", "w")
for i in self.ic50_comps:
ic50_fh.write(str(i)+"\n")
ic50_fh.close()
ic50_smi=self.get_smiles(self.ic50_comps)
if ic50_smi:
ic50smi_fh=open("IC50.smi", "w")
ic50smi_fh.write('\n'.join(map(str, ic50_smi)))
ic50smi_fh.close()
else:
print "IC50 data not available"
if self.ki_comps:
#print "Ki data available"
ki_fh=open("KI.txt", "w")
for i in self.ki_comps:
ki_fh.write(str(i)+"\n")
ki_fh.close()
ki_smi=self.get_smiles(self.ki_comps)
if ki_smi:
ki_smi_fh=open("KI.smi", "w")
ki_smi_fh.write('\n'.join(map(str, ki_smi)))
ki_smi_fh.close()
else:
print "Ki data not available"
def get_kegg_info(self):
#print "15 Aquiring pathway information...."
#print os.getcwd()
url = 'http://rest.genome.jp/link/genes/uniprot:'+self.uniprot[0]+'/original'
#print "Aquiring genes...."
self.kegg_genes=list()
try:
response = urllib2.urlopen(url)
for line in response:
self.kegg_genes.append(line.split()[1])
"""
for i in self.kegg_genes:
self.text5.insert(Tkinter.INSERT, str(i)+ "\n\n")
"""
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for genes!"
elif err.code == 403:
print "Access denied for genes!"
else:
print "Something happened in Kegg genes! Error code", err.code
#### genes to pathway ids
if self.kegg_genes:
for i in self.kegg_genes:
url = 'http://rest.genome.jp/link/path/'+i+'/original'
#print "Aquiring kegg pathaway id...."
self.kegg_pathway_ids=list()
try:
response = urllib2.urlopen(url)
for line in response:
self.kegg_pathway_ids.append(line.split()[1])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pathway ids!"
elif err.code == 403:
print "Access denied!"
else:
print "Something happened in Kegg pathway ids! Error code", err.code
### get pathway information
if self.kegg_pathway_ids:
for i in self.kegg_pathway_ids:
url= 'http://rest.kegg.jp/get/'+i
#print "Aquiring Kegg Pathways...."
try:
response = urllib2.urlopen(url)
for line in response:
if line.startswith('CLASS'):
break
self.text5.insert(Tkinter.INSERT, line +"\n")
# For the pathway information hyperlink
self.text5.insert(Tkinter.INSERT, url+"\n\n", ('link'))
self.text5.tag_config('link', foreground="blue", underline=1)
self.text5.tag_bind('link', '<Button-1>', lambda event, arg=url: self.showLink(event, arg))
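                            # Binding url as a lambda default argument freezes its
                            # current value for this link's callback; a plain closure
                            # would see only the final value of the loop variable.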
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for kegg pathways!"
elif err.code == 403:
print "Access denied for kegg pathways!"
else:
print "Something happened in Kegg pathways! Error code", err.code
### Get pathway images
url= 'http://rest.kegg.jp/get/'+i+'/image'
#print "Aquiring pathaway images...."
try:
imgRequest = urllib2.Request(url)
imgData=urllib2.urlopen(imgRequest).read()
self.pathwaydir=os.path.join(self.outdir, "Pathways")
if os.path.exists(self.pathwaydir):
os.chdir(self.pathwaydir)
else:
os.mkdir(self.pathwaydir)
os.chdir(self.pathwaydir)
filename=i.split(':')[1]
fh=open(filename+".gif", "w")
fh.write(imgData)
fh.close()
path_image=self.pathwaydir+"/"+filename+".gif"
#print path_image
ButtonImage=Tkinter.PhotoImage(file=path_image)
#print ButtonImage
path_button=Tkinter.Button(self.text5, text="Pathway Image", command=lambda j=path_image: self.show_pathway(j))
#path_button.img=ButtonImage
self.text5.window_create(Tkinter.INSERT, window=path_button)
self.text5.insert(Tkinter.INSERT, "\n----------------------------X--------------------------\n\n\n")
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pathway images!"
elif err.code == 403:
print "Access denied for pathway images!"
else:
print "Something happened in Kegg pathway images! Error code", err.code
else:
print "Kegg pathway ids not found"
self.text5.insert(Tkinter.INSERT, "Kegg data not found.\n\n")
else:
print "Kegg gene not available"
self.text5.insert(Tkinter.INSERT, "Kegg data not found.\n\n")
#print os.getcwd()
def get_results(self):
self.text1.lift()
self.text1.config(state=Tkinter.NORMAL)
self.text1.delete(1.0, Tkinter.END)
self.text2.config(state=Tkinter.NORMAL)
self.text2.delete(1.0, Tkinter.END)
self.text3.config(state=Tkinter.NORMAL)
self.text3.delete(1.0, Tkinter.END)
self.text4.config(state=Tkinter.NORMAL)
self.text4.delete(1.0, Tkinter.END)
self.text5.config(state=Tkinter.NORMAL)
self.text5.delete(1.0, Tkinter.END)
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
        if self.userpdbfile is not None and self.entryVariable5.get():
self.flag=1
self.get_user_info()
self.show_pdb()
self.get_pdb_file()
self.get_uniprot_file()
self.get_target_chembl_id()
self.get_approved_drugs()
self.show_lig_info()
self.get_saps()
#self.get_bs()
#self.show_bs()
self.get_kegg_info()
else:
self.get_info()
self.show_pdb()
self.get_pdb_file()
self.get_uniprot_file()
self.get_ligands()
self.get_ligand_images()
self.get_target_chembl_id()
self.get_approved_drugs()
self.show_lig_info()
self.get_saps()
self.get_bs()
self.show_bs()
self.get_kegg_info()
self.text1.config(state=Tkinter.DISABLED)
self.text2.config(state=Tkinter.DISABLED)
self.text3.config(state=Tkinter.DISABLED)
self.text4.config(state=Tkinter.DISABLED)
self.text5.config(state=Tkinter.DISABLED)
self.text6.config(state=Tkinter.DISABLED)
def clear(self):
#Clear All the variables so that if we change the pdbid it will recreate the screen.
self.text1.config(state=Tkinter.NORMAL)
self.text1.delete(1.0, Tkinter.END)
self.text2.config(state=Tkinter.NORMAL)
self.text2.delete(1.0, Tkinter.END)
self.text3.config(state=Tkinter.NORMAL)
self.text3.delete(1.0, Tkinter.END)
self.text4.config(state=Tkinter.NORMAL)
self.text4.delete(1.0, Tkinter.END)
self.text5.config(state=Tkinter.NORMAL)
self.text5.delete(1.0, Tkinter.END)
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
self.flag=0
self.pdb_id=''
self.pdb_chain_id=''
self.entryVariable1.set('')
self.entryVariable2.set('')
self.entryVariable3.set('')
        self.entryVariable5.set('')
self.userpdbfile=None
self.userpdbfile_path=''
self.userpdb_filename=''
self.userpdb_filename_noext=''
self.label4.config(text='')
cwd=os.path.expanduser("~/Desktop/")
self.pdb_file=''
self.smiles=''
self.name=list()
self.summary=list()
self.symbol=list()
self.uniprot=list()
self.binding_sites=list()
self.ppi_bs_residues=''
self.lig_bs_residues=''
self.dna_bs_residues=''
self.rna_bs_residues=''
self.ion_bs_residues=''
self.pep_bs_residues=''
self.pathways=list()
self.saps=list()
self.ligands=list()
self.ligand_chemblid=list()
self.ligand_images=list()
self.agonist=list()
self.antagonist=list()
self.ki_comps=list()
self.ec50_comps=list()
self.ic50_comps=list()
self.outdir=None
pdbfile=None
pymol.cmd.delete('all')
pymol.cmd.reinitialize()
self.text1.lift()
def main():
app = PyMine(None)
app.title('PyMine Data Integration')
app.mainloop()
if __name__ == "__main__":
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalIndexTest(PandasOnSparkTestCase, TestUtils):
def test_categorical_index(self):
pidx = pd.CategoricalIndex([1, 2, 3])
psidx = ps.CategoricalIndex([1, 2, 3])
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pd.Index([1, 2, 3], dtype="category")
psidx = ps.Index([1, 2, 3], dtype="category")
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
psdf = ps.from_pandas(pdf)
pidx = pdf.set_index("b").index
psidx = psdf.set_index("b").index
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pdf.set_index(["a", "b"]).index.get_level_values(0)
psidx = psdf.set_index(["a", "b"]).index.get_level_values(0)
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
def test_categories_setter(self):
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
psdf = ps.from_pandas(pdf)
pidx = pdf.index
psidx = psdf.index
pidx.categories = ["z", "y", "x"]
psidx.categories = ["z", "y", "x"]
if LooseVersion(pd.__version__) >= LooseVersion("1.0.5"):
self.assert_eq(pidx, psidx)
self.assert_eq(pdf, psdf)
else:
pidx = pidx.set_categories(pidx.categories)
pdf.index = pidx
self.assert_eq(pidx, psidx)
self.assert_eq(pdf, psdf)
with self.assertRaises(ValueError):
psidx.categories = [1, 2, 3, 4]
def test_add_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.add_categories(4), psidx.add_categories(4))
self.assert_eq(pidx.add_categories([4, 5]), psidx.add_categories([4, 5]))
self.assert_eq(pidx.add_categories([]), psidx.add_categories([]))
self.assertRaises(ValueError, lambda: psidx.add_categories(4, inplace=True))
self.assertRaises(ValueError, lambda: psidx.add_categories(3))
self.assertRaises(ValueError, lambda: psidx.add_categories([4, 4]))
def test_remove_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.remove_categories(2), psidx.remove_categories(2))
self.assert_eq(pidx.remove_categories([1, 3]), psidx.remove_categories([1, 3]))
self.assert_eq(pidx.remove_categories([]), psidx.remove_categories([]))
self.assert_eq(pidx.remove_categories([2, 2]), psidx.remove_categories([2, 2]))
self.assert_eq(pidx.remove_categories([1, 2, 3]), psidx.remove_categories([1, 2, 3]))
self.assert_eq(pidx.remove_categories(None), psidx.remove_categories(None))
self.assert_eq(pidx.remove_categories([None]), psidx.remove_categories([None]))
self.assertRaises(ValueError, lambda: pidx.remove_categories(4, inplace=True))
self.assertRaises(ValueError, lambda: psidx.remove_categories(4))
self.assertRaises(ValueError, lambda: psidx.remove_categories([4, None]))
def test_remove_unused_categories(self):
pidx = pd.CategoricalIndex([1, 4, 5, 3], categories=[4, 3, 2, 1])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.remove_unused_categories(), psidx.remove_unused_categories())
self.assertRaises(ValueError, lambda: psidx.remove_unused_categories(inplace=True))
def test_reorder_categories(self):
pidx = pd.CategoricalIndex([1, 2, 3])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.reorder_categories([1, 2, 3]), psidx.reorder_categories([1, 2, 3]))
self.assert_eq(
pidx.reorder_categories([1, 2, 3], ordered=True),
psidx.reorder_categories([1, 2, 3], ordered=True),
)
self.assert_eq(pidx.reorder_categories([3, 2, 1]), psidx.reorder_categories([3, 2, 1]))
self.assert_eq(
pidx.reorder_categories([3, 2, 1], ordered=True),
psidx.reorder_categories([3, 2, 1], ordered=True),
)
self.assertRaises(ValueError, lambda: pidx.reorder_categories([1, 2, 3], inplace=True))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2]))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2, 4]))
self.assertRaises(ValueError, lambda: psidx.reorder_categories([1, 2, 2]))
self.assertRaises(TypeError, lambda: psidx.reorder_categories(1))
def test_as_ordered_unordered(self):
pidx = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x"])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.as_ordered(), psidx.as_ordered())
self.assert_eq(pidx.as_unordered(), psidx.as_unordered())
self.assertRaises(ValueError, lambda: psidx.as_ordered(inplace=True))
self.assertRaises(ValueError, lambda: psidx.as_unordered(inplace=True))
def test_astype(self):
pidx = pd.Index(["a", "b", "c"])
psidx = ps.from_pandas(pidx)
self.assert_eq(psidx.astype("category"), pidx.astype("category"))
self.assert_eq(
psidx.astype(CategoricalDtype(["c", "a", "b"])),
pidx.astype(CategoricalDtype(["c", "a", "b"])),
)
pcidx = pidx.astype(CategoricalDtype(["c", "a", "b"]))
pscidx = psidx.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(pscidx.astype("category"), pcidx.astype("category"))
# CategoricalDtype is not updated if the dtype is same from pandas 1.3.
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
self.assert_eq(
pscidx.astype(CategoricalDtype(["b", "c", "a"])),
pcidx.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
pscidx.astype(CategoricalDtype(["b", "c", "a"])),
pcidx,
)
self.assert_eq(pscidx.astype(str), pcidx.astype(str))
def test_factorize(self):
pidx = pd.CategoricalIndex([1, 2, 3, None])
psidx = ps.from_pandas(pidx)
pcodes, puniques = pidx.factorize()
kcodes, kuniques = psidx.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pidx.factorize(na_sentinel=-2)
kcodes, kuniques = psidx.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_append(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
self.assert_eq(psidx1.append(psidx2), pidx1.append(pidx2))
self.assert_eq(
psidx1.append(psidx3.astype("category")), pidx1.append(pidx3.astype("category"))
)
# TODO: append non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.append(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.append(psidx4))
def test_union(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
self.assert_eq(psidx1.union(psidx2), pidx1.union(pidx2))
self.assert_eq(
psidx1.union(psidx3.astype("category")), pidx1.union(pidx3.astype("category"))
)
# TODO: union non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.union(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.union(psidx4))
def test_intersection(self):
pidx1 = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
pidx2 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x", "w"])
pidx3 = pd.Index(["y", "x", "w", "z"])
psidx1 = ps.from_pandas(pidx1)
psidx2 = ps.from_pandas(pidx2)
psidx3 = ps.from_pandas(pidx3)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psidx1.intersection(psidx2).sort_values(), pidx1.intersection(pidx2).sort_values()
)
self.assert_eq(
psidx1.intersection(psidx3.astype("category")).sort_values(),
pidx1.intersection(pidx3.astype("category")).sort_values(),
)
else:
self.assert_eq(
psidx1.intersection(psidx2).sort_values(),
pidx1.intersection(pidx2).set_categories(pidx1.categories).sort_values(),
)
self.assert_eq(
psidx1.intersection(psidx3.astype("category")).sort_values(),
pidx1.intersection(pidx3.astype("category"))
.set_categories(pidx1.categories)
.sort_values(),
)
# TODO: intersection non-categorical or categorical with a different category
self.assertRaises(NotImplementedError, lambda: psidx1.intersection(psidx3))
pidx4 = pd.CategoricalIndex(["y", "x", "w"], categories=["z", "y", "x"])
psidx4 = ps.from_pandas(pidx4)
self.assertRaises(NotImplementedError, lambda: psidx1.intersection(psidx4))
def test_insert(self):
pidx = pd.CategoricalIndex(["x", "y", "z"], categories=["z", "y", "x", "w"])
psidx = ps.from_pandas(pidx)
self.assert_eq(psidx.insert(1, "w"), pidx.insert(1, "w"))
def test_rename_categories(self):
pidx = pd.CategoricalIndex(["a", "b", "c", "d"])
psidx = ps.from_pandas(pidx)
self.assert_eq(pidx.rename_categories([0, 1, 3, 2]), psidx.rename_categories([0, 1, 3, 2]))
self.assert_eq(
pidx.rename_categories({"a": "A", "c": "C"}),
psidx.rename_categories({"a": "A", "c": "C"}),
)
self.assert_eq(
pidx.rename_categories(lambda x: x.upper()),
psidx.rename_categories(lambda x: x.upper()),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories(None),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories(1),
)
self.assertRaises(
TypeError,
lambda: psidx.rename_categories("x"),
)
def test_set_categories(self):
pidx = pd.CategoricalIndex(["a", "b", "c", "d"])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "o"]),
psidx.set_categories(["a", "c", "b", "o"]),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b"]),
psidx.set_categories(["a", "c", "b"]),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "d", "e"]),
psidx.set_categories(["a", "c", "b", "d", "e"]),
)
self.assert_eq(
pidx.set_categories([0, 1, 3, 2], rename=True),
psidx.set_categories([0, 1, 3, 2], rename=True),
)
self.assert_eq(
pidx.set_categories([0, 1, 3], rename=True),
psidx.set_categories([0, 1, 3], rename=True),
)
self.assert_eq(
pidx.set_categories([0, 1, 3, 2, 4], rename=True),
psidx.set_categories([0, 1, 3, 2, 4], rename=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "o"], ordered=True),
psidx.set_categories(["a", "c", "b", "o"], ordered=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b"], ordered=True),
psidx.set_categories(["a", "c", "b"], ordered=True),
)
self.assert_eq(
pidx.set_categories(["a", "c", "b", "d", "e"], ordered=True),
psidx.set_categories(["a", "c", "b", "d", "e"], ordered=True),
)
self.assertRaisesRegex(
ValueError,
"cannot use inplace with CategoricalIndex",
lambda: psidx.set_categories(["a", "c", "b", "o"], inplace=True),
)
def test_map(self):
pidxs = [pd.CategoricalIndex([1, 2, 3]), pd.CategoricalIndex([1, 2, 3], ordered=True)]
psidxs = [ps.from_pandas(pidx) for pidx in pidxs]
for pidx, psidx in zip(pidxs, psidxs):
# Apply dict
self.assert_eq(
pidx.map({1: "one", 2: "two", 3: "three"}),
psidx.map({1: "one", 2: "two", 3: "three"}),
)
self.assert_eq(
pidx.map({1: "one", 2: "two", 3: "one"}),
psidx.map({1: "one", 2: "two", 3: "one"}),
)
self.assert_eq(
pidx.map({1: "one", 2: "two"}),
psidx.map({1: "one", 2: "two"}),
)
self.assert_eq(
pidx.map({1: 10, 2: 20}),
psidx.map({1: 10, 2: 20}),
)
# Apply lambda
self.assert_eq(
pidx.map(lambda id: id + 1),
psidx.map(lambda id: id + 1),
)
self.assert_eq(
pidx.map(lambda id: id + 1.1),
psidx.map(lambda id: id + 1.1),
)
self.assert_eq(
pidx.map(lambda id: "{id} + 1".format(id=id)),
psidx.map(lambda id: "{id} + 1".format(id=id)),
)
# Apply series
pser = pd.Series(["one", "two", "three"], index=[1, 2, 3])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
pser = pd.Series(["one", "two", "three"])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
pser = pd.Series([1, 2, 3])
self.assert_eq(
pidx.map(pser),
psidx.map(pser),
)
self.assertRaises(
TypeError,
lambda: psidx.map({1: 1, 2: 2.0, 3: "three"}),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.indexes.test_category import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
import os
import re
import warnings
from ctypes import c_char_p
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name,
GeoIP_database_info, GeoIP_delete, GeoIP_lib_version, GeoIP_open,
GeoIP_record_by_addr, GeoIP_record_by_name,
)
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
class GeoIPException(Exception):
pass
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been
# updated, reload filehandle and/or memory cache. This option
# is not thread safe.
#
# GEOIP_INDEX_CACHE - just cache the most frequently accessed index
# portion of the database, resulting in faster lookups than
# GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
# useful for larger databases such as GeoIP Organization and
# GeoIP City. Note, for GeoIP Country, Region and Netspeed
# databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
# GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
# on Windows).
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
GEOIP_MMAP_CACHE = 8
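    # A minimal usage sketch (hypothetical data path; note that __init__ below
    # accepts only a single value from cache_options, so the flags cannot be
    # OR'd together here):
    #
    #     from django.contrib.gis.geoip import GeoIP
    #     g = GeoIP(path='/data/geoip', cache=GeoIP.GEOIP_MEMORY_CACHE)
    #     g.country_code('8.8.8.8')    # looked up in the country database
    #     g.city('example.com')        # looked up in the city database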
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
        Initializes the GeoIP object. No parameters are required to use the default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4, 8) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
settings, respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
warnings.warn(
"django.contrib.gis.geoip is deprecated in favor of "
"django.contrib.gis.geoip2 and the MaxMind GeoLite2 database "
"format.", RemovedInDjango20Warning, 2
)
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH')
if not path:
raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = GeoIP_open(force_bytes(country_db), cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = GeoIP_open(force_bytes(city_db), cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = GeoIP_open(force_bytes(path), cache)
info = GeoIP_database_info(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if GeoIP_delete is None:
return
if self._country:
GeoIP_delete(self._country)
if self._city:
GeoIP_delete(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP only takes bytestrings.
return force_bytes(query)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
if ipv4_re.match(query):
# If an IP address was passed in
return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
else:
# If a FQDN was passed in.
return GeoIP_record_by_name(self._city, c_char_p(enc_query))
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_code_by_addr(self._country, enc_query)
else:
return GeoIP_country_code_by_name(self._country, enc_query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
if self._country:
if ipv4_re.match(query):
return GeoIP_country_name_by_addr(self._country, enc_query)
else:
return GeoIP_country_name_by_name(self._country, enc_query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code': self.country_code(query),
'country_name': self.country_name(query),
}
# #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        "Returns a coordinate tuple for the given query, in the given ordering."
        cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = GeoIP_database_info(self._country)
return ci
@property
def city_info(self):
"Returns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = GeoIP_database_info(self._city)
return ci
@property
def info(self):
"Returns information about the GeoIP library and databases in use."
info = ''
if GeoIP_lib_version:
info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
# #### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
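# Hedged usage sketch (illustrative, not part of the original module). It
# assumes a configured Django settings module whose GEOIP_PATH points at a
# directory holding the legacy GeoIP.dat / GeoLiteCity.dat files; the query
# values below are placeholders.
if __name__ == '__main__':
    g = GeoIP()
    print(g.country('djangoproject.com'))   # {'country_code': ..., 'country_name': ...}
    print(g.lat_lon('djangoproject.com'))   # (latitude, longitude), needs the city db
    print(g.info)                           # library and database version info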
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import sys
import testtools
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def setUpClass(cls):
super(ServersNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
if CONF.compute.allow_tenant_isolation:
cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
else:
cls.alt_os = clients.AltManager()
cls.alt_client = cls.alt_os.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative', 'gate'])
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative', 'gate'])
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative', 'gate'])
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative', 'gate'])
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@test.attr(type=['negative', 'gate'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative', 'gate'])
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
_, server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(exceptions.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative', 'gate'])
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
file_contents = 'Test server rebuild.'
personality = [{'path': '/etc/rebuild.txt',
'contents': base64.b64encode(file_contents)}]
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt,
name=new_name, meta=meta,
personality=personality,
adminPass='rebuild')
@test.attr(type=['negative', 'gate'])
def test_create_numeric_server_name(self):
# Create a server with a numeric name
if self.__class__._interface == "xml":
raise self.skipException("Not testable in XML")
server_name = 12345
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative', 'gate'])
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
networks=networks)
@test.attr(type=['negative', 'gate'])
def test_create_with_non_existent_keypair(self):
# Pass a non-existent keypair while creating a server
key_name = data_utils.rand_name('key')
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
key_name=key_name)
@test.attr(type=['negative', 'gate'])
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises(exceptions.OverLimit,
self.create_test_server,
meta=metadata)
@test.attr(type=['negative', 'gate'])
def test_update_name_of_non_existent_server(self):
# Update name of a non-existent server
server_name = data_utils.rand_name('server')
new_name = data_utils.rand_name('server') + '_updated'
self.assertRaises(exceptions.NotFound, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_set_empty_name(self):
# Update name of the server to an empty string
server_name = data_utils.rand_name('server')
new_name = ''
self.assertRaises(exceptions.BadRequest, self.client.update_server,
server_name, name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_of_another_tenant(self):
# Update name of a server that belongs to another tenant
new_name = self.server_id + '_new'
self.assertRaises(exceptions.NotFound,
self.alt_client.update_server, self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_update_server_name_length_exceeds_256(self):
        # Update the server name to exceed the name length limit
new_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.client.update_server,
self.server_id,
name=new_name)
@test.attr(type=['negative', 'gate'])
def test_delete_non_existent_server(self):
# Delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
self.assertRaises(exceptions.NotFound,
self.alt_client.delete_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_negative_id(self):
# Pass an invalid string parameter to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server, -1)
@test.attr(type=['negative', 'gate'])
def test_delete_server_pass_id_exceeding_length_limit(self):
# Pass a server ID that exceeds length limit to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server,
sys.maxint + 1)
@test.attr(type=['negative', 'gate'])
def test_create_with_nonexistent_security_group(self):
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
security_groups=security_groups)
@test.attr(type=['negative', 'gate'])
def test_get_non_existent_server(self):
# Get a non existent server details
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.get_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_stop_non_existent_server(self):
# Stop a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.servers_client.stop,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_non_existent_server(self):
# pause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.pause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unpause_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
self.assertRaises(exceptions.Conflict,
self.client.unpause_server,
self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.suspend_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
self.client.resume_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
# resume a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.resume_server,
nonexistent_server)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_server_invalid_state(self):
# resume an active server.
self.assertRaises(exceptions.Conflict,
self.client.resume_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_get_console_output_of_non_existent_server(self):
# get the console output for a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.get_console_output,
nonexistent_server, 10)
@test.attr(type=['negative', 'gate'])
def test_force_delete_nonexistent_server_id(self):
# force-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.force_delete_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_force_delete_server_invalid_state(self):
# we can only force-delete a server in 'soft-delete' state
self.assertRaises(exceptions.Conflict,
self.client.force_delete_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_restore_nonexistent_server_id(self):
# restore-delete a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
self.client.restore_soft_deleted_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_restore_server_invalid_state(self):
# we can only restore-delete a server in 'soft-delete' state
self.assertRaises(exceptions.Conflict,
self.client.restore_soft_deleted_server,
self.server_id)
@test.attr(type=['negative', 'gate'])
def test_shelve_non_existent_server(self):
# shelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.shelve_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
resp, server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
params = {'name': image_name}
resp, images = self.images_client.list_images(params)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.assertRaises(exceptions.Conflict,
self.client.shelve_server,
self.server_id)
self.client.unshelve_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.unshelve_server,
nonexistent_server)
@test.attr(type=['negative', 'gate'])
def test_unshelve_server_invalid_state(self):
# unshelve an active server.
self.assertRaises(exceptions.Conflict,
self.client.unshelve_server,
self.server_id)
class ServersNegativeTestXML(ServersNegativeTestJSON):
_interface = 'xml'
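# Hedged usage note (illustrative, not part of the original file): tempest
# tests of this vintage are normally run through testr, e.g. something like
#   testr run tempest.api.compute.servers.test_servers_negative
# The exact module path and runner invocation are assumptions here.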
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from google.cloud import aiplatform
from typing import Dict, List, Mapping, Optional, Tuple, Union
try:
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp import notebook
except ImportError:
    raise ImportError(
        "LIT is not installed and is required to return a LIT Dataset. "
        'Please install the SDK using "pip install python-aiplatform[lit]"'
    )
try:
import tensorflow as tf
except ImportError:
    raise ImportError(
        "TensorFlow is not installed and is required to load the saved model. "
        'Please install the SDK using "pip install python-aiplatform[lit]"'
    )
try:
import pandas as pd
except ImportError:
raise ImportError(
"Pandas is not installed and is required to read the dataset. "
'Please install Pandas using "pip install python-aiplatform[lit]"'
)
class _VertexLitDataset(lit_dataset.Dataset):
"""LIT dataset class for the Vertex LIT integration.
This is used in the create_lit_dataset function.
"""
def __init__(
self,
dataset: pd.DataFrame,
column_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
):
"""Construct a VertexLitDataset.
Args:
dataset:
Required. A Pandas DataFrame that includes feature column names and data.
column_types:
Required. An OrderedDict of string names matching the columns of the dataset
as the key, and the associated LitType of the column.
"""
self._examples = dataset.to_dict(orient="records")
self._column_types = column_types
def spec(self):
"""Return a spec describing dataset elements."""
return dict(self._column_types)
class _EndpointLitModel(lit_model.Model):
"""LIT model class for the Vertex LIT integration with a model deployed to an endpoint.
This is used in the create_lit_model function.
"""
def __init__(
self,
endpoint: Union[str, aiplatform.Endpoint],
input_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
output_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
model_id: Optional[str] = None,
):
"""Construct a VertexLitModel.
Args:
            endpoint:
                Required. The name of the Endpoint resource or an Endpoint
                instance. Endpoint name format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
model_id:
Optional. A string of the specific model in the endpoint to create the
LIT model from. If this is not set, any usable model in the endpoint is
used to create the LIT model.
Raises:
ValueError if the model_id was not found in the endpoint.
"""
if isinstance(endpoint, str):
self._endpoint = aiplatform.Endpoint(endpoint)
else:
self._endpoint = endpoint
self._model_id = model_id
self._input_types = input_types
self._output_types = output_types
# Check if the model with the model ID has explanation enabled
if model_id:
deployed_model = next(
filter(
lambda model: model.id == model_id, self._endpoint.list_models()
),
None,
)
if not deployed_model:
raise ValueError(
"A model with id {model_id} was not found in the endpoint {endpoint}.".format(
model_id=model_id, endpoint=endpoint
)
)
self._explanation_enabled = bool(deployed_model.explanation_spec)
# Check if all models in the endpoint have explanation enabled
else:
self._explanation_enabled = all(
model.explanation_spec for model in self._endpoint.list_models()
)
    def predict_minibatch(
        self, inputs: List[lit_types.JsonDict]
    ) -> List[lit_types.JsonDict]:
        """Return predictions based on a batch of inputs.
        Args:
            inputs: Required. A list of instances to predict on based on the input spec.
Returns:
A list of predictions based on the output spec.
"""
instances = []
for input in inputs:
instance = [input[feature] for feature in self._input_types]
instances.append(instance)
if self._explanation_enabled:
prediction_object = self._endpoint.explain(instances)
else:
prediction_object = self._endpoint.predict(instances)
outputs = []
for prediction in prediction_object.predictions:
if isinstance(prediction, Mapping):
outputs.append({key: prediction[key] for key in self._output_types})
else:
outputs.append(
{key: prediction[i] for i, key in enumerate(self._output_types)}
)
if self._explanation_enabled:
for i, explanation in enumerate(prediction_object.explanations):
attributions = explanation.attributions
outputs[i]["feature_attribution"] = lit_dtypes.FeatureSalience(
attributions
)
return outputs
def input_spec(self) -> lit_types.Spec:
"""Return a spec describing model inputs."""
return dict(self._input_types)
def output_spec(self) -> lit_types.Spec:
"""Return a spec describing model outputs."""
output_spec_dict = dict(self._output_types)
if self._explanation_enabled:
output_spec_dict["feature_attribution"] = lit_types.FeatureSalience(
signed=True
)
return output_spec_dict
class _TensorFlowLitModel(lit_model.Model):
"""LIT model class for the Vertex LIT integration with a TensorFlow saved model.
This is used in the create_lit_model function.
"""
def __init__(
self,
model: str,
input_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
output_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
attribution_method: str = "sampled_shapley",
):
"""Construct a VertexLitModel.
Args:
model:
Required. A string reference to a local TensorFlow saved model directory.
                The model must have exactly one input and one output tensor.
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
attribution_method:
Optional. A string to choose what attribution configuration to
set up the explainer with. Valid options are 'sampled_shapley'
or 'integrated_gradients'.
"""
self._load_model(model)
self._input_types = input_types
self._output_types = output_types
self._input_tensor_name = next(iter(self._kwargs_signature))
self._attribution_explainer = None
if os.environ.get("LIT_PROXY_URL"):
self._set_up_attribution_explainer(model, attribution_method)
@property
def attribution_explainer(self,) -> Optional["AttributionExplainer"]: # noqa: F821
"""Gets the attribution explainer property if set."""
return self._attribution_explainer
    def predict_minibatch(
        self, inputs: List[lit_types.JsonDict]
    ) -> List[lit_types.JsonDict]:
        """Return predictions based on a batch of inputs.
        Args:
            inputs: Required. A list of instances to predict on based on the input spec.
Returns:
A list of predictions based on the output spec.
"""
instances = []
for input in inputs:
instance = [input[feature] for feature in self._input_types]
instances.append(instance)
prediction_input_dict = {
self._input_tensor_name: tf.convert_to_tensor(instances)
}
prediction_dict = self._loaded_model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
](**prediction_input_dict)
predictions = prediction_dict[next(iter(self._output_signature))].numpy()
outputs = []
for prediction in predictions:
outputs.append(
{
label: value
for label, value in zip(self._output_types.keys(), prediction)
}
)
# Get feature attributions
if self.attribution_explainer:
attributions = self.attribution_explainer.explain(
[{self._input_tensor_name: i} for i in instances]
)
for i, attribution in enumerate(attributions):
outputs[i]["feature_attribution"] = lit_dtypes.FeatureSalience(
attribution.feature_importance()
)
return outputs
def input_spec(self) -> lit_types.Spec:
"""Return a spec describing model inputs."""
return dict(self._input_types)
def output_spec(self) -> lit_types.Spec:
"""Return a spec describing model outputs."""
output_spec_dict = dict(self._output_types)
if self.attribution_explainer:
output_spec_dict["feature_attribution"] = lit_types.FeatureSalience(
signed=True
)
return output_spec_dict
def _load_model(self, model: str):
"""Loads a TensorFlow saved model and populates the input and output signature attributes of the class.
Args:
model: Required. A string reference to a TensorFlow saved model directory.
Raises:
ValueError if the model has more than one input tensor or more than one output tensor.
"""
self._loaded_model = tf.saved_model.load(model)
serving_default = self._loaded_model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
_, self._kwargs_signature = serving_default.structured_input_signature
self._output_signature = serving_default.structured_outputs
if len(self._kwargs_signature) != 1:
raise ValueError("Please use a model with only one input tensor.")
if len(self._output_signature) != 1:
raise ValueError("Please use a model with only one output tensor.")
def _set_up_attribution_explainer(
self, model: str, attribution_method: str = "integrated_gradients"
):
"""Populates the attribution explainer attribute of the class.
Args:
model: Required. A string reference to a TensorFlow saved model directory.
attribution_method:
Optional. A string to choose what attribution configuration to
set up the explainer with. Valid options are 'sampled_shapley'
or 'integrated_gradients'.
"""
try:
import explainable_ai_sdk
from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder
except ImportError:
            logging.info(
                "Skipping explanations because the Explainable AI SDK is not "
                "installed. Please install the SDK using "
                '"pip install explainable-ai-sdk"'
            )
return
builder = SavedModelMetadataBuilder(model)
builder.get_metadata()
builder.set_numeric_metadata(
self._input_tensor_name,
index_feature_mapping=list(self._input_types.keys()),
)
builder.save_metadata(model)
if attribution_method == "integrated_gradients":
explainer_config = explainable_ai_sdk.IntegratedGradientsConfig()
else:
explainer_config = explainable_ai_sdk.SampledShapleyConfig()
self._attribution_explainer = explainable_ai_sdk.load_model_from_local_path(
model, explainer_config
)
self._load_model(model)
def create_lit_dataset(
dataset: pd.DataFrame,
column_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
) -> lit_dataset.Dataset:
"""Creates a LIT Dataset object.
Args:
dataset:
Required. A Pandas DataFrame that includes feature column names and data.
column_types:
Required. An OrderedDict of string names matching the columns of the dataset
as the key, and the associated LitType of the column.
Returns:
A LIT Dataset object that has the data from the dataset provided.
"""
return _VertexLitDataset(dataset, column_types)
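# Hedged usage sketch (illustrative): column_types is an OrderedDict mapping
# DataFrame column names to LIT types. The column names and LIT types below
# are placeholders, not part of this module.
#
#   import collections
#   import pandas as pd
#   df = pd.DataFrame({"feature_1": [1.0, 2.0], "label": [0.0, 1.0]})
#   column_types = collections.OrderedDict(
#       [("feature_1", lit_types.Scalar()), ("label", lit_types.Scalar())]
#   )
#   lit_ds = create_lit_dataset(df, column_types)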
def create_lit_model_from_endpoint(
endpoint: Union[str, aiplatform.Endpoint],
input_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
output_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
model_id: Optional[str] = None,
) -> lit_model.Model:
"""Creates a LIT Model object.
Args:
        endpoint:
Required. The name of the Endpoint resource or an Endpoint instance.
Endpoint name format: ``projects/{project}/locations/{location}/endpoints/{endpoint}``
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
model_id:
Optional. A string of the specific model in the endpoint to create the
LIT model from. If this is not set, any usable model in the endpoint is
used to create the LIT model.
Returns:
A LIT Model object that has the same functionality as the model provided.
"""
return _EndpointLitModel(endpoint, input_types, output_types, model_id)
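# Hedged usage sketch (illustrative): wrapping an already-deployed Vertex AI
# endpoint for LIT. The endpoint resource name, feature name, and label name
# are placeholders.
#
#   lit_endpoint_model = create_lit_model_from_endpoint(
#       "projects/my-project/locations/us-central1/endpoints/1234567890",
#       input_types=collections.OrderedDict([("feature_1", lit_types.Scalar())]),
#       output_types=collections.OrderedDict([("label", lit_types.Scalar())]),
#   )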
def create_lit_model(
model: str,
input_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
output_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
attribution_method: str = "sampled_shapley",
) -> lit_model.Model:
"""Creates a LIT Model object.
Args:
model:
Required. A string reference to a local TensorFlow saved model directory.
            The model must have exactly one input and one output tensor.
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
attribution_method:
Optional. A string to choose what attribution configuration to
set up the explainer with. Valid options are 'sampled_shapley'
or 'integrated_gradients'.
Returns:
A LIT Model object that has the same functionality as the model provided.
"""
return _TensorFlowLitModel(model, input_types, output_types, attribution_method)
def open_lit(
models: Dict[str, lit_model.Model],
datasets: Dict[str, lit_dataset.Dataset],
open_in_new_tab: bool = True,
):
"""Open LIT from the provided models and datasets.
Args:
        models:
            Required. A dict of LIT models to open LIT with.
        datasets:
            Required. A dict of LIT datasets to open LIT with.
open_in_new_tab:
            Optional. A boolean to choose whether LIT opens in a new tab or not.
Raises:
ImportError if LIT is not installed.
"""
widget = notebook.LitWidget(models, datasets)
widget.render(open_in_new_tab=open_in_new_tab)
def set_up_and_open_lit(
dataset: Union[pd.DataFrame, lit_dataset.Dataset],
column_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
model: Union[str, lit_model.Model],
input_types: Union[List[str], Dict[str, lit_types.LitType]],
output_types: Union[str, List[str], Dict[str, lit_types.LitType]],
attribution_method: str = "sampled_shapley",
open_in_new_tab: bool = True,
) -> Tuple[lit_dataset.Dataset, lit_model.Model]:
"""Creates a LIT dataset and model and opens LIT.
Args:
dataset:
Required. A Pandas DataFrame that includes feature column names and data.
column_types:
Required. An OrderedDict of string names matching the columns of the dataset
as the key, and the associated LitType of the column.
model:
Required. A string reference to a TensorFlow saved model directory.
            The model must have exactly one input and one output tensor.
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
attribution_method:
Optional. A string to choose what attribution configuration to
set up the explainer with. Valid options are 'sampled_shapley'
or 'integrated_gradients'.
open_in_new_tab:
            Optional. A boolean to choose whether LIT opens in a new tab or not.
Returns:
A Tuple of the LIT dataset and model created.
Raises:
ImportError if LIT or TensorFlow is not installed.
        ValueError if the model doesn't have exactly one input and one output tensor.
"""
if not isinstance(dataset, lit_dataset.Dataset):
dataset = create_lit_dataset(dataset, column_types)
if not isinstance(model, lit_model.Model):
model = create_lit_model(
model, input_types, output_types, attribution_method=attribution_method
)
open_lit(
{"model": model}, {"dataset": dataset}, open_in_new_tab=open_in_new_tab,
)
return dataset, model
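# Hedged end-to-end sketch (illustrative): build the LIT dataset and model
# and open the UI in one call. The saved-model path and the df/column_types
# variables are placeholders (see the create_lit_dataset sketch above).
#
#   lit_ds, lit_m = set_up_and_open_lit(
#       dataset=df,
#       column_types=column_types,
#       model="./saved_model_dir",
#       input_types=collections.OrderedDict([("feature_1", lit_types.Scalar())]),
#       output_types=collections.OrderedDict([("label", lit_types.Scalar())]),
#   )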
|
|
# Copyright (c) 2016 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
import six
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import coordination
from cinder import exception
from cinder.i18n import _
import cinder.volume.drivers.ibm.ibm_storage as storage
from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper
from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient
from cinder.volume.drivers.ibm.ibm_storage import proxy
LOG = logging.getLogger(__name__)
PPRC_PATH_NOT_EXIST = 0x00
PPRC_PATH_HEALTHY = 0x01
PPRC_PATH_UNHEALTHY = 0x02
PPRC_PATH_FULL = 0x03
class MetroMirrorManager(object):
"""Manage metro mirror for replication."""
def __init__(self, source_helper, target_helper):
self._source_helper = source_helper
self._target_helper = target_helper
def switch_source_and_target(self):
self._source_helper, self._target_helper = (
self._target_helper, self._source_helper)
def check_physical_links(self):
ports = self._source_helper.get_physical_links(
self._target_helper.backend['storage_wwnn'])
if not ports:
raise exception.VolumeDriverException(
message=((_("%(tgt)s is not connected to %(src)s!") % {
'tgt': self._target_helper.backend['storage_wwnn'],
'src': self._source_helper.backend['storage_wwnn']
})))
pairs = [{
'source_port_id': p['source_port_id'],
'target_port_id': p['target_port_id']
} for p in ports]
if not self._target_helper.backend['port_pairs']:
# if there are more than eight physical links,
# choose eight of them.
self._target_helper.backend['port_pairs'] = (
pairs[:8] if len(pairs) > 8 else pairs)
else:
# verify the port pairs user set
for pair in self._target_helper.backend['port_pairs']:
if pair not in pairs:
valid_pairs = ';'.join(
["%s-%s" % (p['source_port_id'],
p['target_port_id'])
for p in pairs])
invalid_pair = "%s-%s" % (pair['source_port_id'],
pair['target_port_id'])
raise exception.VolumeDriverException(
message=((_("Invalid port pair: %(invalid)s, valid "
"port pair(s) are: %(valid)s")
% {'invalid': invalid_pair,
'valid': valid_pairs})))
self._source_helper.backend['port_pairs'] = [{
'source_port_id': p['target_port_id'],
'target_port_id': p['source_port_id']
} for p in self._target_helper.backend['port_pairs']]
def is_target_alive(self):
try:
self._target_helper.get_systems()
except restclient.TimeoutException as e:
            LOG.info("REST request timed out; the backend may not be "
                     "available any more. Exception: %s", e)
return False
return True
    def find_from_pprc_paths(self, specified_lss=None, excluded_lss=None):
        """Find an LSS from existing PPRC paths, and the pool id for it.
        The format of the returned pool_lss_pair is as below:
        {'source': (pid, lss), 'target': (pid, lss)}
        """
state, paths = self._filter_pprc_paths(specified_lss)
if state != PPRC_PATH_HEALTHY:
# check whether the physical links are available or not,
# or have been changed.
self.check_physical_links()
return state, None
if excluded_lss:
paths = [p for p in paths
if p['source_lss_id'] not in excluded_lss]
        # only establish_replication specifies the source LSS, and it needs
        # to reuse the LSS reserved for CG if that LSS is in a PPRC path.
if not specified_lss:
paths = [p for p in paths if p['source_lss_id'] not in
self._source_helper.backend['lss_ids_for_cg']]
# sort pairs according to the number of luns in their LSSes,
# and get the pair which LSS has least luns.
candidates = []
source_lss_set = set(p['source_lss_id'] for p in paths)
for lss in source_lss_set:
# get the number of luns in source.
src_luns = self._source_helper.get_lun_number_in_lss(lss)
if src_luns == helper.LSS_VOL_SLOTS and not specified_lss:
continue
spec_paths = [p for p in paths if p['source_lss_id'] == lss]
for path in spec_paths:
# get the number of luns in target.
try:
tgt_luns = self._target_helper.get_lun_number_in_lss(
path['target_lss_id'])
except restclient.APIException:
# if DS8K can fix this problem, then remove the
# exception here.
                    LOG.error("Target LSS %s in the PPRC path may not exist "
                              "even though the PPRC path is available.",
                              path['target_lss_id'])
tgt_luns = 0
candidates.append((path['source_lss_id'],
path['target_lss_id'],
src_luns + tgt_luns))
if not candidates:
return PPRC_PATH_FULL, None
else:
src_lss, tgt_lss, num = sorted(candidates, key=lambda c: c[2])[0]
return PPRC_PATH_HEALTHY, {
'source': (self._source_helper.get_pool(src_lss), src_lss),
'target': (self._target_helper.get_pool(tgt_lss), tgt_lss)
}
def _filter_pprc_paths(self, lss):
paths = self._source_helper.get_pprc_paths(lss)
if paths:
# get the paths only connected to replication target
paths = [p for p in paths if p['target_system_wwnn'] in
self._target_helper.backend['storage_wwnn']]
else:
LOG.info("No PPRC paths found in primary DS8K.")
return PPRC_PATH_NOT_EXIST, None
# get the paths whose port pairs have been set in configuration file.
expected_port_pairs = [
(port['source_port_id'], port['target_port_id'])
for port in self._target_helper.backend['port_pairs']]
for path in paths[:]:
port_pairs = [(p['source_port_id'], p['target_port_id'])
for p in path['port_pairs']]
if not (set(port_pairs) & set(expected_port_pairs)):
paths.remove(path)
if not paths:
            LOG.info("Existing PPRC paths do not use the port pairs that "
                     "were configured.")
return PPRC_PATH_NOT_EXIST, None
# abandon PPRC paths according to volume type(fb/ckd)
source_lss_set = set(p['source_lss_id'] for p in paths)
if self._source_helper.backend.get('device_mapping'):
source_lss_set = source_lss_set & set(
self._source_helper.backend['device_mapping'].keys())
else:
all_lss = self._source_helper.get_all_lss(['id', 'type'])
fb_lss = set(
lss['id'] for lss in all_lss if lss['type'] == 'fb')
source_lss_set = source_lss_set & fb_lss
paths = [p for p in paths if p['source_lss_id'] in source_lss_set]
if not paths:
LOG.info("No source LSS in PPRC paths has correct volume type.")
return PPRC_PATH_NOT_EXIST, None
# if the group property of lss doesn't match pool node,
# abandon these paths.
discarded_src_lss = []
discarded_tgt_lss = []
for lss in source_lss_set:
spec_paths = [p for p in paths if p['source_lss_id'] == lss]
if self._source_helper.get_pool(lss) is None:
discarded_src_lss.append(lss)
continue
for spec_path in spec_paths:
tgt_lss = spec_path['target_lss_id']
if self._target_helper.get_pool(tgt_lss) is None:
discarded_tgt_lss.append(tgt_lss)
if discarded_src_lss:
paths = [p for p in paths if p['source_lss_id'] not in
discarded_src_lss]
if discarded_tgt_lss:
paths = [p for p in paths if p['target_lss_id'] not in
discarded_tgt_lss]
if not paths:
LOG.info("No PPRC paths can be re-used.")
return PPRC_PATH_NOT_EXIST, None
# abandon unhealthy PPRC paths.
for path in paths[:]:
failed_port_pairs = [
p for p in path['port_pairs'] if p['state'] != 'success']
if len(failed_port_pairs) == len(path['port_pairs']):
paths.remove(path)
if not paths:
LOG.info("PPRC paths between primary and target DS8K "
"are unhealthy.")
return PPRC_PATH_UNHEALTHY, None
return PPRC_PATH_HEALTHY, paths
def create_pprc_path(self, lun, is_group=False):
switch = lun.failed_over if is_group else False
src_helper, tgt_helper = (
(self._target_helper, self._source_helper) if switch else
(self._source_helper, self._target_helper))
src_lss = lun.pool_lss_pair['source'][1]
tgt_lss = lun.pool_lss_pair['target'][1]
# check whether the pprc path exists and is healthy or not.
pid = (src_helper.backend['storage_wwnn'] + '_' + src_lss + ':' +
tgt_helper.backend['storage_wwnn'] + '_' + tgt_lss)
state = self._is_pprc_paths_healthy(pid, switch)
LOG.info("The state of PPRC path %(path)s is %(state)s.",
{'path': pid, 'state': state})
if state == PPRC_PATH_HEALTHY:
return
# create the pprc path
pathData = {
'target_system_wwnn': tgt_helper.backend['storage_wwnn'],
'source_lss_id': src_lss,
'target_lss_id': tgt_lss,
'port_pairs': tgt_helper.backend['port_pairs']
}
if lun.group and lun.group.consisgroup_replication_enabled:
pathData['pprc_consistency_group'] = 'enable'
LOG.info("PPRC path %(src)s:%(tgt)s will be created.",
{'src': src_lss, 'tgt': tgt_lss})
src_helper.create_pprc_path(pathData)
# check the state of the pprc path
LOG.debug("Checking the state of the new PPRC path.")
for retry in range(4):
eventlet.sleep(2)
if self._is_pprc_paths_healthy(pid, switch) == PPRC_PATH_HEALTHY:
break
if retry == 3:
src_helper.delete_pprc_path(pid)
raise restclient.APIException(
data=(_("Failed to create PPRC path %(src)s:%(tgt)s.")
% {'src': src_lss, 'tgt': tgt_lss}))
LOG.debug("Create the new PPRC path successfully.")
def _is_pprc_paths_healthy(self, path_id, switch):
bck_helper = self._target_helper if switch else self._source_helper
try:
path = bck_helper.get_pprc_path(path_id)
except restclient.APIException:
return PPRC_PATH_NOT_EXIST
for port in path['port_pairs']:
if port['state'] == 'success':
return PPRC_PATH_HEALTHY
return PPRC_PATH_UNHEALTHY
def create_pprc_pairs(self, lun):
tgt_vol_id = lun.replication_driver_data[
self._target_helper.backend['id']]['vol_hex_id']
tgt_stg_id = self._target_helper.backend['storage_unit']
vol_pairs = [{
'source_volume': lun.ds_id,
'source_system_id': self._source_helper.backend['storage_unit'],
'target_volume': tgt_vol_id,
'target_system_id': tgt_stg_id
}]
pair_data = {
"volume_pairs": vol_pairs,
"type": "metro_mirror",
"options": ["permit_space_efficient_target",
"initial_copy_full"]
}
LOG.debug("Creating pprc pair, pair_data is %s.", pair_data)
self._source_helper.create_pprc_pair(pair_data)
self._source_helper.wait_pprc_copy_finished([lun.ds_id], 'full_duplex')
LOG.info("The state of PPRC pair has become full_duplex.")
def delete_pprc_pairs(self, lun):
self._source_helper.delete_pprc_pair(lun.ds_id)
if self.is_target_alive() and lun.replication_driver_data:
replica = sorted(lun.replication_driver_data.values())[0]
self._target_helper.delete_pprc_pair(replica['vol_hex_id'])
def do_pprc_failover(self, luns, is_group=False):
switch = luns[0].failed_over if is_group else False
src_helper, tgt_helper = (
(self._target_helper, self._source_helper) if switch else
(self._source_helper, self._target_helper))
vol_pairs = []
target_vol_ids = []
for lun in luns:
if not tgt_helper.lun_exists(lun.replica_ds_id):
LOG.info("Target volume %(volid)s doesn't exist in "
"DS8K %(storage)s.",
{'volid': lun.replica_ds_id,
'storage': tgt_helper.backend['storage_unit']})
continue
vol_pairs.append({
'source_volume': lun.replica_ds_id,
'source_system_id': tgt_helper.backend['storage_unit'],
'target_volume': lun.ds_id,
'target_system_id': src_helper.backend['storage_unit']
})
target_vol_ids.append(lun.replica_ds_id)
pair_data = {
"volume_pairs": vol_pairs,
"type": "metro_mirror",
"options": ["failover"]
}
LOG.info("Begin to fail over to %(backend)s, "
"pair_data is %(pair_data)s.",
{'backend': tgt_helper.backend['storage_unit'],
'pair_data': pair_data})
tgt_helper.create_pprc_pair(pair_data)
tgt_helper.wait_pprc_copy_finished(target_vol_ids,
'suspended', switch)
LOG.info("Failover from %(src)s to %(tgt)s is finished.", {
'src': src_helper.backend['storage_unit'],
'tgt': tgt_helper.backend['storage_unit']
})
def get_pprc_pair_ids(self, luns, switch=False):
if not luns:
return None
src_helper, tgt_helper = (
(self._target_helper, self._source_helper) if switch else
(self._source_helper, self._target_helper))
pprc_pair_ids = []
for lun in luns:
if switch:
is_lun_exist = tgt_helper.lun_exists(lun.replica_ds_id)
else:
is_lun_exist = src_helper.lun_exists(lun.ds_id)
if not is_lun_exist:
LOG.info("Target volume %(volume)s doesn't exist in "
"DS8K %(storage)s.",
{'volume': (lun.replica_ds_id
if switch else lun.ds_id),
'storage': (tgt_helper.backend['storage_unit']
if switch else
src_helper.backend['storage_unit'])})
continue
pprc_pair_ids.append(
src_helper.backend['storage_unit'] + '_' + lun.ds_id + ':' +
tgt_helper.backend['storage_unit'] + '_' + lun.replica_ds_id)
return pprc_pair_ids
def do_pprc_failback(self, luns, is_group=False):
switch = luns[0].failed_over if is_group else False
bck_helper = self._target_helper if switch else self._source_helper
pair_data = {"pprc_ids": self.get_pprc_pair_ids(luns, switch),
"type": "metro_mirror",
"options": ["failback"]}
LOG.info("Begin to run failback in %(backend)s, "
"pair_data is %(pair_data)s.",
{'backend': bck_helper.backend['storage_unit'],
'pair_data': pair_data})
bck_helper.do_failback(pair_data)
lun_ids = [lun.ds_id for lun in luns]
bck_helper.wait_pprc_copy_finished(lun_ids, 'full_duplex', switch)
        LOG.info("Failback in %s is finished.",
                 bck_helper.backend['storage_unit'])
class Replication(object):
"""Metro Mirror and Global Mirror will be used by it.
Version history:
.. code-block:: none
1.0.0 - initial revision.
2.1.0 - ignore exception during cleanup when creating or deleting
replica failed.
2.1.1 - Adding support for replication consistency group.
"""
VERSION = "2.1.1"
def __init__(self, source_helper, target_device):
self._source_helper = source_helper
connection_type = target_device.get('connection_type')
if connection_type == storage.XIV_CONNECTION_TYPE_FC:
self._target_helper = (
helper.DS8KReplicationTargetHelper(target_device))
elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
self._target_helper = (
helper.DS8KReplicationTargetECKDHelper(target_device))
else:
raise exception.InvalidParameterValue(
err=(_("Param [connection_type] %s in replication_device "
"is invalid.") % connection_type))
if self._target_helper.backend['lss_ids_for_cg']:
if (len(self._target_helper.backend['lss_ids_for_cg']) !=
len(self._source_helper.backend['lss_ids_for_cg'])):
raise exception.VolumeDriverException(
                    message=_("Please reserve the same number of LSSs for "
                              "the secondary DS8K as for the primary DS8K."))
else:
self._target_helper.backend['lss_ids_for_cg'] = (
self._source_helper.backend['lss_ids_for_cg'])
self._mm_manager = MetroMirrorManager(self._source_helper,
self._target_helper)
def get_target_helper(self):
return self._target_helper
def get_source_helper(self):
return self._source_helper
def check_connection_type(self):
src_conn_type = self._source_helper.get_connection_type()
tgt_conn_type = self._target_helper.get_connection_type()
if src_conn_type != tgt_conn_type:
raise exception.VolumeDriverException(
message=(_("The connection type in primary backend is "
"%(primary)s, but in secondary backend it is "
"%(secondary)s")
% {'primary': src_conn_type,
'secondary': tgt_conn_type}))
def check_physical_links(self):
self._mm_manager.check_physical_links()
def switch_source_and_target_client(self):
# switch the helper in metro mirror manager
self._mm_manager.switch_source_and_target()
# switch the helper
self._source_helper, self._target_helper = (
self._target_helper, self._source_helper)
def _switch_source_and_target_volume(self, luns, secondary_backend_id):
for lun in luns:
if secondary_backend_id == 'default':
backend_id = self._target_helper.backend['id']
lun.failed_over = False
else:
backend_id = 'default'
lun.failed_over = True
# secondary_id is never blank here.
lun.replication_driver_data = (
{backend_id: {'vol_hex_id': lun.ds_id}})
lun.ds_id, lun.replica_ds_id = lun.replica_ds_id, lun.ds_id
return luns
@proxy.logger
def find_pool_lss_pair(self, excluded_lss):
state, pool_lss_pair = (
self._mm_manager.find_from_pprc_paths(None, excluded_lss))
if pool_lss_pair is None:
pool_lss_pair = self.find_new_lss_for_source(excluded_lss)
pool_lss_pair.update(self.find_new_lss_for_target())
return pool_lss_pair
@proxy.logger
def find_new_lss_for_source(self, excluded_lss):
src_pid, src_lss = self._source_helper.find_pool_and_lss(excluded_lss)
return {'source': (src_pid, src_lss)}
@proxy.logger
def find_new_lss_for_target(self):
tgt_pid, tgt_lss = self._target_helper.find_pool_and_lss()
return {'target': (tgt_pid, tgt_lss)}
@proxy.logger
def establish_replication(self, lun, delete_source=False):
state, lun.pool_lss_pair = (
self._mm_manager.find_from_pprc_paths(lun.ds_id[0:2]))
LOG.debug("establish_replication: pool_lss_pair is %s.",
lun.pool_lss_pair)
if state == PPRC_PATH_UNHEALTHY:
raise restclient.APIException(
                data=(_("The path(s) for volume %(name)s are not available "
                        "any more; please make sure the state of the path(s) "
                        "whose source LSS is %(lss)s is success.")
                      % {'name': lun.cinder_name, 'lss': lun.ds_id[0:2]}))
elif state == PPRC_PATH_NOT_EXIST:
pid = self._source_helper.get_pool(lun.ds_id[0:2])
lun.pool_lss_pair = {'source': (pid, lun.ds_id[0:2])}
lun.pool_lss_pair.update(self.find_new_lss_for_target())
lun = self.create_replica(lun, delete_source)
return lun
@proxy.logger
@coordination.synchronized('ibm-ds8k-replication')
def create_replica(self, lun, delete_source=True):
try:
self._target_helper.create_lun(lun)
            # create PPRC paths if needed.
self._mm_manager.create_pprc_path(lun)
# create pprc pair
self._mm_manager.create_pprc_pairs(lun)
except restclient.APIException:
with excutils.save_and_reraise_exception():
try:
self.delete_replica(lun)
if delete_source:
self._source_helper.delete_lun(lun)
except restclient.APIException as ex:
LOG.info("Failed to cleanup replicated volume %(id)s, "
"Exception: %(ex)s.",
{'id': lun.ds_id, 'ex': ex})
lun.replication_status = 'enabled'
return lun
@proxy.logger
def delete_replica(self, lun, delete_source=False):
if lun.ds_id is not None:
try:
self._mm_manager.delete_pprc_pairs(lun)
self._delete_replica(lun)
except restclient.APIException as e:
if delete_source:
try:
self._source_helper.delete_lun(lun)
except restclient.APIException as ex:
LOG.info("Failed to delete source volume %(id)s, "
"Exception: %(ex)s.",
{'id': lun.ds_id, 'ex': ex})
raise exception.VolumeDriverException(
message=(_('Failed to delete the target volume for '
'volume %(volume)s, Exception: %(ex)s.')
% {'volume': lun.ds_id, 'ex': six.text_type(e)}))
lun.replication_status = 'disabled'
lun.replication_driver_data = {}
return lun
@proxy.logger
def _delete_replica(self, lun):
if not lun.replication_driver_data:
            LOG.error("No replica ID for lun %s; maybe something went wrong "
                      "when creating the replica for this lun.", lun.ds_id)
return None
for backend_id, backend in lun.replication_driver_data.items():
if not self._mm_manager.is_target_alive():
return None
if not self._target_helper.lun_exists(backend['vol_hex_id']):
LOG.debug("Replica %s not found.", backend['vol_hex_id'])
continue
LOG.debug("Deleting replica %s.", backend['vol_hex_id'])
self._target_helper.delete_lun_by_id(backend['vol_hex_id'])
def extend_replica(self, lun, param):
for backend_id, backend in lun.replication_driver_data.items():
self._target_helper.change_lun(backend['vol_hex_id'], param)
def delete_pprc_pairs(self, lun):
self._mm_manager.delete_pprc_pairs(lun)
def create_pprc_pairs(self, lun):
self._mm_manager.create_pprc_pairs(lun)
def start_host_pprc_failover(self, luns, backend_id):
self._mm_manager.do_pprc_failover(luns)
self.switch_source_and_target_client()
self._switch_source_and_target_volume(luns, backend_id)
def start_group_pprc_failover(self, luns, backend_id):
# unlike host failover, group failover needs to fetch changes from
# target volumes to source volumes after group is failed over.
self._mm_manager.do_pprc_failover(luns, True)
self._switch_source_and_target_volume(luns, backend_id)
sample_luns = self._get_sample_luns(luns)
for lun in sample_luns:
self._mm_manager.create_pprc_path(lun, True)
self._mm_manager.do_pprc_failback(luns, True)
def _get_sample_luns(self, luns):
# choose sample lun according to position.
sample_luns = []
positions = []
for lun in luns:
position = (lun.pool_lss_pair['source'][1],
lun.pool_lss_pair['target'][1])
if position not in positions:
sample_luns.append(lun)
positions.append(position)
return sample_luns
@proxy.logger
def start_host_pprc_failback(self, luns, backend_id):
# check whether primary client is alive or not.
if not self._mm_manager.is_target_alive():
try:
self._target_helper.update_client()
except restclient.APIException:
msg = _("Can not connect to the primary backend, "
"please make sure it is back.")
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
LOG.debug("Failback host starts, backend id is %s.", backend_id)
sample_luns = self._get_sample_luns(luns)
for lun in sample_luns:
self._mm_manager.create_pprc_path(lun)
self._mm_manager.do_pprc_failback(luns)
# revert the relationship of source volume and target volume
self.start_host_pprc_failover(luns, backend_id)
self._mm_manager.do_pprc_failback(luns)
LOG.debug("Failback host ends, backend id is %s.", backend_id)
@proxy.logger
def start_group_pprc_failback(self, luns, backend_id):
        # NOTE: unlike host failover, after a group is failed over,
        # source and target clients are not swapped.
LOG.debug("Failback group starts, backend id is %s.", backend_id)
self.start_group_pprc_failover(luns, backend_id)
LOG.debug("Failback group ends, backend id is %s.", backend_id)
def _get_expected_luns(self, luns, state, ignored_state=None):
lun_ids = set(lun.ds_id for lun in luns)
min_lun_id = min(lun_ids)
max_lun_id = max(lun_ids)
if not luns[0].failed_over:
pairs = self._source_helper.get_pprc_pairs(min_lun_id, max_lun_id)
else:
pairs = self._target_helper.get_pprc_pairs(min_lun_id, max_lun_id)
pairs = {pair['source_volume']['name']: pair for pair in pairs}
expected_luns = []
for lun in luns:
pair = pairs.get(lun.ds_id)
if pair:
if ignored_state and pair['state'] == ignored_state:
continue
elif pair['state'] != state:
raise exception.VolumeDriverException(
message=(_("Source volume %(id)s has wrong pprc pair "
"state %(invalid_state)s, expected one is "
"%(valid_state)s")
% {'id': pair['source_volume']['name'],
'invalid_state': pair['state'],
'valid_state': state}))
else:
raise exception.VolumeDriverException(
message=_("There is no PPRC pair for source volume "
"%s.") % lun.ds_id)
expected_luns.append(lun)
return expected_luns
@proxy.logger
def enable_replication(self, luns):
# after group is failed over, user can not enable replication.
if not luns:
return None
luns = self._get_expected_luns(luns, 'suspended', 'full_duplex')
pprc_pair_ids = self._mm_manager.get_pprc_pair_ids(luns)
LOG.debug("enable_replication: pprc_pair_ids is %s", pprc_pair_ids)
if pprc_pair_ids:
self._source_helper.resume_pprc_pairs(pprc_pair_ids)
@proxy.logger
def disable_replication(self, luns):
# after group is failed over, user can not disable replication.
if not luns:
return None
luns = self._get_expected_luns(luns, 'full_duplex', 'suspended')
pprc_pair_ids = self._mm_manager.get_pprc_pair_ids(luns)
LOG.debug("disable_replication: pprc_pair_ids is %s", pprc_pair_ids)
if pprc_pair_ids:
self._source_helper.pause_pprc_pairs(pprc_pair_ids)
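
# A minimal failover/failback sketch (illustrative only, not part of the
# driver): ``replication`` is assumed to be an instance of the replication
# manager above and ``luns`` a list of Lun objects already under PPRC. Host
# failover swaps the source and target clients, so the matching failback is
# issued against the same backend id once the primary is reachable again.
def _host_failover_failback_sketch(replication, luns, backend_id):
    # Fail the host over to the secondary backend.
    replication.start_host_pprc_failover(luns, backend_id)
    # Later, fail back; this re-checks that the primary client is alive,
    # re-creates the PPRC paths, and reverts the volume relationship.
    replication.start_host_pprc_failback(luns, backend_id)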
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the discrete_domain utilities.
Caveat: assumes that the MNI template image is available
in ~/.nipy/tests/data.
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from ..discrete_domain import smatrix_from_nd_idx, smatrix_from_3d_array, \
smatrix_from_nd_array, domain_from_binary_array, domain_from_image, \
domain_from_mesh, grid_domain_from_binary_array, grid_domain_from_image, \
grid_domain_from_shape
from nibabel import Nifti1Image
import nibabel.gifti as nbg
from nipy.testing.decorators import skipif
GOOD_GIFTI = hasattr(nbg, 'GiftiDataArray')
shape = np.array([5, 6, 7, 8, 9])
def generate_dataset(shape):
"""Generate a dataset with the described shape
"""
dim = len(shape)
idx = np.reshape(np.indices(shape), (dim, -1)).T
return idx
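# For example, generate_dataset((2, 3)) returns the six grid coordinates
# [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]] as a (6, 2) index array.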
def test_smatrix_1d():
"""Test the 1-d topological domain
"""
idx = generate_dataset(shape[:1])
sm = smatrix_from_nd_idx(idx, nn=0)
assert_equal(sm.data.size, 2 * shape[0] - 2)
def test_smatrix_2d():
"""Test the 2-d topological domain
"""
idx = generate_dataset(shape[:2])
sm = smatrix_from_nd_idx(idx, nn=0)
ne = 2 * (2 * np.prod(shape[:2]) - shape[0] - shape[1])
assert_equal(sm.data.size, ne)
def test_smatrix_3d():
"""Test the 3-d topological domain
"""
idx = generate_dataset(shape[:3])
sm = smatrix_from_nd_idx(idx)
ne = 2 * (3 * np.prod(shape[:3]) - shape[0] * shape[1]
- shape[0] * shape[2] - shape[1] * shape[2])
assert_equal(sm.data.size, ne)
def test_smatrix_4d():
"""Test the 4-d topological domain
"""
idx = generate_dataset(shape[:4])
sm = smatrix_from_nd_idx(idx)
ne = 4 * np.prod(shape[:4])
for d in range(4):
ne -= np.prod(shape[:4]) / shape[d]
ne *= 2
assert_equal(sm.data.size, ne)
def test_smatrix_5d():
"""Test the 5-d topological domain
"""
idx = generate_dataset(shape)
sm = smatrix_from_nd_idx(idx)
ne = 5 * np.prod(shape)
for d in range(5):
ne -= np.prod(shape) / shape[d]
ne *= 2
assert_equal(sm.data.size, ne)
def test_smatrix_5d_bis():
"""Test the 5-d topological domain
"""
toto = np.ones(shape)
sm = smatrix_from_nd_array(toto)
ne = 5 * np.prod(shape)
for d in range(5):
ne -= np.prod(shape) / shape[d]
ne *= 2
assert_equal(sm.data.size, ne)
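# Illustrative helper (added for exposition, not used by the tests): the
# running computation shared by the tests above has the closed form
# 2 * sum_d prod(shape) / shape[d] * (shape[d] - 1), i.e. every neighbour
# pair along each axis contributes two entries to the symmetric matrix.
def _expected_edge_count(shp):
    return int(2 * sum(np.prod(shp) // s * (s - 1) for s in shp))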
def test_matrix_from_3d_array():
"""Test the topology using the nipy.graph approach
"""
toto = np.ones(shape[:3])
sm = smatrix_from_3d_array(toto, 6)
ne = 3 * np.prod(shape[:3])
for d in range(3):
ne -= np.prod(shape[:3]) / shape[d]
ne *= 2
    print(sm.data, ne)
assert_equal((sm.data > 0).sum(), ne)
def test_array_domain():
"""Test the construction of domain based on array
"""
toto = np.ones(shape)
ddom = domain_from_binary_array(toto)
assert_equal(np.sum(ddom.local_volume), np.prod(shape))
def test_connected_components():
"""Test the estimation of connected components
"""
toto = np.ones(shape)
ddom = domain_from_binary_array(toto)
assert_equal(ddom.connected_components(), np.zeros(ddom.size))
def test_image_domain():
"""Test the construction of domain based on image
"""
toto = np.ones(shape[:3])
affine = np.random.randn(4, 4)
affine[3:, 0:3] = 0
nim = Nifti1Image(toto, affine)
ddom = domain_from_image(nim)
ref = np.sum(toto) * np.absolute(np.linalg.det(affine))
assert_almost_equal(np.sum(ddom.local_volume), ref)
def test_image_feature():
"""Test the construction of domain based on image and related feature
"""
mask = np.random.randn(*shape[:3]) > .5
noise = np.random.randn(*shape[:3])
affine = np.eye(4)
mim = Nifti1Image(mask.astype('u8'), affine)
nim = Nifti1Image(noise, affine)
ddom = grid_domain_from_image(mim)
ddom.make_feature_from_image(nim, 'noise')
assert_almost_equal(ddom.features['noise'], noise[mask])
def test_array_grid_domain():
"""Test the construction of grid domain based on array
"""
toto = np.ones(shape)
ddom = grid_domain_from_binary_array(toto)
assert_equal(np.sum(ddom.local_volume), np.prod(shape))
def test_image_grid_domain():
"""Test the construction of grid domain based on image
"""
toto = np.ones(shape[:3])
affine = np.random.randn(4, 4)
affine[3:, 0:3] = 0
nim = Nifti1Image(toto, affine)
ddom = grid_domain_from_image(nim)
ref = np.sum(toto) * np.absolute(np.linalg.det(affine[:3, 0:3]))
assert_almost_equal(np.sum(ddom.local_volume), ref)
def test_shape_grid_domain():
"""
"""
ddom = grid_domain_from_shape(shape)
assert_equal(np.sum(ddom.local_volume), np.prod(shape))
def test_feature():
""" test feature inclusion
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
ddom.set_feature('data', np.ravel(toto))
plop = ddom.get_feature('data')
assert_almost_equal(plop, np.ravel(toto))
def test_mask_feature():
""" test_feature_masking
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
ddom.set_feature('data', np.ravel(toto))
mdom = ddom.mask(np.ravel(toto > .5))
plop = mdom.get_feature('data')
assert_almost_equal(plop, toto[toto > .5])
def test_domain_mask():
"""test domain masking
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
mdom = ddom.mask(np.ravel(toto > .5))
assert_equal(mdom.size, np.sum(toto > .5))
def test_grid_domain_mask():
"""test grid domain masking
"""
toto = np.random.rand(*shape)
ddom = grid_domain_from_binary_array(toto)
mdom = ddom.mask(np.ravel(toto > .5))
assert_equal(mdom.size, np.sum(toto > .5))
@skipif(not GOOD_GIFTI)
def test_domain_from_mesh():
"""Test domain_from_mesh method
"""
coords = np.array([[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
triangles = np.asarray([[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]])
darrays = [nbg.GiftiDataArray(coords)] + [nbg.GiftiDataArray(triangles)]
toy_image = nbg.GiftiImage(darrays=darrays)
domain = domain_from_mesh(toy_image)
# if we get there, we could build the domain, and that's what we wanted.
assert_equal(domain.get_coord(), coords)
def test_representative():
""" test representative computation
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
ddom.set_feature('data', np.ravel(toto))
dmean = toto.mean()
dmin = toto.min()
dmax = toto.max()
dmed = np.median(toto)
assert_almost_equal(ddom.representative_feature('data', 'mean'), dmean)
assert_almost_equal(ddom.representative_feature('data', 'min'), dmin)
assert_almost_equal(ddom.representative_feature('data', 'max'), dmax)
assert_almost_equal(ddom.representative_feature('data', 'median'), dmed)
def test_integrate_1d():
""" test integration in 1d
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
ddom.set_feature('data', np.ravel(toto))
assert_almost_equal(ddom.integrate('data'), toto.sum())
def test_integrate_2d():
"""test integration in 2d
"""
toto = np.random.rand(*shape)
ddom = domain_from_binary_array(toto)
ftoto = np.ravel(toto)
f2 = np.vstack((ftoto, ftoto)).T
ddom.set_feature('data', f2)
ts = np.ones(2) * toto.sum()
assert_almost_equal(ddom.integrate('data'), ts)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import jmespath
from mock import patch, MagicMock
from streamalert.alert_processor import main as alert_processor
from streamalert.alert_processor.helpers import compose_alert
from streamalert.alert_processor.outputs.output_base import OutputDispatcher
from streamalert.classifier import classifier
from streamalert.rules_engine import rules_engine
from streamalert.shared import rule
from streamalert.shared.config import ConfigError
from streamalert.shared.logger import get_logger
from streamalert.shared.stats import RuleStatisticTracker
from streamalert_cli.helpers import check_credentials
from streamalert_cli.test.format import format_green, format_red, format_underline, format_yellow
from streamalert_cli.test.mocks import LookupTableMocks, ThreatIntelMocks
from streamalert_cli.test.event_file import TestEventFile
from streamalert_cli.utils import (
CLICommand,
DirectoryType,
generate_subparser,
UniqueSortedFileListAction,
UniqueSortedListAction,
)
LOGGER = get_logger(__name__)
class TestCommand(CLICommand):
description = 'Perform various integration/functional tests'
@classmethod
def setup_subparser(cls, subparser):
"""Add the test subparser: manage.py test"""
test_subparsers = subparser.add_subparsers(dest='test subcommand', required=True)
cls._setup_test_classifier_subparser(test_subparsers)
cls._setup_test_rules_subparser(test_subparsers)
cls._setup_test_live_subparser(test_subparsers)
@classmethod
def _setup_test_classifier_subparser(cls, subparsers):
"""Add the test validation subparser: manage.py test classifier [options]"""
test_validate_parser = generate_subparser(
subparsers,
'classifier',
description='Validate defined log schemas using integration test files',
subcommand=True
)
cls._add_default_test_args(test_validate_parser)
@classmethod
def _setup_test_rules_subparser(cls, subparsers):
"""Add the test rules subparser: manage.py test rules [options]"""
test_rules_parser = generate_subparser(
subparsers,
'rules',
description='Test rules using integration test files',
subcommand=True
)
# Flag to run additional stats during testing
test_rules_parser.add_argument(
'-s',
'--stats',
action='store_true',
            help='Enable outputting of statistical information on rules that run'
)
        # Validate the provided repetition value
        def _validate_repetition(val):
            """Make sure the input is between 1 and 1000"""
            err = ('Invalid repetition value [{}]. Must be an integer between 1 '
                   'and 1000').format(val)
            try:
                count = int(val)
            except (TypeError, ValueError):
                raise test_rules_parser.error(err)
            if not 1 <= count <= 1000:
                raise test_rules_parser.error(err)
            return count
# flag to run these tests a given number of times
test_rules_parser.add_argument(
'-n',
'--repeat',
default=1,
            type=_validate_repetition,
            help='Number of times to repeat the tests, as a form of performance testing'
)
cls._add_default_test_args(test_rules_parser)
@classmethod
def _setup_test_live_subparser(cls, subparsers):
"""Add the test live subparser: manage.py test live [options]"""
test_live_parser = generate_subparser(
subparsers,
'live',
description=(
'Run end-to-end tests that will attempt to send alerts to each rule\'s outputs'
),
subcommand=True
)
cls._add_default_test_args(test_live_parser)
@staticmethod
def _add_default_test_args(test_parser):
"""Add the default arguments to the test parsers"""
test_filter_group = test_parser.add_mutually_exclusive_group(required=False)
# add the optional ability to test specific files
test_filter_group.add_argument(
'-f',
'--test-files',
dest='files',
nargs='+',
help='Full path to one or more file(s) to test, separated by spaces',
action=UniqueSortedFileListAction,
type=argparse.FileType('r'),
default=[]
)
# add the optional ability to test specific rules
test_filter_group.add_argument(
'-r',
'--test-rules',
dest='rules',
nargs='+',
            help='One or more rules to test, separated by spaces',
action=UniqueSortedListAction,
default=[]
)
# add the ability to specify rule directories to test
test_parser.add_argument(
'-d',
'--rules-dir',
            help='Path to one or more directories containing rules, separated by spaces',
nargs='+',
action=UniqueSortedListAction,
type=DirectoryType(),
default=['rules']
)
        # Add the optional ability to log verbosely or use quiet logging for tests
verbose_group = test_parser.add_mutually_exclusive_group(required=False)
verbose_group.add_argument(
'-v',
'--verbose',
action='store_true',
help='Output additional information during testing'
)
verbose_group.add_argument(
'-q',
'--quiet',
action='store_true',
help='Suppress output for passing tests, only logging if there is a failure'
)
@classmethod
def handler(cls, options, config):
"""Handler for starting the test framework
Args:
options (argparse.Namespace): Parsed arguments
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
result = True
opts = vars(options)
repeat = opts.get('repeat', 1)
for i in range(repeat):
if repeat != 1:
print('\nRepetition #', i+1)
result = result and TestRunner(options, config).run()
if opts.get('stats'):
print(RuleStatisticTracker.statistics_info())
return result
class TestRunner:
"""TestRunner to handle running various tests"""
class Types:
"""Simple types enum for test types"""
CLASSIFY = 'classifier'
RULES = 'rules'
LIVE = 'live'
def __init__(self, options, config):
self._config = config
self._options = options
self._type = options.subcommand
self._files_filter = options.files
self._rules = options.rules
self._rules_dirs = options.rules_dir
self._rules_engine = self._setup_rules_engine(options.rules_dir)
self._verbose = options.verbose
self._quiet = options.quiet
self._s3_mocker = patch('streamalert.classifier.payload.s3.boto3.resource').start()
self._tested_rules = set()
self._passed = 0
self._failed = 0
prefix = self._config['global']['account']['prefix']
env = {
'STREAMALERT_PREFIX': prefix,
'AWS_ACCOUNT_ID': self._config['global']['account']['aws_account_id'],
'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix),
}
if 'stats' in options and options.stats:
env['STREAMALERT_TRACK_RULE_STATS'] = '1'
patch.dict(os.environ, env).start()
@staticmethod
def _run_classification(record):
"""Create a fresh classifier and classify the record, returning the result"""
with patch.object(classifier, 'SQSClient'), patch.object(classifier, 'FirehoseClient'):
_classifier = classifier.Classifier()
return _classifier.run(records=[record])
@staticmethod
@patch.object(rules_engine, 'AlertForwarder')
@patch('rules.helpers.base.random_bool', return_value=True)
@patch.object(rules_engine.RulesEngine, '_load_rule_table', return_value=None)
def _setup_rules_engine(dirs, *_):
"""Create a fresh rules engine and process the record, returning the result"""
return rules_engine.RulesEngine(*dirs)
def _run_rules_engine(self, record):
"""Create a fresh rules engine and process the record, returning the result"""
with patch.object(rules_engine.ThreatIntel, '_query') as ti_mock:
ti_mock.side_effect = ThreatIntelMocks.get_mock_values
# pylint: disable=protected-access
self._rules_engine._lookup_tables._tables.clear()
for table in LookupTableMocks.get_mock_values():
self._rules_engine._lookup_tables._tables[table.table_name] = table
return self._rules_engine.run(records=record)
@staticmethod
def _run_alerting(record):
"""Create a fresh alerts processor and send the alert(s), returning the result"""
with patch.object(alert_processor, 'AlertTable'):
alert_proc = alert_processor.AlertProcessor()
return alert_proc.run(event=record.dynamo_record())
def _check_prereqs(self):
if self._type == self.Types.LIVE:
return check_credentials()
return True
def _finalize(self):
summary = [
format_underline('\nSummary:\n'),
'Total Tests: {}'.format(self._passed + self._failed),
format_green('Pass: {}'.format(self._passed)) if self._passed else 'Pass: 0',
format_red('Fail: {}\n'.format(self._failed)) if self._failed else 'Fail: 0\n',
]
print('\n'.join(summary))
        # If rules are being tested and no filtering is being performed, log any untested rules
if self._testing_rules and not self._is_filtered:
all_rules = set(rule.Rule.rule_names()) - rule.Rule.disabled_rules()
untested_rules = sorted(all_rules.difference(self._tested_rules))
if not untested_rules:
return
print(format_yellow('No test events configured for the following rules:'))
for rule_name in untested_rules:
print(format_yellow(rule_name))
@property
def _is_filtered(self):
return bool(self._files_filter or self._rules)
@property
def _testing_rules(self):
return self._type in {self.Types.RULES, self.Types.LIVE}
def _process_directory(self, directory):
"""Process rules and test files in the the rule directory"""
print('\nRunning tests for files found in: {}'.format(directory))
for root, event_files in self._get_test_files(directory):
for event_file in event_files:
full_path = os.path.join(root, event_file)
if self._files_filter and full_path not in self._files_filter:
continue
self._process_test_file(full_path)
def _process_test_file(self, test_file_path):
"""Process an individual test file"""
# Iterate over the individual test events in the file
event_file = TestEventFile(test_file_path)
for event in event_file.process_file(self._config, self._verbose, self._testing_rules):
# Each test event should be tied to a cluster, via the configured data_sources
# Reset the CLUSTER env var for each test, since it could differ between each event
# This env var is used from within the classifier to load the proper cluster config
if 'CLUSTER' in os.environ:
del os.environ['CLUSTER']
for cluster_name, cluster_value in self._config['clusters'].items():
if event.service not in cluster_value['data_sources']:
LOGGER.debug(
'Cluster "%s" does not have service "%s" configured as a data source',
cluster_name,
event.service
)
continue
sources = set(cluster_value['data_sources'][event.service])
if event.source not in sources:
LOGGER.debug(
'Cluster "%s" does not have the source "%s" configured as a data source '
'for service "%s"',
cluster_name,
event.source,
event.service
)
continue
# If we got here, then this cluster is actually configured for this data source
os.environ['CLUSTER'] = cluster_name
break
# A misconfigured test event and/or cluster config can cause this to be unset
if 'CLUSTER' not in os.environ:
error = (
'Test event\'s "service" ({}) and "source" ({}) are not defined within '
'the "data_sources" of any configured clusters: {}:{}'
).format(event.service, event.source, event_file.path, event.index)
raise ConfigError(error)
classifier_result = self._run_classification(event.record)
event.set_classified_result(classifier_result)
if not event:
continue
# Ensure this event actually contains any specific rules, if filtering is being used
if not event.check_for_rules(self._rules):
continue
if event.classify_only:
continue # Do not run rules on events that are only for validation
self._tested_rules.update(event.expected_rules)
if self._type in {self.Types.RULES, self.Types.LIVE}:
event.alerts = self._run_rules_engine(event.classified_log.sqs_messages)
if event.publisher_tests:
runner = PublisherTestRunner()
runner.run_publisher_tests(event)
if self._type == self.Types.LIVE:
for alert in event.alerts:
alert_result = self._run_alerting(alert)
event.add_live_test_result(alert.rule_name, alert_result)
self._passed += event_file.passed
self._failed += event_file.failed
            # It is possible for a test event to have no results but contain errors,
            # so only print it if it does and if quiet mode is not being used.
            # Quiet mode is overridden if not all of the events passed.
if event_file.error or not (self._quiet and event_file.all_passed):
if event_file.should_print:
print(event_file)
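    # The per-event pipeline above mirrors the production flow: classify the
    # raw record, run the rules engine over the resulting SQS-format messages,
    # and, for live tests, dispatch each alert through the alert processor.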
def run(self):
"""Run the tests"""
        if not self._check_prereqs():
            return False
for directory in self._rules_dirs:
# The CLI checks if these directories exist, so no need to check here
self._process_directory(directory)
self._finalize()
return self._failed == 0
@staticmethod
def _get_test_files(directory):
"""Helper to get rule test files
Args:
directory (str): Path to directory containing test files
Yields:
str: Path to test event file
"""
for root, _, test_event_files in os.walk(directory):
# Simple filter to remove any non-json files first
files = [
file for file in sorted(test_event_files)
if os.path.splitext(file)[1] == '.json'
]
if not files:
continue
yield root, files
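# For a layout such as rules/example/test_events/{a.json, b.txt, c.json}, the
# generator above yields ('rules/example/test_events', ['a.json', 'c.json']);
# non-JSON files are filtered out and directories without any JSON files are
# skipped entirely.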
class PublisherTestRunner:
PUBLISHER_CONDITIONALS = {
'is': {
'comparator': lambda subject, predicate: subject == predicate,
'clause': 'should have been',
},
'in': {
            'comparator': lambda s, p: s in p,
'clause': 'should have been one of'
},
'contains': {
'comparator': lambda s, p: p in s,
'clause': 'should have contained'
}
}
def run_publisher_tests(self, event):
"""
Runs all publishers and compares their results to the suite of configured publisher tests.
Args:
event (TestEvent): The integration test
"""
for alert in event.alerts:
publication_results = self._run_publishers(alert)
publisher_test_results = []
for output, individual_tests in event.publisher_tests.items():
for publisher_test in individual_tests:
if isinstance(publisher_test, list):
if len(publisher_test) != 3:
publisher_test_results.append({
'success': False,
'error': (
                                    'Invalid publisher test specified: {}\n'
'Publisher test must be a triple with elements: '
'(jsonpath, condition, condition_value)'
).format(publisher_test),
'output_descriptor': output,
})
continue
jsonpath, condition, condition_value = publisher_test
elif isinstance(publisher_test, dict):
valid_test_syntax = (
'jmespath_expression' in publisher_test and
'condition' in publisher_test and
'value' in publisher_test
)
if not valid_test_syntax:
publisher_test_results.append({
'success': False,
'error': (
                                    'Invalid publisher test specified: {}\n'
'Publisher test must be a dict with keys: '
'(jmespath_expression, condition, value)'
).format(publisher_test),
'output_descriptor': output,
})
continue
jsonpath = publisher_test['jmespath_expression']
condition = publisher_test['condition']
condition_value = publisher_test['value']
else:
publisher_test_results.append({
'success': False,
'error': (
                                'Invalid publisher test specified: {}\n'
'Publisher test must be list or dict'
),
'output_descriptor': output,
})
continue
if output not in publication_results:
publisher_test_results.append({
'success': False,
'error': (
'No such output {} was configured for this alert'
).format(output),
'output_descriptor': output,
})
continue
publication = publication_results[output]['publication']
subject_value = jmespath.search(jsonpath, publication)
conditional = self.PUBLISHER_CONDITIONALS.get(condition, None)
if not conditional:
publisher_test_results.append({
'success': False,
'error': (
'Invalid condition specified: {}\n'
'Valid conditions are: {}'
).format(condition, list(self.PUBLISHER_CONDITIONALS.keys())),
'output_descriptor': output,
})
continue
res = conditional['comparator'](subject_value, condition_value)
publisher_test_results.append({
'success': res,
'failure': None if res else (
'Item at path "{}" {} "{}",\nActual value: "{}"'.format(
jsonpath,
conditional['clause'],
condition_value,
subject_value
)
),
'output_descriptor': output
})
event.set_publication_results(publisher_test_results)
@staticmethod
def _run_publishers(alert):
"""Runs publishers for all currently configured outputs on the given alert
Args:
alert (Alert): The alert
Returns:
dict: A dict keyed by output:descriptor strings, mapped to nested dicts.
            The nested dicts have 2 keys:
- publication (dict): The dict publication
- success (bool): True if the publishing finished, False if it errored.
"""
configured_outputs = alert.outputs
results = {}
for configured_output in configured_outputs:
[output_name, descriptor] = configured_output.split(':')
try:
output = MagicMock(spec=OutputDispatcher, __service__=output_name)
results[configured_output] = {
'publication': compose_alert(alert, output, descriptor),
'success': True,
}
except (RuntimeError, TypeError, NameError) as err:
results[configured_output] = {
'success': False,
'error': err,
}
return results
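
# A minimal sketch of how a single publisher test is evaluated (illustrative
# only; the real flow, including error reporting, lives in
# run_publisher_tests above): resolve the jmespath expression against the
# publication dict and compare with the chosen conditional's comparator.
def _evaluate_publisher_test_sketch(publication, jsonpath, condition, expected):
    conditional = PublisherTestRunner.PUBLISHER_CONDITIONALS[condition]
    subject_value = jmespath.search(jsonpath, publication)
    return conditional['comparator'](subject_value, expected)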
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._workspace_managed_identity_sql_control_settings_operations import build_create_or_update_request_initial, build_get_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WorkspaceManagedIdentitySqlControlSettingsOperations:
"""WorkspaceManagedIdentitySqlControlSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.ManagedIdentitySqlControlSettingsModel":
"""Get Managed Identity Sql Control Settings.
Get Managed Identity Sql Control Settings.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedIdentitySqlControlSettingsModel, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedIdentitySqlControlSettingsModel"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
managed_identity_sql_control_settings: "_models.ManagedIdentitySqlControlSettingsModel",
**kwargs: Any
) -> Optional["_models.ManagedIdentitySqlControlSettingsModel"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ManagedIdentitySqlControlSettingsModel"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(managed_identity_sql_control_settings, 'ManagedIdentitySqlControlSettingsModel')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
managed_identity_sql_control_settings: "_models.ManagedIdentitySqlControlSettingsModel",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedIdentitySqlControlSettingsModel"]:
"""Create or update Managed Identity Sql Control Settings.
Create or update Managed Identity Sql Control Settings.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param managed_identity_sql_control_settings: Managed Identity Sql Control Settings.
:type managed_identity_sql_control_settings:
~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
ManagedIdentitySqlControlSettingsModel or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.ManagedIdentitySqlControlSettingsModel]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedIdentitySqlControlSettingsModel"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
managed_identity_sql_control_settings=managed_identity_sql_control_settings,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedIdentitySqlControlSettingsModel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/managedIdentitySqlControlSettings/default'} # type: ignore
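
# Usage sketch (illustrative, not generated code): this operations class is
# normally reached through the management client rather than constructed
# directly. The credential and the identifiers below are placeholders.
async def _begin_create_or_update_sketch(settings):
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.synapse.aio import SynapseManagementClient
    async with SynapseManagementClient(DefaultAzureCredential(),
                                       "<subscription-id>") as client:
        poller = await client.workspace_managed_identity_sql_control_settings.begin_create_or_update(
            resource_group_name="<resource-group>",
            workspace_name="<workspace>",
            managed_identity_sql_control_settings=settings,
        )
        return await poller.result()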
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import itertools
import json
import logging
import os
from django.conf import settings
import glanceclient as glance_client
from six.moves import _thread as thread
from horizon import exceptions
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
VERSIONS = base.APIVersionManager("image", preferred_version=2)
@memoized
def glanceclient(request, version='1'):
url = base.url_for(request, 'image')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return glance_client.Client(version, url, token=request.user.token.id,
insecure=insecure, cacert=cacert)
def image_delete(request, image_id):
return glanceclient(request).images.delete(image_id)
def image_get(request, image_id):
"""Returns an Image object populated with metadata for image
with supplied identifier.
"""
image = glanceclient(request).images.get(image_id)
if not hasattr(image, 'name'):
image.name = None
return image
def image_list_detailed(request, marker=None, sort_dir='desc',
sort_key='created_at', filters=None, paginate=False):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
page_size = utils.get_page_size(request)
if paginate:
request_size = page_size + 1
else:
request_size = limit
kwargs = {'filters': filters or {}}
if marker:
kwargs['marker'] = marker
kwargs['sort_dir'] = sort_dir
kwargs['sort_key'] = sort_key
images_iter = glanceclient(request).images.list(page_size=request_size,
limit=limit,
**kwargs)
has_prev_data = False
has_more_data = False
if paginate:
images = list(itertools.islice(images_iter, request_size))
# first and middle page condition
if len(images) > page_size:
images.pop(-1)
has_more_data = True
# middle page condition
if marker is not None:
has_prev_data = True
# first page condition when reached via prev back
elif sort_dir == 'asc' and marker is not None:
has_more_data = True
# last page condition
elif marker is not None:
has_prev_data = True
else:
images = list(images_iter)
return (images, has_more_data, has_prev_data)
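# Pagination sketch (illustrative only): callers request one page at a time and
# pass the last image's id back as the marker to fetch the following page.
#
#   images, has_more, _ = image_list_detailed(request, paginate=True)
#   if has_more:
#       next_page, _, _ = image_list_detailed(
#           request, marker=images[-1].id, paginate=True)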
def image_update(request, image_id, **kwargs):
image_data = kwargs.get('data', None)
try:
image = glanceclient(request).images.update(image_id, **kwargs)
except Exception:
exceptions.handle(request, ignore=True)
finally:
if image_data:
try:
os.remove(image_data.file.name)
except Exception as e:
msg = (('Failed to remove temporary image file '
'%(file)s (%(e)s)') %
dict(file=image_data.file.name, e=str(e)))
                LOG.warning(msg)
return image
def image_create(request, **kwargs):
copy_from = kwargs.pop('copy_from', None)
data = kwargs.pop('data', None)
image = glanceclient(request).images.create(**kwargs)
if data:
thread.start_new_thread(image_update,
(request, image.id),
{'data': data,
'purge_props': False})
elif copy_from:
thread.start_new_thread(image_update,
(request, image.id),
{'copy_from': copy_from,
'purge_props': False})
return image
def image_update_properties(request, image_id, remove_props=None, **kwargs):
"""Add or update a custom property of an image."""
return glanceclient(request, '2').images.update(image_id,
remove_props,
**kwargs)
def image_delete_properties(request, image_id, keys):
"""Delete custom properties for an image."""
return glanceclient(request, '2').images.update(image_id, keys)
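# Usage sketch (property names are illustrative): add or replace a custom
# property and drop another in a single v2 update call.
#
#   image_update_properties(request, image_id,
#                           remove_props=['unused_key'], hw_disk_bus='scsi')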
class BaseGlanceMetadefAPIResourceWrapper(base.APIResourceWrapper):
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_name', None))
def as_json(self, indent=4):
result = collections.OrderedDict()
for attr in self._attrs:
if hasattr(self, attr):
result[attr] = getattr(self, attr)
return json.dumps(result, indent=indent)
def to_dict(self):
return self._apiresource
class Namespace(BaseGlanceMetadefAPIResourceWrapper):
_attrs = ['namespace', 'display_name', 'description',
'resource_type_associations', 'visibility', 'protected',
'created_at', 'updated_at', 'properties', 'objects']
@property
def resource_type_names(self):
result = [resource_type['name'] for resource_type in
getattr(self._apiresource, 'resource_type_associations')]
return sorted(result)
@property
def public(self):
if getattr(self._apiresource, 'visibility') == 'public':
return True
else:
return False
def metadefs_namespace_get(request, namespace, resource_type=None, wrap=False):
namespace = glanceclient(request, '2').\
metadefs_namespace.get(namespace, resource_type=resource_type)
    # There were problems with using the wrapper class in nested JSON
    # serialization, so sometimes it is not desirable to wrap.
if wrap:
return Namespace(namespace)
else:
return namespace
def metadefs_namespace_list(request,
filters={},
sort_dir='asc',
sort_key='namespace',
marker=None,
paginate=False):
"""Retrieve a listing of Namespaces
:param paginate: If true will perform pagination based on settings.
:param marker: Specifies the namespace of the last-seen namespace.
The typical pattern of limit and marker is to make an
initial limited request and then to use the last
namespace from the response as the marker parameter
in a subsequent limited request. With paginate, limit
is automatically set.
:param sort_dir: The sort direction ('asc' or 'desc').
    :param sort_key: The field to sort on (for example, 'created_at'). Default
        is 'namespace'. Base namespaces are typically loaded into glance in a
        single transaction at first deployment, giving them a potentially
        unpredictable sort order when sorting on 'created_at'.
    :param filters: specifies additional fields to filter on, such as
        resource_types.
    :returns: A tuple of three values:
        1) Current page results.
        2) A boolean of whether or not there are more page(s).
        3) A boolean of whether or not there are previous page(s).
"""
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
page_size = utils.get_page_size(request)
if paginate:
request_size = page_size + 1
else:
request_size = limit
kwargs = {'filters': filters}
if marker:
kwargs['marker'] = marker
kwargs['sort_dir'] = sort_dir
kwargs['sort_key'] = sort_key
namespaces_iter = glanceclient(request, '2').metadefs_namespace.list(
page_size=request_size, limit=limit, **kwargs)
has_prev_data = False
has_more_data = False
if paginate:
namespaces = list(itertools.islice(namespaces_iter, request_size))
# first and middle page condition
if len(namespaces) > page_size:
namespaces.pop(-1)
has_more_data = True
# middle page condition
if marker is not None:
has_prev_data = True
# first page condition when reached via prev back
elif sort_dir == 'desc' and marker is not None:
has_more_data = True
# last page condition
elif marker is not None:
has_prev_data = True
else:
namespaces = list(namespaces_iter)
namespaces = [Namespace(namespace) for namespace in namespaces]
return namespaces, has_more_data, has_prev_data
def metadefs_namespace_create(request, namespace):
return glanceclient(request, '2').metadefs_namespace.create(**namespace)
def metadefs_namespace_update(request, namespace_name, **properties):
return glanceclient(request, '2').metadefs_namespace.update(
namespace_name,
**properties)
def metadefs_namespace_delete(request, namespace_name):
return glanceclient(request, '2').metadefs_namespace.delete(namespace_name)
def metadefs_resource_types_list(request):
return glanceclient(request, '2').metadefs_resource_type.list()
def metadefs_namespace_resource_types(request, namespace_name):
resource_types = glanceclient(request, '2').metadefs_resource_type.get(
namespace_name)
    # metadefs_resource_type.get() returns a generator; convert it to a list
return list(resource_types)
def metadefs_namespace_add_resource_type(request,
namespace_name,
resource_type):
return glanceclient(request, '2').metadefs_resource_type.associate(
namespace_name, **resource_type)
def metadefs_namespace_remove_resource_type(request,
namespace_name,
resource_type_name):
glanceclient(request, '2').metadefs_resource_type.deassociate(
namespace_name, resource_type_name)
|
|
# MathObject.py
import copy
import datetime
from MathException import MathException
class MathObject(object):
TYPE_NUMERIC_SCALAR_CONSTANT = 'TYPE_NUMERIC_SCALAR_CONSTANT'
TYPE_SYMBOLIC_SCALAR_CONSTANT = 'TYPE_SYMBOLIC_SCALAR_CONSTANT'
TYPE_SYMBOLIC_VECTOR_CONSTANT = 'TYPE_SYMBOLIC_VECTOR_CONSTANT'
TYPE_SUM = 'TYPE_SUM'
TYPE_PRODUCT = 'TYPE_PRODUCT'
TYPE_NEGATE = 'TYPE_NEGATE'
TYPE_INVERT = 'TYPE_INVERT'
TYPE_REVERSE = 'TYPE_REVERSE'
TYPE_OUTER_PRODUCT = 'TYPE_OUTER_PRODUCT'
TYPE_INNER_PRODUCT = 'TYPE_INNER_PRODUCT'
TYPE_MATRIX = 'TYPE_MATRIX'
TYPE_SQUARE_ROOT = 'TYPE_SQUARE_ROOT'
TYPE_SINE = 'TYPE_SINE'
TYPE_COSINE = 'TYPE_COSINE'
TYPE_TANGENT = 'TYPE_TANGENT'
TYPE_EXPONENT = 'TYPE_EXPONENT'
TYPE_LOG = 'TYPE_LOG'
TYPE_FACTORIAL = 'TYPE_FACTORIAL'
TYPE_GRADE_PART = 'TYPE_GRADE_PART'
def __init__(self, _type, value):
self._type = _type
self.value = value
def IsTypeIn(self, type_list):
return any([self._type == _type for _type in type_list])
def GenerateValuePairs(self):
if type(self.value) is list and self._type != self.TYPE_MATRIX:
for i in range(len(self.value)):
for j in range(i + 1, len(self.value)):
yield self.value[i], self.value[j], i, j
def DeleteValues(self, offset_list):
if type(self.value) is list and self._type != self.TYPE_MATRIX:
            offset_list.sort(reverse=True)
for i in offset_list:
del self.value[i]
def FindValueOfType(self, type_list, recursive=False):
if type(self.value) is list and self._type != self.TYPE_MATRIX:
for i, math_object in enumerate(self.value):
if math_object.IsTypeIn(type_list):
return math_object, i
if recursive:
for math_object in self.value:
sub_math_object, i = math_object.FindValueOfType(type_list, True)
if sub_math_object is not None:
return sub_math_object, i
return None, None
def FilterValuesOfType(self, type_list):
if type(self.value) is list and self._type != self.TYPE_MATRIX:
pass_list = []
fail_list = []
for sub_math_object in self.value:
if sub_math_object.IsTypeIn(type_list):
pass_list.append(sub_math_object)
else:
fail_list.append(sub_math_object)
return pass_list, fail_list
return None, None
def GatherAllTypes(self, type_list):
type_list.append(self._type)
if self._type == self.TYPE_MATRIX:
pass
if type(self.value) is list:
for math_object in self.value:
math_object.GatherAllTypes(type_list)
def IsScalar(self):
# In most cases, returning false here only means that we do not know if the math object tree
# rooted at this node ultimately evaluates to a scalar value. If, on the other hand, we
# return true, then we have positively identified a scalar value.
if self.IsTypeIn([self.TYPE_SYMBOLIC_SCALAR_CONSTANT, self.TYPE_NUMERIC_SCALAR_CONSTANT]):
return True
elif self._type == self.TYPE_MATRIX:
return False
elif self._type == self.TYPE_INNER_PRODUCT:
pass_list, fail_list = self.FilterValuesOfType([self.TYPE_SYMBOLIC_VECTOR_CONSTANT])
if len(pass_list) != 2 and len(pass_list) != 0:
return False
for sub_math_object in fail_list:
if not sub_math_object.IsScalar():
return False
return True
elif type(self.value) is list:
for sub_math_object in self.value:
if not sub_math_object.IsScalar():
return False
return True
return False
@staticmethod
def Cast(other):
if type(other) is str:
if other[0] == '$':
return MathObject(MathObject.TYPE_SYMBOLIC_SCALAR_CONSTANT, other[1:])
else:
return MathObject(MathObject.TYPE_SYMBOLIC_VECTOR_CONSTANT, other)
elif type(other) is float:
return MathObject(MathObject.TYPE_NUMERIC_SCALAR_CONSTANT, other)
elif type(other) is int:
return MathObject(MathObject.TYPE_NUMERIC_SCALAR_CONSTANT, float(other))
elif type(other) is list:
pass # TODO: Make matrix.
else:
raise MathException('Failed to cast "%s" as a MathObject instance.' % str(other))
def Copy(self):
return copy.deepcopy(self)
def CopyValue(self):
return copy.deepcopy(self.value)
def Simplify(self):
# TODO: Import applicable math alg derivatives here and put instances in the list.
math_algs_list = []
return self._Process(math_algs_list)
def _Process(self, math_algs_list, timeout_seconds=None):
# Note that the termination of our algorithm here depends on
# there being no back-and-forth fighting between two or more
# math algorithms in the given list.
start_time = datetime.datetime.now()
result = self.Copy()
while True:
for math_alg in math_algs_list:
new_result = math_alg.Apply(result)
if new_result is not None:
result = new_result
break
else:
break
current_time = datetime.datetime.now()
delta_time = current_time - start_time
if timeout_seconds is not None and delta_time.total_seconds() >= timeout_seconds:
raise MathException('Timed-out while trying to process math object tree.')
return result
def ApplyAlgorithm(self, math_alg_name, timeout_seconds=None):
# TODO: Import all math alg derivatives here.
from MathAlgorithm import MathAlgorithm
        for math_alg_class in MathAlgorithm.__subclasses__():
if math_alg_class.__name__ == math_alg_name:
return self._Process([math_alg_class()], timeout_seconds)
raise MathException('No math algorithms found by the name "%s".' % math_alg_name)
def __str__(self):
from MathRenderer import MathRenderer
renderer = MathRenderer()
return renderer.Render(self)
    def __add__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_SUM, [self.Copy(), other.Copy()])
        else:
            return MathObject(MathObject.TYPE_SUM, [self.Copy(), self.Cast(other)])
    def __radd__(self, other):
        return MathObject(MathObject.TYPE_SUM, [self.Cast(other), self.Copy()])
    def __sub__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_SUM, [self.Copy(), MathObject(MathObject.TYPE_NEGATE, other.Copy())])
        else:
            return MathObject(MathObject.TYPE_SUM, [self.Copy(), MathObject(MathObject.TYPE_NEGATE, self.Cast(other))])
    def __rsub__(self, other):
        return MathObject(MathObject.TYPE_SUM, [MathObject(MathObject.TYPE_NEGATE, self.Cast(other)), self.Copy()])
    def __mul__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_PRODUCT, [self.Copy(), other.Copy()])
        else:
            return MathObject(MathObject.TYPE_PRODUCT, [self.Copy(), self.Cast(other)])
    def __rmul__(self, other):
        return MathObject(MathObject.TYPE_PRODUCT, [self.Cast(other), self.Copy()])
    def __truediv__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_PRODUCT, [self.Copy(), MathObject(MathObject.TYPE_INVERT, other.Copy())])
        else:
            return MathObject(MathObject.TYPE_PRODUCT, [self.Copy(), MathObject(MathObject.TYPE_INVERT, self.Cast(other))])
    def __rtruediv__(self, other):
        return MathObject(MathObject.TYPE_PRODUCT, [MathObject(MathObject.TYPE_INVERT, self.Cast(other)), self.Copy()])
    def __neg__(self):
        return MathObject(MathObject.TYPE_NEGATE, self.Copy())
    def __invert__(self):
        return MathObject(MathObject.TYPE_INVERT, self.Copy())
    def __or__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_INNER_PRODUCT, [self.Copy(), other.Copy()])
        else:
            return MathObject(MathObject.TYPE_INNER_PRODUCT, [self.Copy(), self.Cast(other)])
    def __ror__(self, other):
        return MathObject(MathObject.TYPE_INNER_PRODUCT, [self.Cast(other), self.Copy()])
    def __xor__(self, other):
        if isinstance(other, MathObject):
            return MathObject(MathObject.TYPE_OUTER_PRODUCT, [self.Copy(), other.Copy()])
        else:
            return MathObject(MathObject.TYPE_OUTER_PRODUCT, [self.Copy(), self.Cast(other)])
    def __rxor__(self, other):
        return MathObject(MathObject.TYPE_OUTER_PRODUCT, [self.Cast(other), self.Copy()])
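
# Expression-building sketch (illustrative): the overloaded operators above
# construct a tree of MathObject nodes rather than evaluating anything. Per
# Cast, a '$'-prefixed string becomes a symbolic scalar, a bare string a
# symbolic vector, and numbers become numeric scalar constants.
def _expression_sketch():
    a = MathObject.Cast('$a')  # symbolic scalar "a"
    v = MathObject.Cast('v')   # symbolic vector "v"
    return a * v + 2.0         # a TYPE_SUM over a TYPE_PRODUCT and a constant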
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import contextlib
import json
import io
import os
import shutil
import signal
import socket
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from docker.errors import APIError, NotFound
from docker.utils import kwargs_from_env
from .base import requires_api_version
from .test import Cleanup
# FIXME: missing tests for
# export; history; insert; port; push; tag; get; load; stats
warnings.simplefilter('error')
compare_version = docker.utils.compare_version
EXEC_DRIVER = []
BUSYBOX = 'busybox:buildroot-2014.02'
def exec_driver_is_native():
global EXEC_DRIVER
if not EXEC_DRIVER:
c = docker_client()
EXEC_DRIVER = c.info()['ExecutionDriver']
c.close()
return EXEC_DRIVER.startswith('native')
def docker_client(**kwargs):
return docker.Client(**docker_client_kwargs(**kwargs))
def docker_client_kwargs(**kwargs):
client_kwargs = kwargs_from_env(assert_hostname=False)
client_kwargs.update(kwargs)
return client_kwargs
def setup_module():
c = docker_client()
try:
c.inspect_image(BUSYBOX)
except NotFound:
c.pull(BUSYBOX)
c.inspect_image(BUSYBOX)
c.close()
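
# A minimal sketch of how the helpers above compose (illustrative only; it
# assumes a reachable Docker daemon configured via the environment).
def _run_busybox_sketch():
    client = docker_client(timeout=30)
    container = client.create_container(BUSYBOX, ['true'])
    client.start(container)
    exit_code = client.wait(container)
    client.remove_container(container)
    client.close()
    return exit_code == 0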
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
tmp_folders = []
tmp_volumes = []
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
self.client = docker_client(timeout=60)
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
def tearDown(self):
for img in self.tmp_imgs:
try:
self.client.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
self.client.stop(container, timeout=1)
self.client.remove_container(container)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
for volume in self.tmp_volumes:
try:
self.client.remove_volume(volume)
except docker.errors.APIError:
pass
self.client.close()
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)
if exitcode != 0:
output = self.client.logs(container)
raise Exception(
"Container exited with code {}:\n{}"
.format(exitcode, output))
return container
#########################
# INFORMATION TESTS #
#########################
class TestVersion(BaseTestCase):
def runTest(self):
res = self.client.version()
self.assertIn('GoVersion', res)
self.assertIn('Version', res)
self.assertEqual(len(res['Version'].split('.')), 3)
class TestInfo(BaseTestCase):
def runTest(self):
res = self.client.info()
self.assertIn('Containers', res)
self.assertIn('Images', res)
self.assertIn('Debug', res)
class TestSearch(BaseTestCase):
def runTest(self):
self.client = docker_client(timeout=10)
res = self.client.search('busybox')
self.assertTrue(len(res) >= 1)
base_img = [x for x in res if x['name'] == 'busybox']
self.assertEqual(len(base_img), 1)
self.assertIn('description', base_img[0])
###################
# LISTING TESTS #
###################
class TestImages(BaseTestCase):
def runTest(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
class TestImageIds(BaseTestCase):
def runTest(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class TestListContainers(BaseTestCase):
def runTest(self):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res1)
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
self.assertEqual(size + 1, len(res2))
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
self.assertEqual(len(retrieved), 1)
retrieved = retrieved[0]
self.assertIn('Command', retrieved)
self.assertEqual(retrieved['Command'], six.text_type('true'))
self.assertIn('Image', retrieved)
self.assertRegex(retrieved['Image'], r'busybox:.*')
self.assertIn('Status', retrieved)
#####################
# CONTAINER TESTS #
#####################
class TestCreateContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
class TestCreateContainerWithBinds(BaseTestCase):
def setUp(self):
super(TestCreateContainerWithBinds, self).setUp()
self.mount_dest = '/mnt'
# Get a random pathname - we don't need it to exist locally
self.mount_origin = tempfile.mkdtemp()
shutil.rmtree(self.mount_origin)
self.filename = 'shared.txt'
self.run_with_volume(
False,
BUSYBOX,
['touch', os.path.join(self.mount_dest, self.filename)],
)
def run_with_volume(self, ro, *args, **kwargs):
return self.run_container(
*args,
volumes={self.mount_dest: {}},
host_config=self.client.create_host_config(
binds={
self.mount_origin: {
'bind': self.mount_dest,
'ro': ro,
},
},
network_mode='none'
),
**kwargs
)
def test_rw(self):
container = self.run_with_volume(
False,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
def test_ro(self):
container = self.run_with_volume(
True,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
def check_container_data(self, inspect_data, rw):
if docker.utils.compare_version('1.20', self.client._version) < 0:
self.assertIn('Volumes', inspect_data)
self.assertIn(self.mount_dest, inspect_data['Volumes'])
self.assertEqual(
self.mount_origin, inspect_data['Volumes'][self.mount_dest]
)
self.assertIn(self.mount_dest, inspect_data['VolumesRW'])
self.assertEqual(inspect_data['VolumesRW'][self.mount_dest], rw)
else:
self.assertIn('Mounts', inspect_data)
filtered = list(filter(
lambda x: x['Destination'] == self.mount_dest,
inspect_data['Mounts']
))
self.assertEqual(len(filtered), 1)
mount_data = filtered[0]
self.assertEqual(mount_data['Source'], self.mount_origin)
self.assertEqual(mount_data['RW'], rw)
@requires_api_version('1.20')
class CreateContainerWithGroupAddTest(BaseTestCase):
def test_group_id_ints(self):
container = self.client.create_container(
BUSYBOX, 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
def test_group_id_strings(self):
container = self.client.create_container(
BUSYBOX, 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
class CreateContainerWithLogConfigTest(BaseTestCase):
def test_valid_log_driver_and_log_opt(self):
log_config = docker.utils.LogConfig(
type='json-file',
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], log_config.type)
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
log_config = docker.utils.LogConfig(
type='asdf-nope',
config={}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
expected_msg = "logger: no log driver named 'asdf-nope' is registered"
with pytest.raises(APIError) as excinfo:
# raises an internal server error 500
self.client.start(container)
assert expected_msg in str(excinfo.value)
@pytest.mark.skipif(True,
reason="https://github.com/docker/docker/issues/15633")
def test_valid_no_log_driver_specified(self):
log_config = docker.utils.LogConfig(
type="",
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
log_config = docker.utils.LogConfig(
type="json-file",
config=None
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], {})
class TestCreateContainerReadOnlyFs(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
ctnr = self.client.create_container(
BUSYBOX, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
res = self.client.wait(ctnr)
self.assertNotEqual(res, 0)
class TestCreateContainerWithName(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true', name='foobar')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
self.assertEqual('/foobar', inspect['Name'])
class TestRenameContainer(BaseTestCase):
def runTest(self):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
if version == '1.5.0':
self.assertEqual(name, inspect['Name'])
else:
self.assertEqual('/{0}'.format(name), inspect['Name'])
class TestStartContainer(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestStartContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
class TestCreateContainerPrivileged(BaseTestCase):
def runTest(self):
res = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
if 'Privileged' in inspect['Config']:
self.assertEqual(inspect['Config']['Privileged'], True)
class TestWait(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(id)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestWaitWithDictInsteadOfId(BaseTestCase):
def runTest(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
exitcode = self.client.wait(res)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(res)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class TestLogs(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithTailOption(BaseTestCase):
def runTest(self):
snippet = '''Line1
Line2'''
container = self.client.create_container(
BUSYBOX, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id, tail=1)
self.assertEqual(logs, ('Line2\n').encode(encoding='ascii'))
# class TestLogsStreaming(BaseTestCase):
# def runTest(self):
# snippet = 'Flowering Nights (Sakuya Iyazoi)'
# container = self.client.create_container(
# BUSYBOX, 'echo {0}'.format(snippet)
# )
# id = container['Id']
# self.client.start(id)
# self.tmp_containers.append(id)
# logs = bytes() if six.PY3 else str()
# for chunk in self.client.logs(id, stream=True):
# logs += chunk
# exitcode = self.client.wait(id)
# self.assertEqual(exitcode, 0)
# self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestLogsWithDictInsteadOfId(BaseTestCase):
def runTest(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
class TestDiff(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestDiffWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class TestStop(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestStopWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKill(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
if exec_driver_is_native():
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class TestKillWithSignal(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '60'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=signal.SIGKILL)
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
class TestPort(BaseTestCase):
def runTest(self):
port_bindings = {
'1111': ('127.0.0.1', '4567'),
'2222': ('127.0.0.1', '4568')
}
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
)
id = container['Id']
self.client.start(container)
# Call the port function on each binding and compare expected vs. actual
for port in port_bindings:
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
self.assertEqual(ip, port_bindings[port][0])
self.assertEqual(host_port, port_bindings[port][1])
self.client.kill(id)
class TestMacAddress(BaseTestCase):
def runTest(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
self.client.start(container)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
self.client.kill(id)
class TestRestart(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRestartWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class TestRemoveContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestRemoveContainerWithDictInsteadOfId(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class TestCreateContainerWithVolumesFrom(BaseTestCase):
def runTest(self):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
with self.assertRaises(docker.errors.DockerException):
self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
volumes_from=vol_names
)
res2 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
info = self.client.inspect_container(res2['Id'])
self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
class TestCreateContainerWithLinks(BaseTestCase):
def runTest(self):
res0 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# we don't want the first /
link_path1 = self.client.inspect_container(container1_id)['Name'][1:]
link_alias1 = 'mylink1'
link_env_prefix1 = link_alias1.upper()
link_path2 = self.client.inspect_container(container2_id)['Name'][1:]
link_alias2 = 'mylink2'
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
BUSYBOX, 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
self.assertEqual(self.client.wait(container3_id), 0)
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
class TestRestartingContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
)
)
id = container['Id']
self.client.start(id)
self.client.wait(id)
with self.assertRaises(docker.errors.APIError) as exc:
self.client.remove_container(id)
err = exc.exception.response.text
self.assertIn(
'You cannot remove a running container', err
)
self.client.remove_container(id, force=True)
class TestExecuteCommand(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello\n')
class TestExecuteCommandString(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello world\n')
class TestExecuteCommandStringAsUser(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'default\n')
class TestExecuteCommandStringAsRoot(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'root\n')
class TestExecuteCommandStreaming(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
self.assertIn('Id', exec_id)
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
self.assertEqual(res, b'hello\nworld\n')
class TestExecInspect(BaseTestCase):
def runTest(self):
if not exec_driver_is_native():
pytest.skip('Exec driver not native')
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
self.assertIn('Id', exec_id)
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
self.assertIn('ExitCode', exec_info)
self.assertNotEqual(exec_info['ExitCode'], 0)
class TestRunContainerStreaming(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
sock = self.client.attach_socket(container, ws=False)
self.assertTrue(sock.fileno() > -1)
class TestPauseUnpauseContainer(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], True)
self.client.unpause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], False)
class TestCreateContainerWithHostPidMode(BaseTestCase):
def runTest(self):
ctnr = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
self.assertIn('PidMode', host_config)
self.assertEqual(host_config['PidMode'], 'host')
#################
# LINKS TESTS #
#################
class TestRemoveLink(BaseTestCase):
def runTest(self):
# Create containers
container1 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
# Create Link
# we don't want the first /
link_path = self.client.inspect_container(container1_id)['Name'][1:]
link_alias = 'mylink'
container2 = self.client.create_container(
BUSYBOX, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}, network_mode='none'
)
)
container2_id = container2['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = '%s/%s' % (linked_name, link_alias)
self.client.remove_container(link_name, link=True)
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
self.assertEqual(len(retrieved), 0)
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
self.assertEqual(len(retrieved), 2)
##################
# IMAGES TESTS #
##################
class TestPull(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestPullStream(BaseTestCase):
def runTest(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull('hello-world', stream=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class TestCommit(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
class TestRemoveImage(BaseTestCase):
def runTest(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
self.client.remove_image(img_id, force=True)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
##################
# IMPORT TESTS #
##################
class ImportTestCase(BaseTestCase):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
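# Usage sketch: both helpers produce valid tar data whose payload file is
# n_bytes long (the archive itself is slightly larger due to tar headers
# and padding), e.g.
#
# with self.dummy_tar_stream(n_bytes=1024) as stream:
#     data = stream.read()
# with self.dummy_tar_file(n_bytes=1024) as filename:
#     statuses = self.client.import_image_from_file(filename)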
class TestImportFromBytes(ImportTestCase):
'''Tests importing an image from in-memory byte data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromFile(ImportTestCase):
'''Tests importing an image from a tar file on disk.'''
def runTest(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromStream(ImportTestCase):
'''Tests importing an image from a stream containing tar data.'''
def runTest(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
class TestImportFromURL(ImportTestCase):
'''Tests downloading an image over HTTP.'''
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def runTest(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
TAR_SIZE = 10240
with self.dummy_tar_stream(n_bytes=TAR_SIZE) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
#################
# VOLUMES TESTS #
#################
@requires_api_version('1.21')
class TestVolumes(BaseTestCase):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
with pytest.raises(docker.errors.NotFound):
self.client.create_volume('perfectcherryblossom', driver_name)
def test_list_volumes(self):
name = 'imperishablenight'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
self.assertIn('Volumes', result)
volumes = result['Volumes']
self.assertIn(volume_info, volumes)
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
self.assertEqual(volume_info, result)
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
with pytest.raises(docker.errors.NotFound):
self.client.inspect_volume(name)
def test_remove_volume(self):
name = 'shootthebullet'
self.tmp_volumes.append(name)
self.client.create_volume(name)
result = self.client.remove_volume(name)
self.assertTrue(result)
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
self.client.remove_volume(name)
#################
# BUILDER TESTS #
#################
class TestBuildStream(BaseTestCase):
def runTest(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
json.loads(chunk) # ensure chunk is a single, valid JSON blob
logs += chunk
self.assertNotEqual(logs, '')
class TestBuildFromStringIO(BaseTestCase):
def runTest(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
self.assertNotEqual(logs, '')
@requires_api_version('1.8')
class TestBuildWithDockerignore(Cleanup, BaseTestCase):
def runTest(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'MAINTAINER docker-py',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'', # empty line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
for chunk in stream:
pass
c = self.client.create_container(tag, ['ls', '-1A', '/test'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
self.assertEqual(
list(filter(None, logs.split('\n'))),
['not-ignored'],
)
#######################
# PY SPECIFIC TESTS #
#######################
class TestRunShlex(BaseTestCase):
def runTest(self):
commands = [
'true',
'echo "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'echo -n "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'/bin/sh -c "echo Hello World"',
'/bin/sh -c \'echo "Hello World"\'',
'echo "\"Night of Nights\""',
'true && echo "Night of Nights"'
]
for cmd in commands:
container = self.client.create_container(BUSYBOX, cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0, msg=cmd)
class TestLoadConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('Auth'), None)
class TestLoadJSONConfig(BaseTestCase):
def runTest(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(os.path.join(folder, '.dockercfg'), 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
email_ = 'sakuya@scarlet.net'
f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
docker.auth.INDEX_URL, auth_, email_))
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('Auth'), None)
class TestAutoDetectVersion(unittest.TestCase):
def test_client_init(self):
client = docker_client(version='auto')
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
def test_auto_client(self):
client = docker.AutoVersionClient(**docker_client_kwargs())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
with self.assertRaises(docker.errors.DockerException):
docker.AutoVersionClient(**docker_client_kwargs(version='1.11'))
class TestConnectionTimeout(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
timeout=self.timeout)
def runTest(self):
start = time.time()
res = None
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
except Exception:
pass
end = time.time()
self.assertTrue(res is None)
self.assertTrue(end - start < 2 * self.timeout)
class UnixconnTestCase(unittest.TestCase):
"""
Test UNIX socket connection adapter.
"""
def test_resource_warnings(self):
"""
Test no warnings are produced when using the client.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
client = docker_client()
client.images()
client.close()
del client
assert len(w) == 0, \
"Unexpected warnings produced: {0}".format(w[0].message)
####################
# REGRESSION TESTS #
####################
class TestRegressions(BaseTestCase):
def test_443(self):
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
self.assertEqual(exc.exception.response.status_code, 500)
dfile.close()
def test_542(self):
self.client.start(
self.client.create_container(BUSYBOX, ['true'])
)
result = self.client.containers(all=True, trunc=True)
self.assertEqual(len(result[0]['Id']), 12)
def test_647(self):
with self.assertRaises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649(self):
self.client.timeout = None
ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715(self):
ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Brain developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
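# Typical usage sketch in a test (nodes is a list of AuthServiceProxy
# connections, as returned by start_nodes below):
#
# nodes[0].setgenerate(True, 1)   # mine one block on node 0
# sync_blocks(nodes)              # wait until every node agrees on the tip
# sync_mempools(nodes)            # and on the mempool contents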
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "brain.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
braind and brain-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run brainds:
for i in range(4):
datadir = initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "braind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "brain-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except Exception:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in brain.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
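# Examples (illustrative):
# _rpchost_to_args(None)             -> []
# _rpchost_to_args('127.0.0.1')      -> ['-rpcconnect=127.0.0.1']
# _rpchost_to_args('127.0.0.1:8332') -> ['-rpcconnect=127.0.0.1', '-rpcport=8332']
# _rpchost_to_args('[::1]:8332')     -> ['-rpcconnect=::1', '-rpcport=8332']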
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a braind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "braind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "brain-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple brainds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return the index of the output of txid whose value equals amount.
Raises an exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
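# Usage sketch (illustrative amounts): collect confirmed coins worth at least
# the payment plus fee, then feed them to createrawtransaction; each input
# dict also carries an "address" key, which bitcoind ignores.
#
# (total_in, inputs) = gather_inputs(node, Decimal("25.001"))
# rawtx = node.createrawtransaction(inputs, {addr: 25.0})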
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
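# Worked example (illustrative numbers): amount_in=50, amount_out=10,
# fee=0.001 gives change = 50 - 10.001 = 39.999, which exceeds
# 2 * 10.001, so the change is split: the first output gets
# Decimal(39.999/2) rounded down to 8 places (19.9995) and the second
# gets whatever remains (also 19.9995 here).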
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
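# Why the send-to-self step: transaction priority scales with the age
# (confirmations) of the inputs, so spending a freshly created, unconfirmed
# output guarantees priority zero. Sketch (illustrative amounts):
#
# (txid, txhex) = send_zeropri_transaction(nodes[0], nodes[1],
#                                          Decimal("1.0"), Decimal("0.001"))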
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
import numpy as NP
import astropy.cosmology as CP
import scipy.constants as FCNST
import argparse
import yaml
import astropy
from astropy.io import fits, ascii
import progressbar as PGB
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CM
from matplotlib.ticker import FuncFormatter
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import ipdb as PDB
parser = argparse.ArgumentParser(description='Program to analyze and plot global EoR data')
input_group = parser.add_argument_group('Input parameters', 'Input specifications')
input_group.add_argument('-i', '--infile', dest='infile', default='/home/t_nithyanandan/codes/mine/python/interferometry/main/simparameters.yaml', type=argparse.FileType('r'), required=False, help='File specifying input parameters')
args = vars(parser.parse_args())
rootdir = '/data3/t_nithyanandan/'
with args['infile'] as parms_file:
parms = yaml.safe_load(parms_file)
project = parms['project']
telescope_id = parms['telescope']['id']
Tsys = parms['telescope']['Tsys']
latitude = parms['telescope']['latitude']
pfb_method = parms['telescope']['pfb_method']
element_shape = parms['antenna']['shape']
element_size = parms['antenna']['size']
element_ocoords = parms['antenna']['ocoords']
element_orientation = parms['antenna']['orientation']
ground_plane = parms['antenna']['ground_plane']
phased_array = parms['antenna']['phased_array']
phased_elements_file = parms['phasedarray']['file']
delayerr = parms['phasedarray']['delayerr']
gainerr = parms['phasedarray']['gainerr']
nrand = parms['phasedarray']['nrand']
antenna_file = parms['array']['file']
array_layout = parms['array']['layout']
minR = parms['array']['minR']
maxR = parms['array']['maxR']
minbl = parms['baseline']['min']
maxbl = parms['baseline']['max']
bldirection = parms['baseline']['direction']
obs_mode = parms['obsparm']['obs_mode']
n_snaps = parms['obsparm']['n_snaps']
t_snap = parms['obsparm']['t_snap']
t_obs = parms['obsparm']['t_obs']
freq = parms['obsparm']['freq']
freq_resolution = parms['obsparm']['freq_resolution']
nchan = parms['obsparm']['nchan']
avg_drifts = parms['snapshot']['avg_drifts']
beam_switch = parms['snapshot']['beam_switch']
pick_snapshots = parms['snapshot']['pick']
all_snapshots = parms['snapshot']['all']
snapshots_range = parms['snapshot']['range']
pointing_file = parms['pointing']['file']
pointing_info = parms['pointing']['initial']
n_bins_baseline_orientation = parms['processing']['n_bins_blo']
baseline_chunk_size = parms['processing']['bl_chunk_size']
bl_chunk = parms['processing']['bl_chunk']
n_bl_chunks = parms['processing']['n_bl_chunks']
n_sky_sectors = parms['processing']['n_sky_sectors']
bpass_shape = parms['processing']['bpass_shape']
max_abs_delay = parms['processing']['max_abs_delay']
fg_str = parms['fgparm']['model']
nside = parms['fgparm']['nside']
spindex_rms = parms['fgparm']['spindex_rms']
spindex_seed = parms['fgparm']['spindex_seed']
pc = parms['phasing']['center']
pc_coords = parms['phasing']['coords']
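# For reference, an excerpt of a simparameters.yaml consistent with the keys
# read above (values illustrative, not from this script):
#
# project: project_HERA
# telescope:
#     id: hera
#     Tsys: 300.0
#     latitude: -30.7224
#     pfb_method: null
# antenna:
#     shape: dish
#     size: 14.0
#     ocoords: altaz
#     orientation: null
#     ground_plane: null
#     phased_array: false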
if project not in ['project_MWA', 'project_global_EoR', 'project_HERA', 'project_drift_scan', 'project_beams', 'project_LSTbin']:
raise ValueError('Invalid project specified')
else:
project_dir = project + '/'
if telescope_id not in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'custom', 'paper_dipole', 'mwa_tools']:
raise ValueError('Invalid telescope specified')
if element_shape is None:
element_shape = 'delta'
elif element_shape not in ['dish', 'delta', 'dipole']:
raise ValueError('Invalid antenna element shape specified')
if element_shape != 'delta':
if element_size is None:
raise ValueError('No antenna element size specified')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive')
if element_ocoords not in ['altaz', 'dircos']:
if element_ocoords is not None:
raise ValueError('Antenna element orientation must be "altaz" or "dircos"')
if element_orientation is None:
if element_ocoords == 'altaz':
element_orientation = NP.asarray([0.0, 90.0])
elif element_ocoords == 'dircos':
element_orientation = NP.asarray([1.0, 0.0, 0.0])
else:
element_orientation = NP.asarray(element_orientation)
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
if not isinstance(phased_array, bool):
raise TypeError('phased_array specification must be boolean')
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
if (antenna_file is None) and (array_layout is None):
raise ValueError('One of antenna array file or layout must be specified')
if (antenna_file is not None) and (array_layout is not None):
raise ValueError('Only one of antenna array file or layout must be specified')
if antenna_file is not None:
try:
ant_info = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
except IOError:
raise IOError('Could not open file containing antenna locations.')
else:
if array_layout not in ['MWA-128T', 'HERA-7', 'HERA-19', 'HERA-37', 'HERA-61', 'HERA-91', 'HERA-127', 'HERA-169', 'HERA-217', 'HERA-271', 'HERA-331', 'CIRC']:
raise ValueError('Invalid array layout specified')
if array_layout == 'MWA-128T':
ant_info = NP.loadtxt('/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
elif array_layout == 'HERA-7':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=7)
elif array_layout == 'HERA-19':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=19)
elif array_layout == 'HERA-37':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=37)
elif array_layout == 'HERA-61':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=61)
elif array_layout == 'HERA-91':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=91)
elif array_layout == 'HERA-127':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=127)
elif array_layout == 'HERA-169':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=169)
elif array_layout == 'HERA-217':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=217)
elif array_layout == 'HERA-271':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=271)
elif array_layout == 'HERA-331':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=331)
elif array_layout == 'CIRC':
ant_locs, ant_id = RI.circular_antenna_array(element_size, minR, maxR=maxR)
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified for antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
if obs_mode is None:
obs_mode = 'custom'
elif obs_mode not in ['drift', 'track', 'dns']:
raise ValueError('Invalid observing mode specified')
if avg_drifts + beam_switch + (pick_snapshots is not None) + (snapshots_range is not None) + all_snapshots != 1:
raise ValueError('One and only one of avg_drifts, beam_switch, pick_snapshots, snapshots_range, all_snapshots must be set')
snapshot_type_str = ''
if avg_drifts and (obs_mode == 'dns'):
snapshot_type_str = 'drift_averaged_'
if beam_switch and (obs_mode == 'dns'):
snapshot_type_str = 'beam_switches_'
if (snapshots_range is not None) and ((obs_mode == 'dns') or (obs_mode == 'lstbin')):
snapshot_type_str = 'snaps_{0[0]:0d}-{0[1]:0d}_'.format(snapshots_range)
if (pointing_file is None) and (pointing_info is None):
raise ValueError('One and only one of pointing file and initial pointing must be specified')
elif (pointing_file is not None) and (pointing_info is not None):
raise ValueError('One and only one of pointing file and initial pointing must be specified')
duration_str = ''
if obs_mode in ['track', 'drift']:
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap)
geor_duration_str = '_{0:0d}x{1:.1f}s'.format(1, t_snap)
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
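# The wrap above maps LST from [0, 360) deg into (-180, 180] so that a
# snapshot sequence crossing 0 h LST stays monotonic when the bin edges
# are constructed next.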
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and (avg_drifts or beam_switch):
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
if avg_drifts:
lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
else:
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
raise IndexError('min snapshot # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
n_snaps = pick_snapshots.size
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
if obs_mode != 'lstbin':
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
# n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
obs_mode = 'custom'
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
if pick_snapshots is None:
if obs_mode != 'lstbin':
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
# pointings_dircos_orig = GEOM.altaz2dircos(pointings_altaz_orig, units='degrees')
# pointings_hadec_orig = GEOM.altaz2hadec(pointings_altaz_orig, latitude, units='degrees')
# pointings_radec_orig = NP.hstack(((lst-pointings_hadec_orig[:,0]).reshape(-1,1), pointings_hadec_orig[:,1].reshape(-1,1)))
# pointings_radec_orig[:,0] = pointings_radec_orig[:,0] % 360.0
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-ha_init).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
if lst_wrapped.size > 1:
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
else:
lst_edges = NP.concatenate((lst_wrapped, lst_wrapped+t_snap/3.6e3*15))
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap[0])
geor_duration_str = '_{0:0d}x{1:.1f}s'.format(1, t_snap[0])
bl, bl_id = RI.baseline_generator(ant_locs, ant_id=ant_id, auto=False, conjugate=False)
bl, select_bl_ind, bl_count = RI.uniq_baselines(bl)
bl_id = bl_id[select_bl_ind]
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
bl_count = bl_count[sortind]
neg_bl_orientation_ind = (bl_orientation < -67.5) | (bl_orientation > 112.5)
# neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
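# Baselines oriented outside (-67.5, 112.5] deg are negated above so that
# every baseline falls into one of the SE/E/NE/N orientation bins used for
# selection further below; since V(-b) = V*(b) for visibilities, flipping
# a baseline loses no information.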
if minbl is None:
minbl = 0.0
elif not isinstance(minbl, (int,float)):
raise TypeError('Minimum baseline length must be a scalar')
elif minbl < 0.0:
minbl = 0.0
if maxbl is None:
maxbl = bl_length.max()
elif not isinstance(maxbl, (int,float)):
raise TypeError('Maximum baseline length must be a scalar')
elif maxbl < minbl:
maxbl = bl_length.max()
min_blo = -67.5
max_blo = 112.5
select_bl_ind = NP.zeros(bl_length.size, dtype=NP.bool)
if bldirection is not None:
if isinstance(bldirection, str):
if bldirection not in ['SE', 'E', 'NE', 'N']:
raise ValueError('Invalid baseline direction criterion specified')
else:
bldirection = [bldirection]
if isinstance(bldirection, list):
for direction in bldirection:
if direction in ['SE', 'E', 'NE', 'N']:
if direction == 'SE':
oind = (bl_orientation >= -67.5) & (bl_orientation < -22.5)
select_bl_ind[oind] = True
elif direction == 'E':
oind = (bl_orientation >= -22.5) & (bl_orientation < 22.5)
select_bl_ind[oind] = True
elif direction == 'NE':
oind = (bl_orientation >= 22.5) & (bl_orientation < 67.5)
select_bl_ind[oind] = True
else:
oind = (bl_orientation >= 67.5) & (bl_orientation < 112.5)
select_bl_ind[oind] = True
else:
raise TypeError('Baseline direction criterion must be specified as a string or list of strings')
else:
select_bl_ind = NP.ones(bl_length.size, dtype=NP.bool)
select_bl_ind = select_bl_ind & (bl_length >= minbl) & (bl_length <= maxbl)
bl_id = bl_id[select_bl_ind]
bl = bl[select_bl_ind,:]
bl_length = bl_length[select_bl_ind]
bl_orientation = bl_orientation[select_bl_ind]
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bllstr = map(str, bl_length)
uniq_bllstr, ind_uniq_bll = NP.unique(bllstr, return_index=True)
count_uniq_bll = [bllstr.count(ubll) for ubll in uniq_bllstr]
count_uniq_bll = NP.asarray(count_uniq_bll)
geor_bl = bl[ind_uniq_bll,:]
geor_bl_id = bl_id[ind_uniq_bll]
geor_bl_orientation = bl_orientation[ind_uniq_bll]
geor_bl_length = bl_length[ind_uniq_bll]
sortind = NP.argsort(geor_bl_length, kind='mergesort')
geor_bl = geor_bl[sortind,:]
geor_bl_id = geor_bl_id[sortind]
geor_bl_length = geor_bl_length[sortind]
geor_bl_orientation = geor_bl_orientation[sortind]
count_uniq_bll = count_uniq_bll[sortind]
use_GSM = False
use_DSM = False
use_CSM = False
use_SUMSS = False
use_GLEAM = False
use_PS = False
use_USM = False
use_NVSS = False
use_HI_monopole = False
use_HI_cube = False
use_HI_fluctuations = False
if fg_str not in ['asm', 'dsm', 'csm', 'nvss', 'sumss', 'gleam', 'mwacs', 'ps', 'usm', 'mss', 'HI_cube', 'HI_monopole', 'HI_fluctuations']:
raise ValueError('Invalid foreground model string specified.')
if fg_str == 'asm':
use_GSM = True
elif fg_str == 'dsm':
use_DSM = True
elif fg_str == 'csm':
use_CSM = True
elif fg_str == 'sumss':
use_SUMSS = True
elif fg_str == 'gleam':
use_GLEAM = True
elif fg_str == 'ps':
use_PS = True
elif fg_str == 'nvss':
use_NVSS = True
elif fg_str == 'usm':
use_USM = True
elif fg_str == 'HI_monopole':
use_HI_monopole = True
elif fg_str == 'HI_fluctuations':
use_HI_fluctuations = True
elif fg_str == 'HI_cube':
use_HI_cube = True
spindex_seed_str = ''
if not isinstance(spindex_rms, (int,float)):
raise TypeError('Spectral Index rms must be a scalar')
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
if not isinstance(spindex_seed, (int, float)):
raise TypeError('Spectral index random seed must be a scalar')
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
raise ValueError('Only n_sky_sectors=1 (all sky) is handled in this script') # guard added: sky_sector_str would otherwise be undefined below
freq = NP.float(freq)
freq_resolution = NP.float(freq_resolution)
wavelength = FCNST.c / freq # in meters
redshift = CNST.rest_freq_HI / freq - 1
bw = nchan * freq_resolution
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
if bpass_shape not in ['bnw', 'bhw', 'rect']:
raise ValueError('Invalid bandpass shape specified')
if pc_coords not in ['altaz', 'radec', 'hadec', 'dircos']:
raise ValueError('Invalid coordinate system specified for phase center')
else:
pc = NP.asarray(pc).ravel()
if pc_coords == 'radec':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_hadec = NP.hstack((lst.reshape(-1,1)-pc[0], pc[1]+NP.zeros((lst.size,1))))
pc_altaz = GEOM.hadec2altaz(pc_hadec, latitude, units='degrees')
pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
elif pc_coords == 'hadec':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_altaz = GEOM.hadec2altaz(pc.reshape(1,-1), latitude, units='degrees')
pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
elif pc_coords == 'altaz':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_dircos = GEOM.altaz2dircos(pc.reshape(1,-1), units='degrees')
else:
if pc.size != 3:
raise ValueError('Phase center must be a 3-element vector in dircos coordinates')
pc_coords = NP.asarray(pc).reshape(1,-1)
if pfb_method is not None:
use_pfb = True
else:
use_pfb = False
h = 0.7 # Hubble constant coefficient
cosmodel100 = CP.FlatLambdaCDM(H0=100.0, Om0=0.27) # Using H0 = 100 km/s/Mpc
cosmodel = CP.FlatLambdaCDM(H0=h*100.0, Om0=0.27) # Using H0 = h * 100 km/s/Mpc
def kprll(eta, z):
return 2 * NP.pi * eta * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z) / FCNST.c / (1+z)**2 * 1e3
def kperp(u, z):
return 2 * NP.pi * u / cosmodel100.comoving_transverse_distance(z).value
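# The two functions above implement the standard delay-spectrum mapping to
# cosmological wavenumbers (e.g., Morales & Hewitt 2004):
# k_prll = 2*pi*eta*f21*H0*E(z) / (c*(1+z)^2) and k_perp = 2*pi*u / D_M(z).
# With cosmodel100 (H0 = 100 km/s/Mpc) both come out in h/Mpc; the trailing
# 1e3 converts the km in H0 to m. Rough check (approximate values): at
# z ~ 8.5 (~150 MHz), kprll(1e-6, 8.5) is ~0.5 h/Mpc for a 1 microsecond delay.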
# File names of the input visibilities and their delay-CLEANed counterparts
bl_range_str = '_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]], bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1, total_baselines-1)])
common_str = sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)
geor_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+geor_duration_str+bl_range_str+'HI_monopole'+common_str+'no_pfb.fits'
fg_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+bl_range_str+fg_str+common_str+'no_pfb.fits'
geor_clean_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+geor_duration_str+bl_range_str+'HI_monopole'+common_str+'no_pfb_'+bpass_shape+'.fits'
fg_clean_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+bl_range_str+fg_str+common_str+'no_pfb_'+bpass_shape+'.fits'
PDB.set_trace()
ia = RI.InterferometerArray(None, None, None, init_file=fg_infile)
hdulist = fits.open(geor_clean_infile)
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
geor_cc_skyvis_lag = hdulist['CLEAN NOISELESS DELAY SPECTRA REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA IMAG'].data
geor_cc_skyvis_lag_res = hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(fg_clean_infile)
fg_cc_skyvis_lag = hdulist['CLEAN NOISELESS DELAY SPECTRA REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA IMAG'].data
fg_cc_skyvis_lag_res = hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS IMAG'].data
hdulist.close()
geor_cc_skyvis_lag += geor_cc_skyvis_lag_res
fg_cc_skyvis_lag += fg_cc_skyvis_lag_res
geor_cc_skyvis_lag = DSP.downsampler(geor_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
fg_cc_skyvis_lag = DSP.downsampler(fg_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
fg_cc_skyvis_lag_res = DSP.downsampler(fg_cc_skyvis_lag_res, 1.0*clean_lags.size/ia.lags.size, axis=1)
clean_lags_orig = NP.copy(clean_lags)
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
delaymat = DLY.delay_envelope(bl, pc_dircos, units='mks')
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, bl.shape[0], axis=0)
EoR_window = NP.logical_or(clags > max_delay+1/bw, clags < min_delay-1/bw)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
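# delaymat encodes each baseline's geometric horizon delay relative to the
# phase center: lags between min_delay and max_delay define the foreground
# wedge, and the EoR window is taken beyond the horizon plus a 1/bw buffer
# to allow for leakage from the finite-bandwidth window function.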
bll_bin_count, bll_edges, bll_binnum, bll_ri = OPS.binned_statistic(bl_length, values=None, statistic='count', bins=NP.hstack((geor_bl_length-1e-10, geor_bl_length.max()+1e-10)))
snap_min = 0
snap_max = 39
fg_cc_skyvis_lag_tavg = NP.mean(fg_cc_skyvis_lag[:,:,snap_min:snap_max+1], axis=2)
fg_cc_skyvis_lag_res_tavg = NP.mean(fg_cc_skyvis_lag_res[:,:,snap_min:snap_max+1], axis=2)
fg_cc_skyvis_lag_blavg = NP.zeros((geor_bl_length.size, clags.size, snap_max-snap_min+1), dtype=NP.complex64)
fg_cc_skyvis_lag_res_blavg = NP.zeros((geor_bl_length.size, clags.size, snap_max-snap_min+1), dtype=NP.complex64)
for i in xrange(geor_bl_length.size):
blind = bll_ri[bll_ri[i]:bll_ri[i+1]]
if blind.size != bll_bin_count[i]: PDB.set_trace()
fg_cc_skyvis_lag_blavg[i,:,:] = NP.mean(fg_cc_skyvis_lag[blind,:,snap_min:snap_max+1], axis=0)
fg_cc_skyvis_lag_res_blavg[i,:,:] = NP.mean(fg_cc_skyvis_lag_res[blind,:,snap_min:snap_max+1], axis=0)
fg_cc_skyvis_lag_avg = NP.mean(fg_cc_skyvis_lag_blavg, axis=2)
fg_cc_skyvis_lag_res_avg = NP.mean(fg_cc_skyvis_lag_res_blavg, axis=2)
for i in xrange(int(NP.ceil(geor_bl_length.size/4.0))):
fig, axs = PLT.subplots(min(4,geor_bl_length.size-4*i), sharex=True, figsize=(6,9))
axs = NP.atleast_1d(axs) # ensure axs is indexable even when only one subplot is created
for j in range(4*i, min(4*(i+1),geor_bl_length.size)):
blind = bll_ri[bll_ri[j]:bll_ri[j+1]]
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag[blind[0],:,0]), ls='--', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_blavg[j,:,0]), ls='-.', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_tavg[blind[0],:]), ls=':', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_avg[j,:]), ls='-', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(geor_cc_skyvis_lag[j,:,0]), ls='-', lw=2, color='gray')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_res_avg[j,:]), ls=':', lw=2, color='red')
axs[j%len(axs)].axvline(x=1e6*min_delay[blind[0],0], ls=':', lw=2, color='gray')
axs[j%len(axs)].axvline(x=1e6*max_delay[blind[0],0], ls=':', lw=2, color='gray')
axs[j%len(axs)].text(0.05, 0.8, r'$|\mathbf{b}|$'+' = {0:.1f} m'.format(geor_bl_length[j]), fontsize=12, weight='medium', transform=axs[j%len(axs)].transAxes)
axs[j%len(axs)].set_ylim(NP.abs(geor_cc_skyvis_lag).min(), NP.abs(fg_cc_skyvis_lag[:,:,snap_min:snap_max+1]).max())
axs[j%len(axs)].set_xlim(1e6*clags.min(), 1e6*clags.max())
axs[j%len(axs)].set_yscale('log')
axs[j%len(axs)].set_yticks(NP.logspace(4,12,5,endpoint=True).tolist())
if j%len(axs) == len(axs)-1:
axs[j%len(axs)].set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
if j%len(axs) == 0:
axs_kprll = axs[j%len(axs)].twiny()
axs_kprll.set_xticks(kprll(axs[j%len(axs)].get_xticks()*1e-6, redshift))
axs_kprll.set_xlim(kprll(NP.asarray(axs[j%len(axs)].get_xlim())*1e-6, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.2f}'.format(x))
axs_kprll.xaxis.set_major_formatter(xformatter)
axs_kprll.xaxis.tick_top()
axs_kprll.set_xlabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
axs_kprll.xaxis.set_label_position('top')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r"$|V_b|$ [Jy Hz]", fontsize=16, weight='medium', labelpad=30)
PLT.savefig(rootdir+project_dir+'figures/'+telescope_str+'delay_spectra_'
+ground_plane_str+snapshot_type_str+obs_mode+duration_str
+'_baseline_range_{0:.1f}-{1:.1f}_'.format(geor_bl_length[4*i], geor_bl_length[j])
+fg_str+'_nside_{0:0d}_'.format(nside)+delaygain_err_str
+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)
+'no_pfb_'+bpass_shape+'.png', bbox_inches=0)
# fig = PLT.figure(figsize=(6,6))
# ax = fig.add_subplot(111)
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag[-1,:,0]), ls='--', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_blavg[-1,:,0]), ls='-.', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_tavg[-1,:]), ls=':', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_avg[-1,:]), ls='-', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(geor_cc_skyvis_lag[-1,:,0]), ls='-', lw=2, color='gray')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_res_avg[-1,:]), ls='-', lw=2, color='red')
# ax.set_ylim(NP.abs(geor_cc_skyvis_lag).min(), NP.abs(fg_cc_skyvis_lag).max())
# ax.set_xlim(1e6*clags.min(), 1e6*clags.max())
# ax.set_yscale('log')
# ax.set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
# ax.set_ylabel(r'$|V_b|$'+' [Jy Hz]', fontsize=16, weight='medium')
# PLT.show()
PDB.set_trace()
|
|
""" This module attempts to make it easy to create VTK-Python
unittests. The module uses unittest for the test interface. For more
documentation on what unittests are and how to use them, please read
these:
http://www.python.org/doc/current/lib/module-unittest.html
http://www.diveintopython.org/roman_divein.html
This VTK-Python test module supports image based tests with multiple
images per test suite and multiple images per individual test as well.
It also prints information appropriate for CDash
(http://open.kitware.com/).
This module defines several useful classes and functions to make
writing tests easy. The most important of these are:
class vtkTest:
Subclass this for your tests. It also has a few useful internal
functions that can be used to do some simple blackbox testing.
compareImage(renwin, img_fname, threshold=10):
Compares renwin with image and generates image if it does not
exist. The threshold determines how closely the images must match.
The function also handles multiple images and finds the best
matching image.
compareImageWithSavedImage(src_img, img_fname, threshold=10):
Compares given source image (in the form of a vtkImageData) with
saved image and generates the image if it does not exist. The
threshold determines how closely the images must match. The
function also handles multiple images and finds the best matching
image.
getAbsImagePath(img_basename):
Returns the full path to the image given the basic image name.
main(cases):
Does the testing given a list of tuples containing test classes and
the starting string of the functions used for testing.
interact():
Interacts with the user if necessary. The behavior of this is
rather trivial and works best when using Tkinter. It does not do
anything by default and stops to interact with the user when given
the appropriate command line arguments.
isInteractive():
If interact() is not good enough, use this to find if the mode is
interactive or not and do whatever is necessary to generate an
interactive view.
Examples:
The best way to learn on how to use this module is to look at a few
examples. The end of this file contains a trivial example. Please
also look at the following examples:
Rendering/Testing/Python/TestTkRenderWidget.py,
Rendering/Testing/Python/TestTkRenderWindowInteractor.py
Created: September, 2002
Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
"""
from __future__ import absolute_import
import sys, os, time
import os.path
import unittest, getopt
import vtk
from . import BlackBox
# location of the VTK data files. Set via command line args or
# environment variable.
VTK_DATA_ROOT = ""
# location of the VTK baseline images. Set via command line args or
# environment variable.
VTK_BASELINE_ROOT = ""
# location of the VTK difference images for failed tests. Set via
# command line args or environment variable.
VTK_TEMP_DIR = ""
# Verbosity of the test messages (used by unittest)
_VERBOSE = 0
# Determines if it is necessary to interact with the user. If zero
# don't interact; if 1, interact. Set via command line args.
_INTERACT = 0
# This will be set to 1 when the image test is not to be performed.
# This option is used internally by the script and set via command
# line arguments.
_NO_IMAGE = 0
class vtkTest(unittest.TestCase):
"""A simple default VTK test class that defines a few useful
blackbox tests that can be readily used. Derive your test cases
from this class and use the following if you'd like to.
Note: Unittest instantiates this class (or your subclass) each
time it tests a method. So if you do not want that to happen when
generating VTK pipelines you should create the pipeline in the
class definition as done below for _blackbox.
"""
_blackbox = BlackBox.Tester(debug=0)
# Due to what seems to be a bug in python some objects leak.
# Avoid the exit-with-error in vtkDebugLeaks.
dl = vtk.vtkDebugLeaks()
dl.SetExitError(0)
dl = None
def _testParse(self, obj):
"""Does a blackbox test by attempting to parse the class for
its various methods using vtkMethodParser. This is a useful
test because it gets all the methods of the vtkObject, parses
them and sorts them into different classes of objects."""
self._blackbox.testParse(obj)
def _testGetSet(self, obj, excluded_methods=[]):
"""Checks the Get/Set method pairs by setting the value using
the current state and making sure that it equals the value it
had originally. This effectively calls _testParse
internally. """
self._blackbox.testGetSet(obj, excluded_methods)
def _testBoolean(self, obj, excluded_methods=[]):
"""Checks the Boolean methods by setting the value on and off
and making sure that the GetMethod returns the set value.
This effectively calls _testParse internally. """
self._blackbox.testBoolean(obj, excluded_methods)
def interact():
"""Interacts with the user if necessary. """
global _INTERACT
if _INTERACT:
raw_input("\nPress Enter/Return to continue with the testing. --> ")
def isInteractive():
"""Returns if the currently chosen mode is interactive or not
based on command line options."""
return _INTERACT
def getAbsImagePath(img_basename):
"""Returns the full path to the image given the basic image
name."""
global VTK_BASELINE_ROOT
return os.path.join(VTK_BASELINE_ROOT, img_basename)
def _getTempImagePath(img_fname):
x = os.path.join(VTK_TEMP_DIR, os.path.split(img_fname)[1])
return os.path.abspath(x)
def compareImageWithSavedImage(src_img, img_fname, threshold=10):
"""Compares a source image (src_img, which is a vtkImageData) with
the saved image file whose name is given in the second argument.
If the image file does not exist the image is generated and
stored. If not the source image is compared to that of the
figure. This function also handles multiple images and finds the
best matching image.
"""
global _NO_IMAGE
if _NO_IMAGE:
return
f_base, f_ext = os.path.splitext(img_fname)
if not os.path.isfile(img_fname):
# generate the image
pngw = vtk.vtkPNGWriter()
pngw.SetFileName(_getTempImagePath(img_fname))
pngw.SetInputConnection(src_img.GetOutputPort())
pngw.Write()
_printCDashImageNotFoundError(img_fname)
msg = "Missing baseline image: " + img_fname + "\nTest image created: " + _getTempImagePath(img_fname)
sys.tracebacklimit = 0
raise RuntimeError(msg)
pngr = vtk.vtkPNGReader()
pngr.SetFileName(img_fname)
pngr.Update()
idiff = vtk.vtkImageDifference()
idiff.SetInputConnection(src_img.GetOutputPort())
idiff.SetImageConnection(pngr.GetOutputPort())
idiff.Update()
min_err = idiff.GetThresholdedError()
img_err = min_err
err_index = 0
count = 0
if min_err > threshold:
count = 1
test_failed = 1
err_index = -1
while 1: # keep trying images till we get the best match.
new_fname = f_base + "_%d.png"%count
if not os.path.exists(new_fname):
# no other image exists.
break
# since file exists check if it matches.
pngr.SetFileName(new_fname)
pngr.Update()
idiff.Update()
alt_err = idiff.GetThresholdedError()
if alt_err < threshold:
# matched,
err_index = count
test_failed = 0
min_err = alt_err
img_err = alt_err
break
else:
if alt_err < min_err:
# image is a better match.
err_index = count
min_err = alt_err
img_err = alt_err
count = count + 1
# closes while loop.
if test_failed:
_handleFailedImage(idiff, pngr, img_fname)
# Print for CDash.
_printCDashImageError(img_err, err_index, f_base)
msg = "Failed image test: %f\n"%idiff.GetThresholdedError()
sys.tracebacklimit = 0
raise RuntimeError(msg)
# output the image error even if a test passed
_printCDashImageSuccess(img_err, err_index)
def compareImage(renwin, img_fname, threshold=10):
"""Compares renwin's (a vtkRenderWindow) contents with the image
file whose name is given in the second argument. If the image
file does not exist the image is generated and stored. If not the
image in the render window is compared to that of the figure.
This function also handles multiple images and finds the best
matching image. """
global _NO_IMAGE
if _NO_IMAGE:
return
w2if = vtk.vtkWindowToImageFilter()
w2if.ReadFrontBufferOff()
w2if.SetInput(renwin)
w2if.Update()
try:
compareImageWithSavedImage(w2if, img_fname, threshold)
except RuntimeError:
w2if.ReadFrontBufferOn()
compareImageWithSavedImage(w2if, img_fname, threshold)
return
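# A minimal usage sketch inside a vtkTest subclass (the baseline file name
# 'TestSphere.png' is hypothetical):
#
# def testRender(self):
# "Test if rendering matches the saved baseline image."
# img_file = "TestSphere.png"
# compareImage(self.renWin, getAbsImagePath(img_file), threshold=10)
# interact()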
def _printCDashImageError(img_err, err_index, img_base):
"""Prints the XML data necessary for CDash."""
img_base = _getTempImagePath(img_base)
print("Failed image test with error: %f"%img_err)
print("<DartMeasurement name=\"ImageError\" type=\"numeric/double\"> "
"%f </DartMeasurement>"%img_err)
if err_index <= 0:
print("<DartMeasurement name=\"BaselineImage\" type=\"text/string\">Standard</DartMeasurement>")
else:
print("<DartMeasurement name=\"BaselineImage\" type=\"numeric/integer\"> "
"%d </DartMeasurement>"%err_index)
print("<DartMeasurementFile name=\"TestImage\" type=\"image/png\"> "
"%s </DartMeasurementFile>"%(img_base + '.png'))
print("<DartMeasurementFile name=\"DifferenceImage\" type=\"image/png\"> "
"%s </DartMeasurementFile>"%(img_base + '.diff.png'))
print("<DartMeasurementFile name=\"ValidImage\" type=\"image/png\"> "
"%s </DartMeasurementFile>"%(img_base + '.valid.png'))
def _printCDashImageNotFoundError(img_fname):
"""Prints the XML data necessary for Dart when the baseline image is not found."""
print("<DartMeasurement name=\"ImageNotFound\" type=\"text/string\">" + img_fname + "</DartMeasurement>")
def _printCDashImageSuccess(img_err, err_index):
"Prints XML data for Dart when image test succeeded."
print("<DartMeasurement name=\"ImageError\" type=\"numeric/double\"> "
"%f </DartMeasurement>"%img_err)
if err_index <= 0:
print("<DartMeasurement name=\"BaselineImage\" type=\"text/string\">Standard</DartMeasurement>")
else:
print("<DartMeasurement name=\"BaselineImage\" type=\"numeric/integer\"> "
"%d </DartMeasurement>"%err_index)
def _handleFailedImage(idiff, pngr, img_fname):
"""Writes all the necessary images when an image comparison
failed."""
f_base, f_ext = os.path.splitext(img_fname)
# write the difference image gamma adjusted for the dashboard.
gamma = vtk.vtkImageShiftScale()
gamma.SetInputConnection(idiff.GetOutputPort())
gamma.SetShift(0)
gamma.SetScale(10)
pngw = vtk.vtkPNGWriter()
pngw.SetFileName(_getTempImagePath(f_base + ".diff.png"))
pngw.SetInputConnection(gamma.GetOutputPort())
pngw.Write()
# Write out the image that was generated. Write it out as full so that
# it may be used as a baseline image if the tester deems it valid.
pngw.SetInputConnection(idiff.GetInputConnection(0,0))
pngw.SetFileName(_getTempImagePath(f_base + ".png"))
pngw.Write()
# write out the valid image that matched.
pngw.SetInputConnection(idiff.GetInputConnection(1,0))
pngw.SetFileName(_getTempImagePath(f_base + ".valid.png"))
pngw.Write()
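# The three files written above (.diff.png, .png and .valid.png under
# VTK_TEMP_DIR) are exactly the DifferenceImage, TestImage and ValidImage
# measurement files that _printCDashImageError() reports to CDash.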
def main(cases):
""" Pass a list of tuples containing test classes and the starting
string of the functions used for testing.
Example:
main ([(vtkTestClass, 'test'), (vtkTestClass1, 'test')])
"""
processCmdLine()
timer = vtk.vtkTimerLog()
s_time = timer.GetCPUTime()
s_wall_time = time.time()
# run the tests
result = test(cases)
tot_time = timer.GetCPUTime() - s_time
tot_wall_time = float(time.time() - s_wall_time)
# output measurements for CDash
print("<DartMeasurement name=\"WallTime\" type=\"numeric/double\"> "
" %f </DartMeasurement>"%tot_wall_time)
print("<DartMeasurement name=\"CPUTime\" type=\"numeric/double\"> "
" %f </DartMeasurement>"%tot_time)
# Delete these to eliminate debug leaks warnings.
del cases, timer
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def test(cases):
""" Pass a list of tuples containing test classes and the
functions used for testing.
It returns a unittest._TextTestResult object.
Example:
test = test_suite([(vtkTestClass, 'test'),
(vtkTestClass1, 'test')])
"""
# Make the test suites from the arguments.
suites = []
for case in cases:
suites.append(unittest.makeSuite(case[0], case[1]))
test_suite = unittest.TestSuite(suites)
# Now run the tests.
runner = unittest.TextTestRunner(verbosity=_VERBOSE)
result = runner.run(test_suite)
return result
def usage():
msg="""Usage:\nTestScript.py [options]\nWhere options are:\n
-D /path/to/VTKData
--data-dir /path/to/VTKData
Directory containing VTK Data used for tests. If this option
is not set via the command line the environment variable
VTK_DATA_ROOT is used. If the environment variable is not
set the value defaults to '../../../../../VTKData'.
-B /path/to/valid/image_dir/
--baseline-root /path/to/valid/image_dir/
This is a path to the directory containing the valid images
for comparison. If this option is not set via the command
line the environment variable VTK_BASELINE_ROOT is used. If
the environment variable is not set the value defaults to
the same value set for -D (--data-dir).
-T /path/to/valid/temporary_dir/
--temp-dir /path/to/valid/temporary_dir/
This is a path to the directory where the image differences
are written. If this option is not set via the command line
the environment variable VTK_TEMP_DIR is used. If the
environment variable is not set the value defaults to
'../../../../Testing/Temporary'.
-v level
--verbose level
Sets the verbosity of the test runner. Valid values are 0,
1, and 2 in increasing order of verbosity.
-I
--interact
Interacts with the user when chosen. If this is not chosen
the test will run and exit as soon as it is finished. When
enabled, the behavior of this is rather trivial and works
best when the test uses Tkinter.
-n
--no-image
Does not do any image comparisons. This is useful if you
want to run the test and not worry about test images or
image failures etc.
-h
--help
Prints this message.
"""
return msg
def parseCmdLine():
arguments = sys.argv[1:]
options = "B:D:T:v:hnI"
long_options = ['baseline-root=', 'data-dir=', 'temp-dir=',
'verbose=', 'help', 'no-image', 'interact']
try:
opts, args = getopt.getopt(arguments, options, long_options)
except getopt.error as msg:
print(usage())
print('-'*70)
print(msg)
sys.exit (1)
return opts, args
def processCmdLine():
opts, args = parseCmdLine()
global VTK_DATA_ROOT, VTK_BASELINE_ROOT, VTK_TEMP_DIR
global _VERBOSE, _NO_IMAGE, _INTERACT
# setup defaults
try:
VTK_DATA_ROOT = os.environ['VTK_DATA_ROOT']
except KeyError:
VTK_DATA_ROOT = os.path.normpath("../../../../../VTKData")
try:
VTK_BASELINE_ROOT = os.environ['VTK_BASELINE_ROOT']
except KeyError:
pass
try:
VTK_TEMP_DIR = os.environ['VTK_TEMP_DIR']
except KeyError:
VTK_TEMP_DIR = os.path.normpath("../../../../Testing/Temporary")
for o, a in opts:
if o in ('-D', '--data-dir'):
VTK_DATA_ROOT = os.path.abspath(a)
if o in ('-B', '--baseline-root'):
VTK_BASELINE_ROOT = os.path.abspath(a)
if o in ('-T', '--temp-dir'):
VTK_TEMP_DIR = os.path.abspath(a)
if o in ('-n', '--no-image'):
_NO_IMAGE = 1
if o in ('-I', '--interact'):
_INTERACT = 1
if o in ('-v', '--verbose'):
try:
_VERBOSE = int(a)
except ValueError:
msg="Verbosity should be an integer. 0, 1, 2 are valid."
print(msg)
sys.exit(1)
if o in ('-h', '--help'):
print(usage())
sys.exit()
if not VTK_BASELINE_ROOT: # default value.
VTK_BASELINE_ROOT = VTK_DATA_ROOT
if __name__ == "__main__":
######################################################################
# A Trivial test case to illustrate how this module works.
class SampleTest(vtkTest):
obj = vtk.vtkActor()
def testParse(self):
"Test if class is parseable"
self._testParse(self.obj)
def testGetSet(self):
"Testing Get/Set methods"
self._testGetSet(self.obj)
def testBoolean(self):
"Testing Boolean methods"
self._testBoolean(self.obj)
# Test with the above trivial sample test.
main( [ (SampleTest, 'test') ] )
|
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jan 10, 2021
@author: 666973 Surbhi Gupta
'''
from dateutil import parser
import datetime
from BaseAgent import BaseAgent
import time
import calendar
import random
import string
import os
import sys
import uuid
import json
import logging.handlers
class DummyDataAgent(BaseAgent):
def process(self):
# Project Variables
self.project_names = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator']
self.projectKeys = ['PS', 'MS', 'CF', 'AL']
self.projectIds = ["6", '7', '8', '9']
self.boardIdForProjects = [101, 201, 301, 401]
self.startFrom = self.config.get("startFrom")
self.startDate = parser.parse(self.startFrom, ignoretz=True)
# JIRA Variables
self.numberofRelease = self.config.get("numberofReleasesRequired")
self.numberOfSprintsInRelease = self.config.get("numberOfSprintsInRelease")
self.numberofDaysInSprint = self.config.get("numberofDaysInSprint")
self.Priority = ['High', 'Low', 'Medium']
self.jiraUsers = ["Vicky" , "Sam", "John", "Tom", "Adam"]
self.issueTypes = ['Story', 'Task']
self.storyPoints = ["8" , "5", "3", "2", "1"]
self.isStoryClosed = ["True", "False"]
# GIT Variables
self.repo = ['Insights', 'Spinnaker', 'OnBot', 'BuildOn']
self.author = ['John', 'Bruno', 'Charlie', 'Tom', 'Wilson']
self.branches = ['NewModules', 'BugFixes', 'Enhancements', 'Testing']
self.master_branches = ['InsightsEnterprise', 'master']
# JENKINS Variables
self.jenkins_status = ['Success', 'Failed', 'Aborted', 'Unstable']
# self.jenkins_status = ['Success']
self.master = ['master1', 'master2']
self.job_name = ['BillingApproved', 'BillingInvoice', 'ClaimValidated', 'ClaimProcessed', 'deploy']
self.jen_env = ['PROD', 'DEV', 'INT', 'RELEASE']
self.buildUrl = ['productv4.1.devops.com', 'productv4.2.devops.com', 'productv4.3.devops.com', 'productv4.4.devops.com']
# SONAR Variables
self.sonar_quality_gate_Status = ['OK', 'ERROR']
self.sonar_coverage = ['35', '50', '70', '85']
self.sonar_complexity = ['35', '50', '70', '85', '100', '125']
self.sonar_duplicate = ['15', '25', '45', '60']
self.sonar_techdepth = ['3', '5', '17', '25', '21']
self.resourceKey = ['09', '099', '89', '32']
self.sonar_codeCoverage = ['40', '50', '60', '70', '80', '90']
# RUNDECK Variables
self.rundeck_env = ['PROD', 'DEV', 'INTG', 'SIT', 'UAT']
self.rundeck_status = ['succeeded', 'failed', 'aborted']
# Nexus Variables
# self.artifacts_name = ["onlinebanking-0.0.1-20160505.105537-19.war","anybank-0.0.1-20160526.093210-15.war","demoapp-0.0.1-20160425.162709-16.war","demomavenapp-0.0.1-20160926.081623-11.war","iSightonlinebanking-0.0.1-20161213.042537-15.war","demomavenappxl-0.0.1-20161213.140419-19.war" ]
self.nexus_status = ['succeeded', 'failed', 'aborted']
self.artifacts_id = ["Service", "UI", "Engine", "Webhook", "Mockserver", "Workflow"]
self.group_ids = ["com.cts.paymentService", 'com.cts.mobileServices', 'com.cts.claimFinder', 'com.cts.agentLocator']
self.repoIdSnapshot = ["PaymentService-buildOn", "MobileServices-buildOn", "ClaimFinder-buildOn", "AgentLocator-buildOn"]
self.repoIdRelease = ["PaymentService-Release", "MobileServices-Release", "ClaimFinder-Release", "AgentLocator-Release"]
# QTest Variables
self.module = ["Correlation Builder", " Webhook Module", "Agent Management", "Data Archival"]
self.assignedto = ['Charlie', 'Steve', 'Andrew', 'Ricky']
self.severity = ['Critical', 'Non-Critical']
self.submitter = ['Tony', 'John', 'Adam', 'Sumit', 'Jack']
self.releaseStatus = ["Success" , "Bug Raised"] #, "Rollback"
# Looping over each project
try :
for projectName in self.project_names :
self.projectName = projectName
self.releaseBugData = []
self.createProjectData()
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def createProjectData (self):
try :
self.projectKey = self.projectKeys[self.project_names.index(self.projectName)]
self.projectId = self.projectIds[self.project_names.index(self.projectName)]
self.groupId = self.group_ids[self.project_names.index(self.projectName)]
self.noOfSprintsClosedSoFar = 0 # To Track Total Number of Sprints closed for each project
self.jenkinsBuildNumber = 1 # Total Number of Builds triggered in each project
self.pullRequestNo = 1 # Total Number of pull requests raised in each project
time_offset_hours = (random.randint(1, 4))
time_offset_seconds = (random.randint(101, 800))
self.releaseStartDate = self.startDate #datetime.datetime.now() - datetime.timedelta(days=self.start_date_days)
self.sprintEndDate = self.startDate #datetime.datetime.now() - datetime.timedelta(days=self.start_date_days)
self.sprintCompleteDate = self.sprintEndDate
self.releaseEndDate = (self.releaseStartDate + self.numberOfSprintsInRelease *(datetime.timedelta(days=self.numberofDaysInSprint)) )
self.noOfEpicsCreatedSoFar = 0 # Total Number of epics created for each project
self.issueCreationStarted = (5 * self.numberofRelease) + 1 # To track the starting point of the issue keys, since at most 5 epics can be created in a release; it tracks further keys as well.
release = 1
#self.releaseDate =
while release <= (self.numberofRelease) :
self.startReleaseWork(release)
release = release + 1
self.releaseStartDate = self.sprintEndDate
self.releaseEndDate = (self.releaseStartDate + self.numberOfSprintsInRelease *(datetime.timedelta(days=self.numberofDaysInSprint)) )
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def startReleaseWork(self, release):
try :
self.releaseVersion = release
self.updatedAt = ""
self.totalRequirements = []
self.totalTestCases = []
self.spillOverStories = []
self.detailsOfEpicsInRelease = []
self.listofEpicsKeyInCurrentRelease = []
self.totalIssuesInRelease = []
self.releaseData =[]
numberOfEpicsRequiredForRelease = (random.randint(2, 5)) # Range of epics per release
# Creating epics in the beginning of the Release.
epic_counts = 1
releaseSample = {}
releaseSample["fixVersion"] = "V." + str(self.releaseVersion)
releaseSample["startDate"] = (self.releaseStartDate).strftime("%Y-%m-%dT%H:%M:%S")
releaseSample ["endDate"] = (self.releaseEndDate).strftime("%Y-%m-%dT%H:%M:%S")
releaseSample["numberOfEpics"] = numberOfEpicsRequiredForRelease
releaseSample['toolName'] = "JIRA"
releaseSample['categoryName'] = "ALM"
self.releaseData.append(releaseSample)
releaseMetadata = {"labels" : ["ALM", "JIRA", "DATA","RELEASE"]}
self.publishToolsData(self.releaseData, releaseMetadata)
while epic_counts <= numberOfEpicsRequiredForRelease :
self.createEpicsForRelease(epic_counts)
epic_counts = epic_counts + 1
jiraMetadata = {"labels" : ["ALM", "JIRA", "DATA"], "dataUpdateSupported" : True, "uniqueKey" : ["key"]}
self.publishToolsData(self.detailsOfEpicsInRelease, jiraMetadata)
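# publishToolsData(data, metadata) is the BaseAgent publishing hook; the
# 'labels' tag the records downstream and, as assumed from its use in this
# agent, 'dataUpdateSupported' together with the 'uniqueKey' list lets a
# record identified by 'key' be updated in place instead of duplicated.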
for epic in self.detailsOfEpicsInRelease : # Move each epic in the release from To Do to In Progress
epicKey = epic['key']
self.change_Log(epicKey, "status", "In Progress", "To Do", self.updatedAt)
# self.totalIssuesInRelease = np.concatenate((self.totalIssuesInRelease, self.detailsOfEpicsInRelease))
self.numberofSprintInCurrentRelease = self.numberOfSprintsInRelease
rangeNumber = 1
self.sprintStartDate = self.releaseStartDate
self.sprintEndDate = (self.sprintStartDate + datetime.timedelta(days=self.numberofDaysInSprint))
self.isRollbackRelease = False
while rangeNumber <= self.numberofSprintInCurrentRelease : # Looping over the sprints
self.startSprintWork(rangeNumber)
if rangeNumber == self.numberofSprintInCurrentRelease:
isStoriesReleaseReady = True
else :
isStoriesReleaseReady = False
rangeNumber = rangeNumber + 1
if isStoriesReleaseReady :
status = self.serviceNowProcessing(release)
if status == "Success" :
issueDetailData = []
for issue in self.totalIssuesInRelease:
time_offset_seconds = (random.randint(101, 300))
changingDate= self.releaseEndDate - datetime.timedelta(seconds=time_offset_seconds)
self.change_Log(issue['key'], "status", "Done", "Ready for Release", changingDate)
issue["status"] = "Done"
lastUpdated_In_format = changingDate.strftime("%Y-%m-%dT%H:%M:%S")
issue["lastUpdated"] = lastUpdated_In_format
issue["lastUpdatedEpoch"] = int(time.mktime(time.strptime(lastUpdated_In_format, "%Y-%m-%dT%H:%M:%S")))
issue['resolutionDate'] = lastUpdated_In_format
issue['resolutionDateEpoch'] = int(time.mktime(time.strptime(lastUpdated_In_format, "%Y-%m-%dT%H:%M:%S")))
issueDetailData.append(issue)
for issue in self.detailsOfEpicsInRelease:
time_offset_seconds = (random.randint(101, 300))
changingDate= self.releaseEndDate - datetime.timedelta(seconds=time_offset_seconds)
self.change_Log(issue['key'], "status", "Done", "In Progress", changingDate)
issue["status"] = "Done"
lastUpdated_In_format = changingDate.strftime("%Y-%m-%dT%H:%M:%S")
issue["lastUpdated"] = lastUpdated_In_format
issue["lastUpdatedEpoch"] = int(time.mktime(time.strptime(lastUpdated_In_format, "%Y-%m-%dT%H:%M:%S")))
issue['resolutionDate'] = lastUpdated_In_format
issue['resolutionDateEpoch'] = int(time.mktime(time.strptime(lastUpdated_In_format, "%Y-%m-%dT%H:%M:%S")))
issueDetailData.append(issue)
jiraMetadata = {"labels" : ["ALM", "JIRA", "DATA"], "dataUpdateSupported" : True, "uniqueKey" : ["key"]}
self.publishToolsData(issueDetailData, jiraMetadata)
self.sprintStartDate = self.sprintEndDate
self.sprintEndDate = (self.sprintStartDate + datetime.timedelta(days=self.numberofDaysInSprint))
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno,ex)
def createEpicsForRelease(self, epic_count):
try :
epicKey = self.projectKeys[self.project_names.index(self.projectName)] + '-' + str(self.noOfEpicsCreatedSoFar + 1)
jiraSample = {}
jiraSample['key'] = epicKey
jiraSample['priority'] = random.choice(self.Priority)
time_offset_hours_epic = (random.randint(1, 24))
time_offset_seconds_epic = (random.randint(101, 800))
createdDate = self.releaseStartDate + datetime.timedelta(hours=time_offset_hours_epic, seconds=time_offset_seconds_epic)
self.updatedAt = createdDate
jiraSample['createdDate'] = createdDate.strftime("%Y-%m-%dT%H:%M:%S")
jiraSample['lastUpdated'] = createdDate.strftime("%Y-%m-%dT%H:%M:%S")
jiraSample['lastUpdatedEpoch'] = int(time.mktime(time.strptime(createdDate.strftime("%Y-%m-%dT%H:%M:%S"), "%Y-%m-%dT%H:%M:%S")))
jiraSample['status'] = 'To Do'
jiraSample['issueType'] = 'Epic'
jiraSample['projectName'] = self.projectName
jiraSample['reporter'] = random.choice(self.jiraUsers)
jiraSample['toolName'] = "JIRA"
jiraSample['categoryName'] = "ALM"
self.detailsOfEpicsInRelease.append(jiraSample)
self.listofEpicsKeyInCurrentRelease.append(epicKey)
self.noOfEpicsCreatedSoFar = self.noOfEpicsCreatedSoFar + 1
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def startSprintWork(self, rangeNumber):
try:
sprintSample = {}
sprint_data = []
#self.sprintStartDate = self.sprintEndDate
self.detailsOfIssues = []
self.listofIssueInCurrentSprint = []
issue_counts = 1
            time_offset_hours = random.randint(1, 24)
            time_offset_seconds = random.randint(101, 800)
numberOfIssuesToBeCreatedForSprint = (random.randint(10, 30)) # Number of issues in a sprint
#sprintEndDate = (self.sprintStartDate + datetime.timedelta(days=self.numberofDaysInSprint))
#sprintCompleteDate = (self.sprintEndDate + datetime.timedelta(hours=time_offset_hours, seconds=time_offset_seconds))
sprint_EndDate_InFormat = self.sprintEndDate.strftime("%Y-%m-%dT%H:%M:%S")
sprint_StartDate_InFormat = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
sprint_CompleteDate_InFormat = self.sprintCompleteDate.strftime("%Y-%m-%dT%H:%M:%S")
epoch_End = int(time.mktime(time.strptime(sprint_EndDate_InFormat, "%Y-%m-%dT%H:%M:%S")))
epoch_Start = int(time.mktime(time.strptime(sprint_StartDate_InFormat, "%Y-%m-%dT%H:%M:%S")))
epoch_Complete = int(time.mktime(time.strptime(sprint_CompleteDate_InFormat , "%Y-%m-%dT%H:%M:%S")))
sprint_Name = self.projectName + " SPRINT " + str(self.noOfSprintsClosedSoFar + 1)
sprintSample['sprintName'] = (sprint_Name)
self.sprintId = self.projectKey + '_SPRINT_' + str(self.noOfSprintsClosedSoFar + 1)
sprintSample['sprintId'] = self.sprintId
sprintSample['boardId'] = self.boardIdForProjects[self.project_names.index(self.projectName)]
sprintSample['projectName'] = self.projectName
sprintSample['sprintStartDate'] = sprint_StartDate_InFormat
sprintSample['sprintEndDate'] = sprint_EndDate_InFormat
sprintSample['sprintStartDateEpoch'] = epoch_Start
sprintSample['sprintEndDateEpoch'] = epoch_End
sprintSample['insightsTimeX'] = self.sprintEndDate.strftime("%Y-%m-%dT%H:%M:%SZ")
sprintSample['insightsTime'] = epoch_End
sprintSample['toolName'] = "JIRA"
sprintSample['categoryName'] = "ALM"
gmt = time.gmtime()
currentTime = calendar.timegm(gmt)
# Creating issues for each sprint
for releaseBug in self.releaseBugData: #Any Bug created in the previous release
self.totalIssuesInRelease.append(releaseBug)
self.change_Log(releaseBug['key'], "status", "In Progress", "To Do", self.updatedAt)
releaseBug['sprints']= self.sprintId
                self.workingOnIssues(releaseBug, rangeNumber, forceCloseInCurrentSprint=True)
if self.isRollbackRelease:
                issueDetailData = []
                for issue in self.totalIssuesInRelease:
                    # Build a fresh sprint list per issue; a single shared list
                    # would accumulate every issue's sprints across iterations.
                    sprintsAdded = [issue['sprints'], self.sprintId]
                    issue['sprints'] = sprintsAdded
issueDetailData.append(issue)
jiraMetadata = {"labels" : ["ALM", "JIRA", "DATA"], "dataUpdateSupported" : True, "uniqueKey" : ["key"]}
self.publishToolsData(issueDetailData, jiraMetadata)
else :
while issue_counts <= numberOfIssuesToBeCreatedForSprint :
self.creatingIssue()
issue_counts = issue_counts + 1
for spillStory in self.spillOverStories: #Loop for working on sprill over stories from the previous sprints
sprintsAdded = []
updateJiraNode = []
sprintsAdded.append(spillStory['sprints'])
sprintsAdded.append(self.sprintId)
                    time_offset_seconds = random.randint(101, 800)
                    time_offset_hours = random.randint(1, 5)
self.updatedAt = self.sprintStartDate + datetime.timedelta(hours=time_offset_hours, seconds=time_offset_seconds)
spillStory['lastUpdated'] = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
spillStory['lastUpdatedEpoch'] = int(time.mktime(time.strptime(self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S"), "%Y-%m-%dT%H:%M:%S")))
spillStory['sprints'] = sprintsAdded
git_repo = spillStory["git_repo"]
git_branch = spillStory["git_branch"]
git_author = spillStory['git_author']
spillStory.pop("git_branch", None)
spillStory.pop("git_author", None)
spillStory.pop("git_repo", None)
updateJiraNode.append(spillStory)
self.totalIssuesInRelease.append(spillStory)
jiraMetadata = {"labels" : ["ALM", "JIRA", "DATA"], "dataUpdateSupported" : True, "uniqueKey" : ["key"]}
self.publishToolsData(updateJiraNode, jiraMetadata)
self.workingOnIssues(spillStory, rangeNumber, git_repo, git_branch, None, git_author)
# Started Working On Issues
for detail in self.detailsOfIssues:
self.change_Log(detail['key'], "status", "In Progress", "To Do", self.updatedAt)
self.workingOnIssues(detail, rangeNumber)
            if rangeNumber == self.numberofSprintInCurrentRelease and self.releaseVersion == self.numberofRelease:
sprintSample['state'] = "Active"
self.isActiveSprint = True
else :
sprintSample['state'] = "Closed"
self.isActiveSprint = False
sprintSample['sprintCompleteDate'] = sprint_CompleteDate_InFormat
sprintSample['sprintCompleteDateEpoch'] = epoch_Complete
sprint_data.append(sprintSample)
self.noOfSprintsClosedSoFar = self.noOfSprintsClosedSoFar + 1
metadata = { "labels":["SPRINT"], "dataUpdateSupported":True, "uniqueKey":["boardId", "sprintId"]}
self.publishToolsData(sprint_data, metadata)
except Exception as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno,ex)
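    # workingOnIssues walks one issue through the simulated delivery pipeline:
    # it generates 1-5 git commits (each firing the CI chain below), and, if
    # the story closes in this sprint, forces a green build, opens and merges a
    # pull request, runs QA, and either marks the story 'Ready for Release' or
    # raises a Bug that is worked recursively. Stories that do not close are
    # parked in spillOverStories, together with their git context, for the
    # next sprint.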
def workingOnIssues(self, detail, rangeNumber, git_repo=None, git_branch=None, git_toBranch=None, git_author=None, originalStory=None, forceCloseInCurrentSprint=False):
try :
issueDetailData = []
workingKey = detail['key']
updatedAt = detail["lastUpdatedEpoch"]
self.updatedAt = datetime.datetime.fromtimestamp(updatedAt)
# Generating GIT Data for every issue
git_totalCommits = random.randint(1, 5)
git_count = 1
            # Choose the author, branch, and repo once, before the commit loop,
            # so one story keeps a single branch, repo, and author across its
            # n commits.
if git_repo is None and git_branch is None and git_author is None:
git_author = random.choice(self.author)
git_repo = random.choice(self.repo)
git_branch = random.choice(self.branches)
if git_toBranch is None:
git_toBranch = random.choice(self.master_branches)
while git_count <= git_totalCommits:
isOrphanCommit = bool(random.getrandbits(1))
isBuildSuccess = self.gitProcessing(workingKey, self.updatedAt, git_author, git_repo, git_branch, isOrphanCommit)
git_count = git_count + 1
if rangeNumber != self.numberofSprintInCurrentRelease :
isStoryClosingInCurrentSprint = bool(random.getrandbits(1))
else :
isStoryClosingInCurrentSprint = True
if isStoryClosingInCurrentSprint or forceCloseInCurrentSprint:
self.totalIssuesInRelease.append(detail)
                if not isBuildSuccess:
isBuildSuccess = self.gitProcessing(workingKey, self.updatedAt, git_author, git_repo, git_branch, False, True)
self.pull_request(workingKey, git_repo, git_branch, git_toBranch, git_author, self.updatedAt, "Open")
self.pull_request(workingKey, git_repo, git_branch, git_toBranch, git_author, self.updatedAt, "Merged")
isBuildSuccess = self.gitProcessing(workingKey, self.updatedAt, "root", git_repo, git_branch, False, True)
self.change_Log(workingKey, "status", "Quality Assurance", "In Progress", self.updatedAt)
self.change_Log(workingKey, "status", "QA In Progress", "Quality Assurance", self.updatedAt)
if originalStory is not None :
qtestKey = originalStory['key']
statusOfRequirement = self.qaTestProcessing(qtestKey, self.updatedAt, True)
else :
statusOfRequirement = self.qaTestProcessing(workingKey, self.updatedAt)
lastUpdated_In_format = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S")
if statusOfRequirement == "Passed":
self.change_Log(workingKey, "status", "Ready for Release", "Quality Assurance", self.updatedAt)
metadata = { "labels":["ALM", "JIRA", "DATA"], "dataUpdateSupported":True, "uniqueKey":["key"]}
self.publishToolsData(issueDetailData, metadata)
issueDetailData = []
originalStory = None
else :
bugData = []
bugDetail = detail.copy()
bugKey = self.projectKeys[self.project_names.index(self.projectName)] + '-' + str(self.issueCreationStarted + 1)
self.issueCreationStarted = self.issueCreationStarted + 1
bugDetail ["key"] = bugKey
bugDetail ['issueType'] = "Bug"
bugDetail ['storyPoints'] = ""
bugDetail ['summary'] = "Bug Raised for " + workingKey
bugDetail["createdAt"] = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S");
bugDetail['toolName'] = "JIRA"
bugDetail['categoryName'] = "ALM"
self.change_Log(bugKey, "status", "In Progress", "To Do", self.updatedAt)
self.change_Log(workingKey, "status", "Reopened", "QA In Progress", self.updatedAt)
originalStory = detail.copy()
detail["status"] = "Reopened"
detail["bugKeyAssociate"] = bugKey
issueDetailData.append(detail)
metadata = { "labels":["ALM", "JIRA", "DATA"], "dataUpdateSupported":True, "uniqueKey":["key"]}
self.publishToolsData(issueDetailData, metadata)
issueDetailData = []
self.workingOnIssues(bugDetail, rangeNumber, git_repo, git_branch, git_toBranch, git_author, originalStory, True)
else :
detail["git_repo"] = git_repo
detail["git_branch"] = git_branch
detail['git_author'] = git_author
self.spillOverStories.append(detail)
except Exception as ex:
logging.error(ex)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def creatingIssue(self):
jiraSample = {}
issueKey = self.projectKeys[self.project_names.index(self.projectName)] + '-' + str(self.issueCreationStarted + 1)
jiraSample['key'] = issueKey
jiraSample['priority'] = random.choice(self.Priority)
self.updatedAt = self.sprintStartDate
jiraSample['createdDate'] = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
jiraSample['lastUpdated'] = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
jiraSample['lastUpdatedEpoch'] = int(time.mktime(time.strptime(self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S"), "%Y-%m-%dT%H:%M:%S")))
jiraSample['status'] = 'To Do'
issueType = random.choice(self.issueTypes)
jiraSample['issueType'] = issueType
jiraSample['projectName'] = self.projectName
jiraSample['reporter'] = random.choice(self.jiraUsers)
jiraSample['assignee'] = random.choice(self.jiraUsers)
jiraSample['sprints'] = self.sprintId
jiraSample['epicKey'] = random.choice(self.listofEpicsKeyInCurrentRelease)
jiraSample['boardId'] = self.boardIdForProjects[self.project_names.index(self.projectName)]
jiraSample['fixVersion'] = "V." + str(self.releaseVersion)
jiraSample['toolName'] = "JIRA"
jiraSample['categoryName'] = "ALM"
if issueType == 'Story':
jiraSample['storyPoints'] = random.choice(self.storyPoints)
self.issueCreationStarted = self.issueCreationStarted + 1
self.detailsOfIssues.append(jiraSample)
        self.requirement_data = self.createRequirementForQtest(issueKey)
self.listofIssueInCurrentSprint.append(issueKey)
#self.totalIssuesInRelease.append(jiraSample)
    def createRequirementForQtest(self, key):
requirement_data = []
# self.totalTestCases = []
testCaseIds = []
requirementSample = {}
requirementSample['almType'] = "requirements"
requirementSample ['jiraKey'] = key
requirementSample['projectName'] = self.projectName
requirementSample['priority'] = random.choice(self.Priority)
requirementSample['submitter'] = random.choice(self.submitter)
requirementSample['assignee'] = random.choice(self.assignedto)
requirementSample['severity'] = random.choice(self.severity)
requirementSample ['module'] = random.choice(self.module)
requirementSample['creationDate'] = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S")
requirement_id = "RQ_" + str(''.join([random.choice(string.digits) for n in xrange(10)]))
requirementSample['requirement_id'] = requirement_id
noOfTestCases = random.randint(1, 5)
testCase = 1
while testCase <= noOfTestCases :
testCaseId = self.createTestCaseForQtest(requirement_id)
testCaseIds.append(testCaseId)
testCase = testCase + 1
requirementSample['test_ids'] = testCaseIds
requirement_data.append(requirementSample)
self.totalRequirements.append(requirementSample)
# metadata = { "labels":["ALM", "QTEST", "DATA"], "dataUpdateSupported":True, "uniqueKey":["requirement_id", "jiraKey"]}
# self.publishToolsData(requirement_data, metadata)
return requirementSample
def createTestCaseForQtest(self, requirement_id):
testcase_data = []
testCaseSample = {}
testCaseSample ['requirementId'] = requirement_id
testCaseSample['almType'] = "test_case"
testCaseSample['summary'] = "This test Case deals with " + requirement_id
testCaseId = "TEST_" + str(''.join([random.choice(string.digits) for n in xrange(10)]))
testCaseSample ['testCase_id'] = testCaseId
testcase_data.append(testCaseSample)
self.totalTestCases.append(testCaseSample)
# metadata = { "labels":["ALM", "QTEST", "DATA"], "dataUpdateSupported":True, "uniqueKey":["testCase_id"]}
# self.publishToolsData(testcase_data, metadata)
return testCaseId
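    # gitProcessing publishes one synthetic commit and then triggers the rest
    # of the tool chain (Jenkins -> Sonar -> Nexus -> Rundeck); its boolean
    # return value reports whether that chain ended in a successful deployment.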
def gitProcessing (self, workingKey, updatedAt, git_author, git_repo, git_branch, isOrphanCommit, isForceSuccessRequired=False):
# git_totalCommits = random.randint(1, 12)
# git_count = 0
git_data = []
jenkins_data = []
commitId = uuid.uuid4().hex
time_offset = (random.randint(101, 800))
git_date = (updatedAt + datetime.timedelta(seconds=time_offset))
git_datetime_epoch = int(time.mktime(git_date.timetuple()))
self.updatedAt = git_date
gitSample = {}
gitSample['gitCommitTime'] = git_date.strftime("%Y-%m-%dT%H:%M:%S")
timeStampField = "gitCommitTime",
timeStampFormat = "%Y-%m-%dT%H:%M:%S",
isEpoch = False
if isForceSuccessRequired == False :
if isOrphanCommit :
gitSample['message'] = 'This commit is associated with jira-key : # ' + workingKey
else :
gitSample['jiraKey'] = workingKey
gitSample['message'] = 'This commit is associated with jira-key : ' + workingKey
else :
gitSample['jiraKey'] = workingKey
gitSample['message'] = 'Force Success : ' + workingKey
gitSample['authorName'] = git_author
gitSample['branchName'] = git_branch
gitSample['repoName'] = git_repo
gitSample['commitId'] = commitId
gitSample['toolName'] = "GIT"
# gitSample['updatedAt'] = self.updatedAt
gitSample['categoryName'] = "SCM"
# git_count += 1
# git_CommitArr.append(gitSample)
git_data.append(gitSample)
gitMetadata = {"labels" : ["SCM", "GIT", "DATA"]}
# self.updatedAt = git_date
self.publishToolsData(git_data, gitMetadata, "gitCommitTime","%Y-%m-%dT%H:%M:%S", isEpoch)
isJenkinsBuildSuccess = self.jenkinsProcessing(self.updatedAt, commitId, isForceSuccessRequired)
return isJenkinsBuildSuccess
def jenkinsProcessing(self, updatedAt, commitId, isForceSuccessRequired=False):
jenkins_data = []
time_offset = (random.randint(101, 800))
# self.jenkinsBuildNumber = self.jenkinsBuildNumber +1
# print('a jenkine key '+randomJenkineBuildNumber +' '+gitSampleData['inSightsTimeX']) #+' '+gitSample['git_date']
isOrphanBuild = bool(random.getrandbits(1))
jenkins_date = (updatedAt + datetime.timedelta(seconds=120))
self.updatedAt = jenkins_date
# self.printLog('GIT Commit Id '+gitSampleData['commitId']+' GIT Date '+ gitSampleData['inSightsTimeX'] +' Jenkine Date '+str(jenkins_date), False)
jenkins_startTime = (jenkins_date)
jenkins_endTime = (jenkins_date + datetime.timedelta(seconds=time_offset))
jenkine_epochtime = int(time.mktime(jenkins_date.timetuple()))
self.updatedAt = jenkins_endTime
# jenkins_Buildstatus =random.choice(jenkins_status)
jenkinsSample = {}
# jenkinsSample['inSightsTimeX'] = (jenkins_date).strftime("%Y-%m-%dT%H:%M:%SZ")
# jenkinsSample['inSightsTime'] = jenkine_epochtime
jenkinsSample['startTime'] = jenkins_startTime.strftime("%Y-%m-%dT%H:%M:%S")
jenkinsSample['endTime'] = jenkins_endTime.strftime("%Y-%m-%dT%H:%M:%S")
jenkinsSample['duration'] = (jenkins_endTime - jenkins_startTime).seconds
# jenkinsSample['sprintID'] = random.choice(sprint)
buildNumberString = self.projectId + str(self.jenkinsBuildNumber)
jenkinsSample['buildNumber'] = buildNumberString
jenkinsSample['jobName'] = random.choice(self.job_name)
jenkinsSample['projectName'] = self.projectName
# jenkinsSample['updatedAt'] =self.updatedAt
# jenkinsSample['projectID'] = random.choice(projectId)
jenkinsSample['environment'] = random.choice(self.jen_env)
jenkinsSample['buildUrl'] = random.choice(self.buildUrl)
# jenkinsSample['result'] = random.choice(result)
jenkinsSample['master'] = random.choice(self.master)
jenkinsSample['toolName'] = "JENKINS"
jenkinsSample['categoryName'] = "CI"
# print(jenkinsSample)
timeStampField = "startTime",
timeStampFormat = "%Y-%m-%dT%H:%M:%S",
isEpoch = False
# jenkinsSample['jenkins_date']=str(jenkins_date)
# if rangeNumber < 2001 :
        if not isForceSuccessRequired:
jenkinsStatus = random.choice(self.jenkins_status)
if isOrphanBuild :
jenkinsSample['message'] = "Build triggered Manually"
else :
jenkinsSample['scmcommitId'] = commitId
jenkinsSample['message'] = "Build triggered for commit Id " + commitId
if jenkinsStatus == "Success" or jenkinsStatus == "Unstable": # Example Maven compilation is suceess.
isJenkinsBuildSuccess = self.sonarProcessing(buildNumberString, self.updatedAt);
if isJenkinsBuildSuccess == True:
jenkinsSample['status'] = jenkinsStatus
else :
jenkinsSample['status'] = "Failed"
else :
jenkinsSample['status'] = jenkinsStatus
isJenkinsBuildSuccess = False
else :
jenkinsStatus = "Success"
jenkinsSample['status'] = jenkinsStatus
jenkinsSample['scmcommitId'] = commitId
jenkinsSample['message'] = "Force Success Build triggered for commit Id " + commitId
            isJenkinsBuildSuccess = self.sonarProcessing(buildNumberString, self.updatedAt, True)
# sendStatus = True
        # Nexus: records deployed artifacts and the environment they were
        # deployed to; the artifact name carries the release version.
jenkins_data.append(jenkinsSample)
jenkinsSample = {}
self.jenkinsBuildNumber = self.jenkinsBuildNumber + 1
jenkinsMetadata = {"labels" : ["CI", "JENKINS", "DATA"]}
self.publishToolsData(jenkins_data, jenkinsMetadata, "startTime", "%Y-%m-%dT%H:%M:%S", isEpoch)
return isJenkinsBuildSuccess
def sonarProcessing(self, buildNumberString, updatedAt, isForceSuccessRequired=False):
sonar_data = []
isManualAnalysis = bool(random.getrandbits(1))
        randomSonarKey = str(''.join([random.choice(string.digits) for n in xrange(10)]))
sonar_date = (updatedAt + datetime.timedelta(seconds=120))
time_offset = (random.randint(101, 800))
# self.printLog('Jenkine build number '+jenkinsSampleData['buildNumber']+' Jenkine Date '+jenkinsSampleData['inSightsTimeX']+' Sonar Date '+str(sonar_date), False)
sonar_startTime = sonar_date.strftime("%Y-%m-%dT%H:%M:%S")
sonar_endTime = (sonar_date + datetime.timedelta(seconds=time_offset)).strftime("%Y-%m-%dT%H:%M:%S")
self.updatedAt = sonar_date + datetime.timedelta(seconds=time_offset)
sonarSample = {}
# sonarSample['inSightsTimeX'] = sonar_date.strftime("%Y-%m-%dT%H:%M:%SZ")
# sonarSample['inSightsTime'] = int(time.mktime(sonar_date.timetuple()))
sonarSample['startTime'] = sonar_startTime
sonarSample['endTime'] = sonar_endTime
sonarSample['projectname'] = self.projectName
# sonarSample['ProjectID'] = random.choice(projectId)
# sonarSample['ProjectKey'] = random.choice(sonar_key)
sonarSample['resourceKey'] = random.choice(self.resourceKey)
# if rangeNumber < (jenkine_success_build - 200) :
        sonarSample['sonarKey'] = randomSonarKey
sonarSample['sonarCoverage'] = random.choice(self.sonar_coverage)
sonarSample['sonarComplexity'] = random.choice(self.sonar_complexity)
sonarSample['sonarDuplicateCode'] = random.choice(self.sonar_duplicate)
sonarSample['sonarTechDepth'] = random.choice(self.sonar_techdepth)
sonarSample['code_coverage'] = random.choice(self.sonar_codeCoverage)
sonarSample['toolName'] = "SONAR"
sonarSample['categoryName'] = "CODEQUALITY"
        if not isForceSuccessRequired:
sonarStatus = random.choice(self.sonar_quality_gate_Status)
sonarSample['sonarQualityGateStatus'] = sonarStatus
if isManualAnalysis:
sonarSample['message'] = "Manually Initiated analysis"
buildNumberString = None
# isJenkinsBuildSuccess ="Failed" # changing the return status for jenkins build.
else :
sonarSample['message'] = "Triggered by jenkins"
sonarSample['jenkinsBuildNumber'] = buildNumberString
# isJenkinsBuildSuccess = sonarStaus
if sonarStatus == "OK":
                isJenkinsBuildSuccess = self.nexusProcessing(buildNumberString, self.updatedAt)
else :
isJenkinsBuildSuccess = False
if isManualAnalysis :
isJenkinsBuildSuccess = False
else :
sonarStatus = "OK"
sonarSample['sonarQualityGateStatus'] = sonarStatus
sonarSample['jenkinsBuildNumber'] = buildNumberString
sonarSample['message'] = "Force Success"
            isJenkinsBuildSuccess = self.nexusProcessing(buildNumberString, self.updatedAt, True, True)
sonar_data.append(sonarSample)
timeStampField = "startTime",
timeStampFormat = "%Y-%m-%dT%H:%M:%S",
isEpoch = False
sonarMetadata = {"labels" : ["CODEQUALITY", "SONAR", "DATA"]}
self.publishToolsData(sonar_data, sonarMetadata, "startTime", "%Y-%m-%dT%H:%M:%S", isEpoch)
return isJenkinsBuildSuccess
def nexusProcessing(self, buildNumberString, updatedAt, isSnapshot=True, isForceSuccessRequired=False):
nexus_data = []
nexus_date = (updatedAt + datetime.timedelta(seconds=120))
nexus_Date_in_format = nexus_date.strftime("%Y-%m-%dT%H:%M:%S")
nexus_date_epoch = int(time.mktime(time.strptime(nexus_Date_in_format, "%Y-%m-%dT%H:%M:%S")))
# time_offset = (random.randint(101, 800))
# nexus_startTime = nexus_date
# nexus_endTime = (nexus_date + datetime.timedelta(seconds=time_offset))
self.updatedAt = nexus_date
        if not isForceSuccessRequired:
nexusStatus = random.choice(self.nexus_status)
else :
nexusStatus = "succeeded"
for artifactid in self.artifacts_id:
nexusSample = {}
nexusSample['status'] = nexusStatus
nexusSample['artifactId'] = artifactid
nexusSample['groupId'] = self.group_ids[self.project_names.index(self.projectName)]
if buildNumberString is not None :
nexusSample["jenkinsBuildNumber"] = buildNumberString
nexusSample['projectName'] = self.projectName
nexusSample['uploadedDate'] = nexus_Date_in_format
nexusSample['toolName'] = "NEXUS"
nexusSample['categoryName'] = "ARTIFACTMANAGEMENT"
if isSnapshot :
if artifactid == "UI" :
artifactName = artifactid + "-" + str(self.releaseVersion) + str(nexus_date_epoch) + ".zip"
elif artifactid == "Service":
artifactName = artifactid + "-" + str(self.releaseVersion) + str(nexus_date_epoch) + ".war"
else :
artifactName = artifactid + "-" + str(self.releaseVersion) + str(nexus_date_epoch) + ".jar"
repoName = self.repoIdSnapshot[self.project_names.index(self.projectName)]
else :
repoName = self.repoIdRelease[self.project_names.index(self.projectName)]
if artifactid == "UI" :
artifactName = artifactid + "-" + str(self.releaseVersion) + ".zip"
elif artifactid == "Service":
artifactName = artifactid + "-" + str(self.releaseVersion) + ".war"
else :
artifactName = artifactid + "-" + str(self.releaseVersion) + ".jar"
nexusSample['artifactName'] = artifactName
nexusSample['repoName'] = repoName
nexus_data.append(nexusSample)
nexusMetadata = {"labels" : ["ARTIFACTMANAGEMENT", "NEXUS", "DATA"]}
timeStampField = "uploadedDate",
timeStampFormat = "%Y-%m-%dT%H:%M:%S",
isEpoch = False
self.publishToolsData(nexus_data, nexusMetadata, "uploadedDate", "%Y-%m-%dT%H:%M:%S", isEpoch)
if nexusStatus == "succeeded":
            isJenkinsBuildSuccess = self.rundeckProcessing(buildNumberString, self.updatedAt, isForceSuccessRequired)
else :
isJenkinsBuildSuccess = False
return isJenkinsBuildSuccess
def rundeckProcessing(self, buildNumberString, updatedAt, isForceSuccessRequired=False):
rundeck_data = []
isManualDeployment = bool(random.getrandbits(1))
rundeck_date = (updatedAt + datetime.timedelta(seconds=120))
time_offset = (random.randint(101, 800))
rundeck_startTime = rundeck_date
rundeck_endTime = (rundeck_date + datetime.timedelta(seconds=time_offset))
self.updatedAt = rundeck_endTime
rundeckSample = {}
# rundeckSample['inSightsTimeX'] = rundeck_date.strftime("%Y-%m-%dT%H:%M:%SZ")
# rundeckSample['inSightsTime'] = int(time.mktime(rundeck_startTime.timetuple()))
rundeckSample['startTime'] = rundeck_startTime.strftime("%Y-%m-%dT%H:%M:%S")
rundeckSample['endTime'] = rundeck_endTime.strftime("%Y-%m-%dT%H:%M:%S")
        rundeckStatus = random.choice(self.rundeck_status)
rundeckSample['environment'] = random.choice(self.rundeck_env)
rundeckSample['projectName'] = self.projectName
# if rangeNumber < (jenkine_success_build - 200) :
        if not isForceSuccessRequired:
if isManualDeployment :
rundeckSample['message'] = "Manual Deployment"
# rundeckStaus = "failed" # Appending property for jenkins build.
else:
if buildNumberString is not None :
rundeckSample['jenkinsBuildNumber'] = buildNumberString
rundeckSample['message'] = "Deployment triggered by jenkins"
if rundeckStaus == "succeeded":
isJenkinsBuildSuccess = True
else :
isJenkinsBuildSuccess = False
if isManualDeployment :
isJenkinsBuildSuccess = False
else :
            isJenkinsBuildSuccess = True
            rundeckStatus = "succeeded"
            rundeckSample['jenkinsBuildNumber'] = buildNumberString
rundeckSample['message'] = "Force Success"
        rundeckSample['status'] = rundeckStatus
rundeckSample['toolName'] = "RUNDECK"
rundeckSample['categoryName'] = "DEPLOYMENT"
rundeck_data.append(rundeckSample)
RundeckMetadata = {"labels" : ["DEPLOYMENT", "RUNDECK", "DATA"]}
# print(len(rundeck_data))
timeStampField = "startTime",
timeStampFormat = "%Y-%m-%dT%H:%M:%S",
isEpoch = False
# total_record_count =total_record_count + len(rundeck_data)
self.publishToolsData(rundeck_data, RundeckMetadata, "startTime", "%Y-%m-%dT%H:%M:%S", isEpoch)
return isJenkinsBuildSuccess
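    # qaTestProcessing replays the qTest records created for a story: every
    # linked test case passes or fails (at random, unless forced green), and a
    # single failing test marks the owning requirement as "Failed".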
def qaTestProcessing(self, key, updatedAt, isForceSuccessRequired=False):
statusOfRequirement = "Passed"
requirementData = []
testData = []
for requirement in self.totalRequirements:
if requirement['jiraKey'] == key :
requirementId = requirement['requirement_id']
for testCase in self.totalTestCases:
if requirementId == testCase['requirementId']:
                        if isForceSuccessRequired:
                            statusOfTest = "Success"
                        else:
                            statusOfTest = bool(random.getrandbits(1))
                            if statusOfTest:
                                statusOfTest = "Success"
                            else:
                                statusOfTest = "Failed"
                                # One failing test fails the whole requirement.
                                statusOfRequirement = "Failed"
                        testCase['status'] = statusOfTest
testData.append(testCase)
metadata = { "labels":["ALM", "QTEST", "DATA"]}
self.publishToolsData(testData, metadata)
requirement['status'] = statusOfRequirement
requirementData.append(requirement)
metadata = { "labels":["ALM", "QTEST", "DATA"]}
self.publishToolsData(requirementData, metadata)
break
return statusOfRequirement
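    # serviceNowProcessing records the release outcome as a ServiceNow
    # incident. Three outcomes are simulated: a clean success, a success that
    # raises a follow-up Bug carried into the next release, and a rollback
    # that raises a Bug and extends the current release by one extra sprint.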
def serviceNowProcessing(self, releaseVersion):
serviceNowSample = {}
serviceNowData = []
self.releaseBugData = []
serviceNowSample ['incident_id'] = 'IN_' + str(''.join([random.choice(string.digits) for n in xrange(10)]))
serviceNowSample ['release_Version'] = "V."+str(releaseVersion)
serviceNowSample ['project_name'] = self.projectName
status = random.choice(self.releaseStatus)
if status == "Success":
self.isRollbackRelease = False
serviceNowSample['summary'] ="Release completed"
elif status == "Bug Raised" :
self.isRollbackRelease = False
status = "Success"
bugSample = {}
bugSample['lastUpdated'] = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
bugSample['lastUpdatedEpoch'] = int(time.mktime(time.strptime(self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S"), "%Y-%m-%dT%H:%M:%S")))
bugKey=self.projectKeys[self.project_names.index(self.projectName)] + '-' + str(self.issueCreationStarted + 1)
self.issueCreationStarted = self.issueCreationStarted + 1
bugSample ['key'] = bugKey
bugSample ['issueType'] = "Bug"
bugSample ["summary"] = "Issue generated in the release " + str(releaseVersion)
serviceNowSample['summary'] ="Bug is raised " +str(bugKey)
self.releaseBugData.append(bugSample)
elif status == "Rollback" :
self.isRollbackRelease = True
bugSample = {}
bugSample['lastUpdated'] = self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S")
bugSample['lastUpdatedEpoch'] = int(time.mktime(time.strptime(self.sprintStartDate.strftime("%Y-%m-%dT%H:%M:%S"), "%Y-%m-%dT%H:%M:%S")))
bugKey=self.projectKeys[self.project_names.index(self.projectName)] + '-' + str(self.issueCreationStarted + 1)
self.issueCreationStarted = self.issueCreationStarted + 1
bugSample ['key'] = bugKey
bugSample ['issueType'] = "Bug"
bugSample ["summary"] = "Issue generated in the release " + str(releaseVersion)
serviceNowSample['summary'] ="Bug is raised " +str(bugKey)
self.releaseBugData.append(bugSample)
self.numberofSprintInCurrentRelease = self.numberofSprintInCurrentRelease + 1
serviceNowSample ['status'] = status
serviceNowData.append(serviceNowSample)
metadata = { "labels":["MONITOR", "SERVICENOW", "DATA"]}
self.publishToolsData(serviceNowData, metadata)
return status
def change_Log(self, changeKey, state, toString, fromString, updatedAt):
print(changeKey)
print(toString)
changeLogData = []
changeLog = {}
changeLog["state"] = state
changeLog["fromString"] = fromString
changeLog['issueKey'] = changeKey
changeLog['toString'] = toString
        time_offset_hours_change = random.randint(1, 24)
        time_offset_seconds_change = random.randint(101, 800)
        time_offset_days_change = random.randint(1, 5)
self.updatedAt = (updatedAt + datetime.timedelta(days=time_offset_days_change, hours=time_offset_hours_change, seconds=time_offset_seconds_change))
changeLog['changeDate'] = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S")
changeLogData.append(changeLog)
metadata = { "labels":["CHANGE_LOG", "DATA"]}
self.publishToolsData(changeLogData, metadata, "changeDate", "%Y-%m-%dT%H:%M:%S", False)
def pull_request(self, changeKey, git_repo, git_branch, git_toBranch, git_author, updatedAt, state) :
pullRequestData = []
pullRequest = {}
pullRequest["pullrequest_id"] = changeKey
pullRequest["repo"] = git_repo
pullRequest["fromBranch"] = git_branch
pullRequest['toBranch'] = git_toBranch
pullRequest['author'] = git_author
pullRequest['state'] = state
pullRequest ['jiraKey'] = changeKey
        time_offset_hours_change = random.randint(1, 24)
        time_offset_seconds_change = random.randint(101, 800)
        time_offset_days_change = random.randint(1, 5)
self.updatedAt = (updatedAt + datetime.timedelta(days=time_offset_days_change, hours=time_offset_hours_change, seconds=time_offset_seconds_change))
if state == "Open" :
pullRequest['raisedAt'] = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S")
else :
pullRequest['mergedAt'] = self.updatedAt.strftime("%Y-%m-%dT%H:%M:%S")
pullRequestData.append(pullRequest)
metadata = { "labels":["PULL_REQUEST_LOGS", "DATA"]}
self.publishToolsData(pullRequestData, metadata, "changeDate", "%Y-%m-%dT%H:%M:%S", False)
metadata = { "labels":["PULL_REQUESTS", "DATA"], "dataUpdateSupported":True, "uniqueKey":["pullrequest_id"]}
self.publishToolsData(pullRequestData, metadata)
# self.pullRequestNo =self.pullRequestNo +1
if __name__ == "__main__":
DummyDataAgent()
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
from logbook import Logger
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
merge,
partition_all,
sliding_window,
valmap,
)
from toolz.curried import operator as op
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MapAssetIdentifierIndexError,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
ValueNotFoundForField,
SidsNotFound,
SymbolNotFound,
)
from . import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
CHAIN_PREDICATES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from zipline.utils.control_flow import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
    end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
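# Illustrative sketch (names and dates assumed, not from the original module):
# two sorted periods for one key are re-joined so that the first ends where
# the second starts and the last runs to Timestamp.max:
#
#   raw = {('XYZ', ''): [
#       OwnershipPeriod(pd.Timestamp('2020-01-01', tz='utc'),
#                       pd.Timestamp('2020-06-01', tz='utc'), 1, 'XYZ'),
#       OwnershipPeriod(pd.Timestamp('2021-01-01', tz='utc'),
#                       pd.Timestamp('2021-06-01', tz='utc'), 2, 'XYZ'),
#   ]}
#   merged = merge_ownership_periods(raw)
#   # merged[('XYZ', '')][0].end == pd.Timestamp('2021-01-01', tz='utc')
#   # merged[('XYZ', '')][1].end == pd.Timestamp.max.tz_localize('utc')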
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
rows = sa.select(table.c).execute().fetchall()
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
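# For example (illustrative only): with names={'a', 'b'},
# _filter_kwargs(names, {'a': 1, 'b': None, 'c': 3}) returns {'a': 1};
# 'b' is dropped for being None and 'c' for not being selected. The curried
# partials above apply the same filter with the constructor kwargs accepted
# by Future and Equity respectively.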
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
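# Worked example (values illustrative): for root_symbol='CL', offset=0,
# roll_style='calendar', adjustment_style=None, the packed bytes are
# (1, ord('C'), ord('L'), 0, 0, 0, 0, 0), so the returned sid is the integer
# whose big-endian hex representation is 01434c0000000000.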
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
# Token used as a substitute for pickling objects that contain a
# reference to an AssetFinder.
PERSISTENT_TOKEN = "<AssetFinder>"
@preprocess(engine=coerce_string_to_eng)
def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._caches = (self._asset_cache, self._asset_type_cache) = {}, {}
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def _reset_caches(self):
"""
Reset our asset caches.
You probably shouldn't call this method.
"""
# This method exists as a workaround for the in-place mutating behavior
# of `TradingAlgorithm._write_and_map_id_index_to_sids`. No one else
# should be calling this.
for cache in self._caches:
cache.clear()
self.reload_symbol_maps()
def reload_symbol_maps(self):
"""Clear the in memory symbol lookup maps.
This will make any changes to the underlying db available to the
symbol maps.
"""
# clear the lazyval caches, the next access will requery
try:
del type(self).symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).fuzzy_symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map_by_sid[self]
except KeyError:
pass
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def fuzzy_symbol_ownership_map(self):
fuzzy_mappings = {}
for (cs, scs), owners in iteritems(self.symbol_ownership_map):
fuzzy_owners = fuzzy_mappings.setdefault(
cs + scs,
[],
)
fuzzy_owners.extend(owners)
fuzzy_owners.sort()
return fuzzy_mappings
@lazyval
def equity_supplementary_map(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.value),
value_from_row=lambda row: row.value,
)
@lazyval
def equity_supplementary_map_by_sid(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.sid),
value_from_row=lambda row: row.value,
)
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
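    # Example (sids illustrative): with default_none=True, unknown sids come
    # back as None in their original positions instead of raising:
    #
    #   assets = finder.retrieve_all([24, 999999], default_none=True)
    #   # -> [Equity(24 [...]), None], assuming sid 24 exists and 999999
    #   #    does not.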
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
        Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
        Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
        futures : dict[int -> Future]
Raises
------
        FutureContractsNotFound
            When any requested contract isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
symbol_cols = self.equity_symbol_mappings.c
inner = sa.select(
(symbol_cols.sid,) +
tuple(map(
op.getitem(symbol_cols),
symbol_columns,
)),
).where(
symbol_cols.sid.in_(map(int, sid_group)),
).order_by(
symbol_cols.end_date.asc(),
)
return sa.select(inner.c).group_by(inner.c.sid)
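    # Roughly equivalent SQL (a sketch; it relies on SQLite returning the last
    # row per group when grouping an ordered inner select):
    #
    #   SELECT * FROM (
    #       SELECT sid, symbol, company_symbol, share_class_symbol
    #       FROM equity_symbol_mappings
    #       WHERE sid IN (...)
    #       ORDER BY end_date ASC
    #   ) GROUP BY sid;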
def _lookup_most_recent_symbols(self, sids):
symbols = {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
),
)
}
if len(symbols) != len(sids):
raise EquitiesNotFound(
sids=set(sids) - set(symbols),
plural=True,
)
return symbols
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
symbols=self._lookup_most_recent_symbols(sids)):
return merge(row, symbols[row['sid']])
else:
mkdict = dict
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
        ----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def _lookup_symbol_strict(self, symbol, as_of_date):
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = self.symbol_ownership_map[
company_symbol,
share_class_symbol,
]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this ticker, this is ambiguous
# without the date
raise MultipleSymbolsFound(
symbol=symbol,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this symbol, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the ticker on the given asof date
raise SymbolNotFound(symbol=symbol)
def _lookup_symbol_fuzzy(self, symbol, as_of_date):
symbol = symbol.upper()
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = self.fuzzy_symbol_ownership_map[
company_symbol + share_class_symbol
]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held a symbol matching the fuzzy symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) == 1:
# only one valid match
return self.retrieve_asset(owners[0].sid)
options = []
for _, _, sid, sym in owners:
if sym == symbol:
# there are multiple options, look for exact matches
options.append(self.retrieve_asset(sid))
if len(options) == 1:
# there was only one exact match
return options[0]
# there are more than one exact match for this fuzzy symbol
raise MultipleSymbolsFound(
symbol=symbol,
options=set(options),
)
options = {}
for start, end, sid, sym in owners:
if start <= as_of_date < end:
# see which fuzzy symbols were owned on the asof date.
options[sid] = sym
if not options:
# no equity owned the fuzzy symbol on the date requested
raise SymbolNotFound(symbol=symbol)
sid_keys = list(options.keys())
# If there was only one owner, or there is a fuzzy and non-fuzzy which
# map to the same sid, return it.
if len(options) == 1:
return self.retrieve_asset(sid_keys[0])
for sid, sym in options.items():
# Possible to have a scenario where multiple fuzzy matches have the
# same date. Want to find the one where symbol and share class
# match.
if (company_symbol, share_class_symbol) == \
split_delimited_symbol(sym):
return self.retrieve_asset(sid)
# multiple equities held tickers matching the fuzzy ticker but
# there are no exact matches
raise MultipleSymbolsFound(
symbol=symbol,
options=[self.retrieve_asset(s) for s in sid_keys],
)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
return self._lookup_symbol_fuzzy(symbol, as_of_date)
return self._lookup_symbol_strict(symbol, as_of_date)
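    # Example (illustrative): the fuzzy path lets different share class
    # delimiters resolve to the same equity:
    #
    #   finder.lookup_symbol('BRK.A', as_of_date=None, fuzzy=True)
    #   finder.lookup_symbol('BRK_A', as_of_date=None, fuzzy=True)
    #   # Both return the equity registered under company symbol 'BRK' and
    #   # share class 'A', assuming exactly one such equity exists.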
def lookup_symbols(self, symbols, as_of_date, fuzzy=False):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
Returns
-------
equities : list[Equity]
"""
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = self.lookup_symbol(sym, as_of_date, fuzzy)
append_output(equity)
return out
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def lookup_by_supplementary_field(self, field_name, value, as_of_date):
try:
owners = self.equity_supplementary_map[
field_name,
value,
]
            assert owners, 'empty owners list for %r' % ((field_name, value),)
except KeyError:
# no equity has ever held this value
raise ValueNotFoundForField(field=field_name, value=value)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this value, this is ambiguous
# without the date
raise MultipleValuesFoundForField(
field=field_name,
value=value,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this value, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the value on the given asof date
raise ValueNotFoundForField(field=field_name, value=value)
def get_supplementary_field(
self,
sid,
field_name,
as_of_date,
):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
            If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
            assert periods, 'empty periods list for %r' % ((field_name, sid),)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
                # This equity has held more than one value, this is ambiguous
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid)
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(fc_cols.start_date != pd.NaT.value)).order_by(
fc_cols.sid).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange=exchange,
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
        doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@lazyval
def _symbol_lookups(self):
"""
        An iterable of symbol lookup functions to use with ``lookup_generic``.
        Attempts an equities lookup first, then futures.
"""
return (
self.lookup_symbol,
# lookup_future_symbol method does not use as_of date, since
# symbols are unique.
#
            # Wrap the method in a lambda so that both lookups share a
            # signature; a consumer iterating over this tuple can then
            # call either one with the same arguments.
lambda symbol, _: self.lookup_future_symbol(symbol)
)
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidsNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
for lookup in self._symbol_lookups:
try:
matches.append(lookup(asset_convertible, as_of_date))
return
except SymbolNotFound:
continue
else:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
        Convert an AssetConvertible or iterable of AssetConvertibles into
        a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidsNotFound(sids=[asset_convertible_or_iterable])
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# If the input is a ContinuousFuture just return it as-is.
elif isinstance(asset_convertible_or_iterable, ContinuousFuture):
return asset_convertible_or_iterable, missing
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
if isinstance(obj, ContinuousFuture):
matches.append(obj)
else:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
if missing:
raise ValueError("Missing assets for identifiers: %s" % missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _compute_asset_lifetimes(self):
"""
        Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).execute(),
), dtype='<f8', # use doubles so we get NaNs
)
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', '<f8'),
('start', '<f8'),
('end', '<f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', '<i8'),
('start', '<i8'),
('end', '<i8'),
])
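    # A minimal standalone sketch of the buffer trick above (illustrative
    # only): reading rows into a float buffer lets missing dates surface as
    # NaN, which can be patched before casting the recarray down to int64.
    #
    #     import numpy as np
    #     buf = np.array([(1.0, np.nan, 20.0)], dtype='<f8')
    #     rec = np.recarray(buf=buf, shape=(1,),
    #                       dtype=[('sid', '<f8'), ('start', '<f8'),
    #                              ('end', '<f8')])
    #     rec.start[np.isnan(rec.start)] = 0
    #     rec.astype([('sid', '<i8'), ('start', '<i8'), ('end', '<i8')])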
def lifetimes(self, dates, include_start_date):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
            False, then lifetimes.loc[date, asset] will be False when
            date == asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
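# Illustrative sketch (not in the original source): after the registrations
# above, plain ints and strings pass isinstance checks against the ABC
# without subclassing it.
#
#     isinstance(24, AssetConvertible)      # True, via Integral
#     isinstance('AAPL', AssetConvertible)  # True, via string_types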
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
)
def only_active_assets(reference_date_value, assets):
"""
Filter an iterable of Asset objects down to just assets that were alive at
the time corresponding to `reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
assets : iterable[Asset]
The assets to filter.
Returns
-------
active_assets : list
List of the active assets from `assets` on the requested date.
"""
return [a for a in assets if was_active(reference_date_value, a)]
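# Usage sketch (illustrative; ``assets`` is assumed to be an iterable of
# Asset objects): ``.value`` converts a pandas Timestamp into the integer
# nanoseconds-since-epoch representation these helpers expect.
#
#     ref = pd.Timestamp('2016-06-01', tz='UTC').value
#     live = only_active_assets(ref, assets)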
|
|
"""
The ArcREST API allows you to perform administrative tasks not available in
the Portal for ArcGIS website. The API is organized into resources and
operations. Resources are entities within Portal for ArcGIS that hold some
information and have a well-defined state. Operations act on these
resources and update their information or state. Resources and operations
are hierarchical and have unique universal resource locators (URLs).
"""
import json
from datetime import datetime
from .._abstract.abstract import BaseAGOLClass
from ..security import PortalTokenSecurityHandler,ArcGISTokenSecurityHandler,OAuthSecurityHandler,AGOLTokenSecurityHandler
########################################################################
class _log(BaseAGOLClass):
"""handles the portal log information at 10.3.1+"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_resources = None
_operations = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
        if not url.lower().endswith("/log"):
url = url + "/log"
self._url = url
self._securityHandler = securityHandler
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in manageportal.administration.log class."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
@property
def operations(self):
"""lists operations available to user"""
if self._operations is None:
self.__init()
return self._operations
#----------------------------------------------------------------------
@property
def settings(self):
"""returns the log settings for portal"""
url = self._url + "/settings"
params = {
"f" : "json",
}
return self._do_get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def editLogSettings(self, logLocation, logLevel="WARNING", maxLogFileAge=90):
"""
edits the log settings for the portal site
Inputs:
logLocation - file path to where you want the log files saved
on disk
logLevel - this is the level of detail saved in the log files
Levels are: OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, and
DEBUG
           maxLogFileAge - the number of days to keep a single log file
"""
url = self._url + "/settings/edit"
params = {
"f" : "json",
"logDir" : logLocation,
"logLevel" : logLevel,
"maxLogFileAge" : maxLogFileAge
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def query(self, logLevel="WARNING", source="ALL",
startTime=None, endTime=None,
logCodes=None, users=None, messageCount=1000):
"""
        allows users to look at the log files from the REST endpoint
Inputs:
logLevel - this is the level of detail saved in the log files
Levels are: OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, and
DEBUG
source - the type of information to search. Allowed values
are: ALL, PORTAL_ADMIN, SHARING, PORTAL
startTime - datetime object to start search at
endTime - datetime object to end search
           logCodes - comma-separated list of codes to search
           users - comma-separated list of users to query
messageCount - integer number of the max number of log
entries to return to the user.
"""
url = self._url + "/query"
filter_value = {"codes":[], "users":[], "source": "*"}
if source.lower() == "all":
filter_value['source'] = "*"
else:
filter_value['source'] = [source]
params = {
"f" : "json",
"level" : logLevel
}
        if startTime is not None and \
           isinstance(startTime, datetime):
            params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")  # e.g. 2015-01-31T15:00:00
        if endTime is not None and \
           isinstance(endTime, datetime):
            params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
        if logCodes is not None:
            filter_value['codes'] = logCodes.split(',')
        if users is not None:
            filter_value['users'] = users.split(',')
if messageCount is None:
params['pageSize'] = 1000
elif isinstance(messageCount, (int, long, float)):
params['pageSize'] = int(messageCount)
else:
params['pageSize'] = 1000
params['filter'] = filter_value
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def cleanLogs(self):
"""erases all the log data"""
url = self._url + "/clean"
params = {
"f":"json"
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
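# Usage sketch (illustrative only; the portal URL is hypothetical and ``sh``
# is assumed to be an already-constructed security handler accepted by this
# module):
#
#     from datetime import datetime, timedelta
#     logs = _log(url="https://portal.example.com/arcgis/portaladmin/logs",
#                 securityHandler=sh)
#     severe = logs.query(logLevel="SEVERE",
#                         startTime=datetime.now() - timedelta(days=1),
#                         endTime=datetime.now())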
########################################################################
class _Security(BaseAGOLClass):
"""
The security resource is the root of all the security configurations
and operations in the portal. Through this resource, you can change
the identity providers and the authentication mode for your portal.
"""
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
_json = None
_json_dict = None
_resources = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if securityHandler is None:
pass
elif isinstance(securityHandler, PortalTokenSecurityHandler) or \
isinstance(securityHandler, ArcGISTokenSecurityHandler) or \
isinstance(securityHandler, OAuthSecurityHandler):
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
                print k, " - attribute not implemented in manageportal.administration.security class."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
def createUser(self,
username,
password,
fullname,
email,
role="org_user",
provider="arcgis",
description="",
idpUsername=None):
"""
This operation is used to create a new user account in the portal.
Inputs:
username - The name of the user account.
password - The password for the account. This is a required
parameter only if the provider is arcgis; otherwise,
the password parameter is ignored.
fullname - The full name for the user account.
email - The email address for the user account.
description - An optional description string for the user
account.
role - The role for the user account. The default value is
org_user.
Values: org_user | org_publisher | org_admin
provider - The provider for the account. The default value is
arcgis.
Values: arcgis | webadaptor | enterprise
idpUsername - name of the user on the domain controller.
Ex: domain\account
"""
url = self._url + "/users/createUser"
params = {
"f" : "json",
"username" : username,
"password" : password,
"fullname" : fullname,
"email" : email,
"role" : role,
"provider" : provider,
"description" : description
}
        if idpUsername is not None:
params['idpUsername'] = idpUsername
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateSecurityConfiguration(self,
enableAutomaticAccountCreation=False,
disableServicesDirectory=False
):
"""
This operation can be used to update the portal's security settings
        such as whether or not enterprise accounts are automatically
        registered as members of your ArcGIS organization the first time
        they access the portal.
The security configuration is stored as a collection of properties
in a JSON object. The following properties are supported:
enableAutomaticAccountCreation
disableServicesDirectory
The automatic account creation flag (enableAutomaticAccountCreation)
determines the behavior for unregistered enterprise accounts the
first time they access the portal. When the value for this property
is set to false, first time users are not automatically registered
as members of your ArcGIS organization, and have the same access
privileges as other nonmembers. For these accounts to sign in, an
administrator must register the enterprise accounts using the
Create User operation.
The default value for the enableAutomaticAccountCreation property
is false. When this value is set to true, portal will add
enterprise accounts automatically as members of your ArcGIS
organization.
The disableServicesDirectory property controls whether the HTML
pages of the services directory should be accessible to the users.
The default value for this property is false, meaning the services
directory HTML pages are accessible to everyone.
"""
url = self._url + "/config/update"
params = {
"f" : "json",
"enableAutomaticAccountCreation": enableAutomaticAccountCreation,
"disableServicesDirectory" : disableServicesDirectory
}
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
    def updateIdentityStore(self,
userPassword,
user,
userFullnameAttribute,
ldapURLForUsers,
userEmailAttribute,
usernameAttribute,
isPasswordEncrypted=False,
caseSensitive=True):
"""
You can use this operation to change the identity provider
configuration in your portal. When Portal for ArcGIS is first
installed, it supports token-based authentication using the
built-in identity store for accounts. To configure your portal to
connect to your enterprise authentication mechanism, it must be
configured to use an enterprise identity store such as Windows
Active Directory or LDAP.
Inputs:
userPassword -The password for the domain account, for example,
secret.
isPasswordEncrypted - Indicates if the userPassword property is
an encrypted password or plain text. If
the property is false, the API will
encrypt the password automatically.
user - A user account with at least read permissions to look up
the email addresses and user names of users in your
organization. If possible, use an account whose password
does not expire.
Windows Active Directory example: mydomain\\winaccount
LDAP example: uid=admin\,ou=system
userFullnameAttribute - The attribute in Windows Active
Directory or LDAP that contains the full
name of the users, for example, cn.
ldapURLForUsers - The URL to your LDAP that points to the user
accounts, for example,
ldap://bar2:10389/ou=users\,ou=ags\,dc=example\,dc=com.
The URL to your LDAP will need to be provided
by your LDAP administrator.
This property is not applicable when
configuring Windows Active Directory.
userEmailAttribute - The attribute in Windows Active Directory
or LDAP that contains the email addresses
of the users, for example, email.
usernameAttribute - The LDAP attribute of the user entry that is
to be treated as the user name, for example, cn.
This property is not applicable when
configuring Windows Active Directory.
caseSensitive - In the rare case where your Windows Active
Directory is configured to be case sensitive,
set this property to true.
If your LDAP is configured to be case
insensitive, set parameter to false.
"""
url = self._url + "/config/updateIdentityStore"
params = {
"f" : "json",
"userPassword" : userPassword,
"isPasswordEncrypted" : isPasswordEncrypted,
"user" : user,
"userFullnameAttribute": userFullnameAttribute,
"ldapURLForUsers" : ldapURLForUsers,
"userEmailAttribute" : userEmailAttribute,
"usernameAttribute" : usernameAttribute,
"caseSensitive" : caseSensitive
}
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateTokenConfiguration(self, sharedKey):
"""
You can use this operation to change the shared key for the token
configuration. Shared keys are used to generate tokens and must be
of a suitable length to ensure strong encryption.
Input:
sharedKey - key used to generate token
"""
url = self._url + "/tokens/update"
params = {
"f" : "json",
"tokenConfig" : {"sharedKey" : sharedKey}
}
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def tokenConfigurations(self):
"""
This resource represents the token configuration within your portal
Use the Update Token Configuration operation to change the
configuration properties of the token service.
"""
url = self._url + "/tokens"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def securityConfiguration(self):
"""
The security configuration consists of the identity store
configuration.
If your portal will be authenticated through ArcGIS Web Adaptor,
you must set up your preferred authentication on your web server.
Use the Update Identity Store operation to configure your portal to
connect to your enterprise identity provider such as Windows Domain
or LDAP. By default, Portal for ArcGIS is configured to use the
built-in store and token-based authentication.
"""
url = self._url + "/config"
params = {
"f" : "json",
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def users(self):
""" returns the number of registered users on site """
url = self._url + "/users"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
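# Usage sketch (illustrative only; the URL and account values below are
# hypothetical, and ``sh`` is assumed to be a valid PortalTokenSecurityHandler):
#
#     sec = _Security(url="https://portal.example.com/arcgis/portaladmin/security",
#                     securityHandler=sh)
#     res = sec.createUser(username="jdoe",
#                          password="s3cret",
#                          fullname="Jane Doe",
#                          email="jdoe@example.com",
#                          role="org_publisher")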
########################################################################
class _System(BaseAGOLClass):
"""
This resource is an umbrella for a collection of system-wide
resources for your portal. This resource provides access to the
ArcGIS Web Adaptor configuration, portal directories, database
management server, indexing capabilities, license information, and
the properties of your portal.
"""
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = url
#----------------------------------------------------------------------
@property
def webAdaptors(self):
"""
The Web Adaptors resource lists the ArcGIS Web Adaptor configured
with your portal. You can configure the Web Adaptor by using its
configuration web page or the command line utility provided with
the installation.
"""
url = self._url + "/webadaptors"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def webAdaptor(self, webAdaptorID):
"""
The ArcGIS Web Adaptor is a web application that runs in a
front-end web server. One of the Web Adaptor's primary
responsibilities is to forward HTTP requests from end users to
        Portal for ArcGIS. The Web Adaptor acts as a reverse proxy, providing
the end users with an entry point to the system, hiding the
back-end servers, and providing some degree of immunity from
back-end failures.
The front-end web server can authenticate incoming requests against
your enterprise identity stores and provide specific authentication
schemes such as Integrated Windows Authentication (IWA), HTTP Basic,
or Digest.
Most importantly, a Web Adaptor provides your end users with a
well-defined entry point to your system without exposing the
internal details of your portal. Portal for ArcGIS will trust
requests being forwarded by the Web Adaptor and will not challenge
the user for any credentials. However, the authorization of the
request (by looking up roles and permissions) is still enforced by
the portal's sharing rules.
Input:
webAdaptorID - id of the web adaptor
"""
url = self._url + "/webadaptors/%s" % webAdaptorID
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def unregisterWebAdaptor(self, webAdaptorID):
"""
You can use this operation to unregister the ArcGIS Web Adaptor
from your portal. Once a Web Adaptor has been unregistered, your
portal will no longer trust the Web Adaptor and will not accept any
credentials from it. This operation is typically used when you want
to register a new Web Adaptor or when your old Web Adaptor needs to
be updated.
Input:
webAdaptorID - id of the web adaptor
"""
url = self._url + "/webadaptors/%s/unregister" % webAdaptorID
params = {
"f" : "json"
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateWebAdaptorsConfiguration(self, webAdaptorsConfig):
"""
This operation is used to change the common properties and
configuration of the ArcGIS Web Adaptor configured with the portal.
The properties are stored as a JSON object and, therefore, every
update must include all the necessary properties.
Inputs:
webAdaptorsConfig - The JSON object containing all the properties
in the configuration.
"""
url = self._url + "/webadaptors/config/update"
params = {
"f" : "json",
"webAdaptorsConfig" : webAdaptorsConfig
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def webAdaptorsConfiguration(self):
"""
This resource is a collection of configuration properties that
apply to the ArcGIS Web Adaptor configured with the portal. The Web
Adaptor fetches these properties periodically, which alters its
behavior. Only one property is supported:
sharedSecret - This property represents credentials that are
shared with the Web Adaptor. The Web Adaptor uses
these credentials to communicate with the portal.
"""
url = self._url + "/webadaptors/config"
params = {
"f" : "json",
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def directories(self):
"""
The directories resource is a collection of directories that are
used by the portal to store and manage content. At 10.2.1, Portal
for ArcGIS supports five types of directories:
Content directory-The content directory contains the data
associated with every item in the portal.
Database directory-The built-in security store and sharing rules
are stored in a Database server that places
files in the database directory.
Temporary directory-The temporary directory is used as a scratch
workspace for all the portal's runtime
components.
Index directory-The index directory contains all the indexes
associated with the content in the portal. The
indexes are used for quick retrieval of
information and for querying purposes.
Logs directory-Errors and warnings are written to text files in
the log file directory. Each day, if new errors
or warnings are encountered, a new log file is
created.
If you would like to change the path for a directory, you can use
the Edit Directory operation.
"""
url = self._url + "/directories"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def directory(self, directoryName):
"""
A directory is a file system-based folder that contains a specific
type of content for the portal. The physicalPath property of a
directory locates the actual path of the folder on the file system.
At 10.2.1, Portal for ArcGIS supports local directories and network
shares as valid locations.
During the Portal for ArcGIS installation, the setup program asks
you for the root portal directory (that will contain all the
portal's sub directories). However, you can change each registered
directory through this API.
Input:
           directoryName - name of directory category
"""
url = self._url + "/directories/%s" % directoryName
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def editDirectory(self, directoryName, physicalPath, description):
"""
The edit operation on a directory can be used to change the
physical path and description properties of the directory. This is
useful when changing the location of a directory from a local path
to a network share. However, the API does not copy your content and
data from the old path to the new path. This has to be done
independently by the system administrator.
Input:
directoryName - name of the directory to change
           physicalPath - new path for directory
description - new description of the directory
"""
url = self._url + "/directories/%s/edit" % directoryName
params = {
"f" : "json",
"physicalPath": physicalPath,
"description" : description
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def database(self):
"""
The database resource represents the database management system
(DBMS) that contains all of the portal's configuration and
relationship rules. This resource also returns the name and version
of the database server currently running in the portal.
You can use the Update Database Account operation to edit the
administrative database account that is used by components within
the portal to communicate with the database server.
"""
url = self._url + "/database"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateDatabaseAccount(self, username,
password):
"""
By default, the initial administrator account you define during the
Create Site operation is used as the database administrator
account. However, you can use this operation to change the database
administrator account credentials. To change just the password for
the account, provide the password parameter. If you want to create
a completely new account for the database, provide new values for
the username and password parameters.
Input:
username - database user name
password - database user password
"""
url = self._url + "/database/updateAdminAccount"
params = {
"f" : "json",
"username" : username,
"password" : password
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def indexer(self):
"""
The indexer resource contains connection information to the default
indexing service. You can change its configuration properties such
as the port number and host name if you want the portal sharing API
to connect to and access another indexing service.
"""
url = self._url + "/indexer"
params = {
"f" : "json",
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def indexerStatus(self):
"""
The status resource allows you to view the status of the indexing
service. You can view the number of users, groups, relationships,
and search items in both the database (store) and the index.
If the database and index do not match, indexing is either in
progress or there is a problem with the index. It is recommended
that you reindex to correct any issues. If indexing is in progress,
you can monitor the status by refreshing the page.
"""
url = self._url + "/indexer/status"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def reindex(self, mode, includes=""):
"""
This operation allows you to generate or update the indexes for
content; such as users, groups, and items stored in the database
(store). During the process of upgrading an earlier version of
Portal for ArcGIS, you are required to update the indexes by
running this operation. You can check the status of your indexes
using the status resource.
Input:
mode - The mode in which the indexer should run.
                  Values: USER_MODE | GROUP_MODE | RELATION_MODE |
                          SEARCH_MODE | FULL
           includes - An optional comma-separated list of elements to
                      include in the index. This is useful if you want to
                      only index certain items or user accounts.
"""
url = self._url + "/indexer/reindex"
params = {
"f" : "json",
"mode" : mode,
"includes" : includes
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateIndexConfiguration(self,
indexerHost="localhost",
indexerPort=7199):
"""
You can use this operation to change the connection information for
the indexing service. By default, Portal for ArcGIS runs an
indexing service that runs on port 7199. If you want the sharing
API to refer to the indexing service on another instance, you need
to provide the host and port parameters.
Input:
indexerHost - The name of the server (hostname) on which the
index service runs. The default value is localhost
indexerPort - The port number on which the index service is
listening. The default value is 7199
"""
url = self._url + "/indexer/update"
params = {
"f" : "json",
"indexerHost": indexerHost,
"indexerPort": indexerPort
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def licenses(self):
"""
Portal for ArcGIS requires a valid license to function correctly.
This resource returns the current status of the license.
"""
url = self._url + "/licenses"
params = {
"f" : "json"
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class PortalAdministration(BaseAGOLClass):
"""
This is the root resource for administering your portal. Starting from
this root, all of the portal's environment is organized into a
hierarchy of resources and operations.
After installation, the portal can be configured using the Create Site
operation. Once initialized, the portal environment is available
through System and Security resources.
"""
_securityHandler = None
_url = None
_proxy_url = None
_proxy_port = None
_resources = None
_version = None
_json = None
_json_dict = None
#----------------------------------------------------------------------
def __init__(self, admin_url,
securityHandler,
proxy_url=None,
proxy_port=None,
                 initialize=False):
"""Constructor"""
if securityHandler is not None:
self._securityHandler = securityHandler
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
self._url = admin_url
        if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the site properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self._json_dict = json_dict
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in manageportal.administration class."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""returns the raw key/values for the object"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.iteritems():
yield [k,v]
#----------------------------------------------------------------------
@property
def resources(self):
"""returns the admin sites resources"""
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
@property
def version(self):
"""returns the portal version"""
if self._version is None:
self.__init()
return self._version
#----------------------------------------------------------------------
def createSite(self,
username,
password,
fullname,
email,
                   securityQuestionIdx,
securityQuestionAns,
description=""
):
"""
The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation.
Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as
the initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve a forgotten password
securityQuestionAns - The answer to the secret question
"""
url = self._url + "/createNewSite"
params = {
"f" : "json",
"username" : username,
"password" : password,
"fullname" : fullname,
"email" : email,
"description" : description,
"securityQuerstionIdx" : securityQuerstionIdx,
"securityQuestionAns" : securityQuestionAns
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def system(self):
"""
Creates a reference to the System operations for Portal
"""
url = self._url + "/system"
return _System(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def security(self):
"""
Creates a reference to the Security operations for Portal
"""
url = self._url + "/security"
return _Security(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def logs(self):
"""returns the portals log information"""
url = self._url + "/logs"
return _log(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def root(self):
"""gets/sets the root admin url"""
return self._url
#----------------------------------------------------------------------
@root.setter
def root(self, value):
"""gets/sets the root admin url"""
if self._url != value:
self._url = value
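# Usage sketch (illustrative only; the admin URL is hypothetical and ``sh``
# is assumed to be a PortalTokenSecurityHandler or similar):
#
#     admin = PortalAdministration(
#         admin_url="https://portal.example.com/arcgis/portaladmin",
#         securityHandler=sh)
#     print admin.version          # lazily loads site properties on first use
#     print admin.system.licenses  # walks the resource hierarchy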
|
|
"""
BSD 3-Clause License
Copyright (c) 2017, Gilberto Pastorello
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pynts.dataio: data input/output functions for timeseries data
@author: Gilberto Pastorello
@contact: gzpastorello@lbl.gov
@date: 2017-07-17
"""
import os
import logging
import numpy
from datetime import datetime
from pynts import PyntsError
_log = logging.getLogger(__name__)
def get_headers(filename, headerline=1):
"""
    Parses the header line and returns a
    list of strings with header labels.
Must have at least two columns, one with timestamps.
:param filename: name of the file to be loaded
:type filename: str
:param headerline: line number for column headers (starting from 1)
:type headerline: int
"""
with open(filename, 'r') as f:
lnum = 0
while lnum < headerline:
line = f.readline()
lnum += 1
headers = line.strip().split(',')
if len(headers) < 2:
raise PyntsError("Headers too short: '{h}'".format(h=line))
headers = [i.strip() for i in headers]
_log.debug("Got headers: {h}".format(h=headers))
return headers
STRTEST_STANDARD = ['TIMESTAMP', 'TIMESTAMP_START', 'TIMESTAMP_END']
def get_dtype(variable, strtest=STRTEST_STANDARD):
"""
Returns data type based on variable label (case insensitive).
    Timestamp variables are str, and all others 64-bit/8-byte floats.
    :param variable: variable label
    :type variable: str
    :param strtest: list of label substrings identifying timestamp variables (str data type will be used)
    :type strtest: list
"""
for s in strtest:
if s.lower() in variable.lower():
return 'a25'
return 'f8'
def get_fill_value(dtype):
"""
Returns string fill value based on data type.
:param dtype: data type for variable
:type dtype: str
"""
if dtype == 'a25':
return ''
elif dtype == 'i8':
return -9999
else:
return numpy.NaN
def get_timestamp_format_from_resolution(sample):
"""
    Returns strptime/strftime compatible format based on sample ISO timestamp
    (length-based; strips blanks)
:param sample: sample ISO timestamp
:type sample: str
"""
s = sample.strip()
# year only
if len(s) == 4:
tformat = "%Y"
# year, month
elif len(s) == 6:
tformat = "%Y%m"
# year, day of year
elif len(s) == 7:
tformat = "%Y%j"
# year, month, day
elif len(s) == 8:
tformat = "%Y%m%d"
# year, month, day, hour
elif len(s) == 10:
tformat = "%Y%m%d%H"
# year, month, day, hour, minute
elif len(s) == 12:
tformat = "%Y%m%d%H%M"
# year, month, day, hour, minute, second
elif len(s) == 14:
tformat = "%Y%m%d%H%M%S"
# unknown
else:
raise PyntsError("Unknown time format '{f}'".format(f=sample))
# raises error if doesn't match sample
try:
_ = datetime.strptime(s, tformat)
except TypeError:
_ = datetime.strptime(s.decode('UTF8'), tformat)
return tformat
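# Example (illustrative): the format is selected purely by sample length,
# so a 12-character sample maps to minute resolution.
#
#     get_timestamp_format_from_resolution('201507171330')  # -> "%Y%m%d%H%M"
#     get_timestamp_format_from_resolution('2015')          # -> "%Y"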
MISSING_VALUES_STANDARD = 'nan,NAN,NaN,-9999,-9999.0,-6999,-6999.0, '
def load_csv(filename, delimiter=',', headerline=1, first_dataline=None, timestamp_labels=STRTEST_STANDARD[0:1], missing=MISSING_VALUES_STANDARD):
"""
Loads timeseries data from column oriented CSV file
with at least one timestamp column
    :param filename: name of the file to be read (fails if it doesn't exist)
:type filename: str
:param delimiter: cell delimiter character (e.g., ',' or '\t')
:type delimiter: str
:param headerline: line number for column headers (starting from 1)
:type headerline: int
:param first_dataline: line number for first data record (starting from 1)
:type first_dataline: int
:param timestamp_labels: list of timestamp column labels in file (will only use first)
:type timestamp_labels: list
    :param missing: comma-separated string with all entries that should be treated as missing
    :type missing: str
"""
if first_dataline is None:
first_dataline = headerline + 1
_log.debug("Started loading: {f}".format(f=filename))
headers = get_headers(filename=filename, headerline=headerline)
dtype = [(h, get_dtype(h, strtest=timestamp_labels)) for h in headers]
fill_values = [get_fill_value(dtype=d[1]) for d in dtype]
    data = numpy.genfromtxt(fname=filename, dtype=dtype, names=True, delimiter=delimiter, skip_header=first_dataline - 2, missing_values=missing, usemask=True)
data = numpy.atleast_1d(data)
data = numpy.ma.filled(data, fill_values)
timestamp_label = timestamp_labels[0]
tformat = get_timestamp_format_from_resolution(data[timestamp_label][0])
try:
timestamp = [datetime.strptime(i, tformat) for i in data[timestamp_label]]
except TypeError:
timestamp = [datetime.strptime(i.decode('UTF8'), tformat) for i in data[timestamp_label]]
_log.debug("Finished loading: {f}".format(f=filename))
return data, timestamp
def save_csv(filename, data, delimiter=',', newline='\n', header=None):
"""
Saves data array into csv file.
Saves missing values as -9999
:param filename: name of file to be written (overwrites if exists)
:type filename: str
:param data: data array
:type data: numpy.ndarray
:param delimiter: cell delimiter character (e.g., ',' or '\t')
:type delimiter: str
:param newline: new line character
:type newline: str
:param header: header to be written before data (array labels used if none)
:type header: str
"""
_log.debug("Started saving: {f}".format(f=filename))
if header is None:
header = delimiter.join(data.dtype.names)
with open(filename, 'w') as f:
f.write(header + newline)
for i, row in enumerate(data):
if i % 1000 == 0:
_log.debug("Writing {f}: line {l}".format(f=filename, l=i))
line = delimiter.join("-9999" if (value == -9999.0 or value == -9999.9) else str(value) for value in row)
f.write(line + newline)
_log.debug("Finished saving: {f}".format(f=filename))
if __name__ == '__main__':
raise PyntsError('Not executable')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import lock_server
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.policies import base as base_policy
from nova.policies import lock_server as ls_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
CONF = nova.conf.CONF
class LockServerPolicyTest(base.BasePolicyTest):
"""Test Lock server APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(LockServerPolicyTest, self).setUp()
self.controller = lock_server.LockServerController()
self.req = fakes.HTTPRequest.blank('')
user_id = self.req.environ['nova.context'].user_id
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
uuid = uuids.fake_id
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuid, project_id=self.project_id,
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
        # With legacy rules and no scope checks, all admin, project member,
        # project reader, and other project roles (because the legacy rule
        # allows the server owner - same project id, with no role check)
        # are able to lock and unlock the server.
self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
        # By default, legacy rules are enabled and scope checks are disabled.
        # System admin, legacy admin, and project admin are able to override
        # unlock, regardless of who locked the server.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.API.lock')
def test_lock_server_policy(self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._lock,
self.req, self.instance.uuid,
body={'lock': {}})
@mock.patch('nova.compute.api.API.unlock')
def test_unlock_server_policy(self, mock_unlock):
rule_name = ls_policies.POLICY_ROOT % 'unlock'
self.common_policy_auth(self.project_action_authorized_contexts,
rule_name,
self.controller._unlock,
self.req, self.instance.uuid,
body={'unlock': {}})
@mock.patch('nova.compute.api.API.unlock')
@mock.patch('nova.compute.api.API.is_expected_locked_by')
def test_unlock_override_server_policy(self, mock_expected, mock_unlock):
mock_expected.return_value = False
rule = ls_policies.POLICY_ROOT % 'unlock'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
if not CONF.oslo_policy.enforce_scope:
check_rule = rule_name
else:
check_rule = functools.partial(base.rule_if_system,
rule, rule_name)
self.common_policy_auth(self.project_admin_authorized_contexts,
check_rule,
self.controller._unlock,
self.req, self.instance.uuid,
body={'unlock': {}})
def test_lock_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._lock,
req, fakes.FAKE_UUID, body={'lock': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.lock')
    def test_lock_server_overridden_policy_pass_with_same_user(
self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
self.controller._lock(self.req,
fakes.FAKE_UUID,
body={'lock': {}})
class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
"""Test lock/unlock server APIs policies with no legacy deprecated rules
and no scope checks which means new defaults only.
"""
without_deprecated_rules = True
def setUp(self):
super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to lock/unlock the server and only project admin can
# override the unlock.
self.project_action_authorized_contexts = [
self.project_admin_context, self.project_member_context]
self.project_admin_authorized_contexts = [self.project_admin_context]
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
"""Test Lock Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # Enabling scope checks will not allow the system admin to
        # lock/unlock the server.
self.project_action_authorized_contexts = [
self.legacy_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
"""Test Lock Server APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
        # With scope checks enabled and no legacy rules, only the project
        # admin/member will be able to lock/unlock the server.
self.project_action_authorized_contexts = [
self.project_admin_context, self.project_member_context]
self.project_admin_authorized_contexts = [self.project_admin_context]
class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
"""Test Lock Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
and no more deprecated rules.
"""
def setUp(self):
super(LockServerOverridePolicyTest, self).setUp()
        # We override the 'unlock:unlock_override' policy to
        # PROJECT_MEMBER, so we test it with both the project admin
        # and the project member as allowed contexts.
self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
def test_unlock_override_server_policy(self):
rule = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
self.policy.set_rules({
            # Make unlock allowed for everyone so that we can check the
            # unlock override policy.
ls_policies.POLICY_ROOT % 'unlock': "@",
rule: base_policy.PROJECT_MEMBER}, overwrite=False)
super(LockServerOverridePolicyTest,
self).test_unlock_override_server_policy()
|
|
"""Unit tests for reviewboard.admin.views.dashboard."""
from __future__ import unicode_literals
from django.utils import six
from kgb import SpyAgency
from reviewboard.admin.views import logger
from reviewboard.admin.widgets import BaseAdminWidget, admin_widgets_registry
from reviewboard.testing.testcase import TestCase
class MyWidget(BaseAdminWidget):
widget_id = 'my-widget'
name = 'My Widget'
css_classes = 'test-c-my-widget -is-large'
def get_js_model_attrs(self, request):
return {
'test_attr': 'test-value',
}
def get_js_model_options(self, request):
return {
'test_option': 'test-value',
}
def get_js_view_options(self, request):
return {
'test_option': 'test-value',
}
class HiddenWidget(BaseAdminWidget):
widget_id = 'hidden-widget'
name = 'Hidden Widget'
def can_render(self, request):
return False
class AdminDashboardViewTests(SpyAgency, TestCase):
"""Unit tests for reviewboard.admin.views.admin_dashboard_view."""
fixtures = ['test_users']
def test_get(self):
"""Testing admin_dashboard_view"""
admin_widgets_registry.register(MyWidget)
try:
self.client.login(username='admin', password='admin')
response = self.client.get('/admin/')
self.assertEqual(response.status_code, 200)
self.assertIn('page_model_attrs', response.context)
widgets = response.context['page_model_attrs']['widgetsData']
self.assertEqual(len(widgets), len(admin_widgets_registry))
self.assertEqual(
widgets[-1],
{
'id': 'my-widget',
'domID': 'admin-widget-my-widget',
'viewClass': 'RB.Admin.WidgetView',
'modelClass': 'RB.Admin.Widget',
'viewOptions': {
'test_option': 'test-value',
},
'modelAttrs': {
'test_attr': 'test-value',
},
'modelOptions': {
'test_option': 'test-value',
},
})
self.assertIn(
b'<div class="rb-c-admin-widget test-c-my-widget -is-large"'
b' id="admin-widget-my-widget">',
response.content)
finally:
admin_widgets_registry.unregister(MyWidget)
def test_get_with_widget_can_render_false(self):
"""Testing admin_dashboard_view with widget.can_render() == False"""
admin_widgets_registry.register(HiddenWidget)
try:
self.client.login(username='admin', password='admin')
response = self.client.get('/admin/')
self.assertEqual(response.status_code, 200)
self.assertIn('page_model_attrs', response.context)
widgets = response.context['page_model_attrs']['widgetsData']
self.assertEqual(len(widgets), len(admin_widgets_registry) - 1)
self.assertNotEqual(widgets[-1]['id'], 'hidden-widget')
finally:
admin_widgets_registry.unregister(HiddenWidget)
def test_get_with_broken_widget_init(self):
"""Testing admin_dashboard_view with broken widget.__init__"""
error_msg = '__init__ broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def __init__(self):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def test_get_with_broken_widget_can_render(self):
"""Testing admin_dashboard_view with broken widget.can_render"""
error_msg = 'can_render broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def can_render(self, request):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def test_get_with_broken_widget_get_js_view_options(self):
"""Testing admin_dashboard_view with broken widget.get_js_view_options
"""
error_msg = 'get_js_view_options broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def get_js_view_options(self, request):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def test_get_with_broken_widget_get_js_model_attrs(self):
"""Testing admin_dashboard_view with broken widget.get_js_model_attrs
"""
error_msg = 'get_js_model_attrs broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def get_js_model_attrs(self, request):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def test_get_with_broken_widget_get_js_model_options(self):
"""Testing admin_dashboard_view with broken widget.get_js_model_options
"""
error_msg = 'get_js_model_options broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def get_js_model_options(self, request):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def test_get_with_broken_widget_render(self):
"""Testing admin_dashboard_view with broken widget.render"""
error_msg = 'render broke'
class BrokenWidget(BaseAdminWidget):
widget_id = 'broken-widget'
def render(self, request):
raise Exception(error_msg)
self._test_broken_widget(BrokenWidget, error_msg)
def _test_broken_widget(self, widget_cls, expected_msg):
"""Test that a broken widget doesn't break the dashboard view.
Args:
widget_cls (type):
The broken widget to register and test against.
            expected_msg (unicode):
The expected error message raised by the broken method.
"""
admin_widgets_registry.register(widget_cls)
self.spy_on(logger.exception)
try:
self.client.login(username='admin', password='admin')
response = self.client.get('/admin/')
self.assertEqual(response.status_code, 200)
self.assertIn('page_model_attrs', response.context)
widgets = response.context['page_model_attrs']['widgetsData']
self.assertEqual(len(widgets), len(admin_widgets_registry) - 1)
self.assertNotEqual(widgets[-1]['id'], widget_cls.widget_id)
self.assertNotIn(
b'id="admin-widget-%s"' % widget_cls.widget_id.encode('utf-8'),
response.content)
spy_call = logger.exception.last_call
self.assertEqual(spy_call.args[0],
'Error setting up administration widget %r: %s')
self.assertEqual(spy_call.args[1], widget_cls)
self.assertIsInstance(spy_call.args[2], Exception)
self.assertEqual(six.text_type(spy_call.args[2]), expected_msg)
finally:
admin_widgets_registry.unregister(widget_cls)
|
|
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a deps.js file, a manifest in dependency
order, a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
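Example (hypothetical paths and namespace, for illustration only):
  %prog --root closure-library/ --root myapp/js/ \
      --namespace myapp.main --output_mode list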
"""
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
default=[],
help='One or more input files to calculate dependencies '
'for. The namespaces in this file will be combined with '
'those given with the -n flag to form the set of '
'namespaces to find dependencies for.')
parser.add_option('-n',
'--namespace',
dest='namespaces',
action='append',
default=[],
help='One or more namespaces to calculate dependencies '
'for. These namespaces will be combined with those given '
'with the -i flag to form the set of namespaces to find '
'dependencies for. A Closure namespace is a '
'dot-delimited path expression declared with a call to '
'goog.provide() (e.g. "goog.array" or "foo.bar").')
parser.add_option('--root',
dest='roots',
action='append',
default=[],
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
type='choice',
action='store',
choices=['list', 'script', 'compiled'],
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'files, or "compiled" to produce compiled output with '
'the Closure Compiler. Default is "list".')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flags',
dest='compiler_flags',
default=[],
action='append',
help='Additional flags to pass to the Closure compiler.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
    The source from sources identified by path, or None if not found.
    Both paths are converted to absolute paths for comparison.
"""
for js_source in sources:
# Convert both to absolute paths for comparison.
if os.path.abspath(path) == os.path.abspath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
"""Given a set of sources, returns the one base.js file.
  Note that if zero base.js files or more than one are found, an error
  message will be written and the program will exit.
Args:
sources: An iterable of _PathSource objects.
Returns:
The _PathSource representing the base Closure file.
"""
  filtered_base_files = list(filter(_IsClosureBaseFile, sources))
if not filtered_base_files:
logging.error('No Closure base.js file found.')
sys.exit(1)
if len(filtered_base_files) > 1:
    logging.error('More than one Closure base.js file found at these paths:')
for base_file in filtered_base_files:
logging.error(base_file.GetPath())
sys.exit(1)
return filtered_base_files[0]
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
if os.path.basename(js_source.GetPath()) == 'base.js':
# Sanity check that this is the Closure base file. Check that this
# is where goog is defined.
for line in js_source.GetSource().splitlines():
if line.startswith('var goog = goog || {};'):
return True
return False
class _PathSource(source.Source):
"""Source file subclass that remembers its file path."""
def __init__(self, path):
"""Initialize a source.
Args:
path: str, Path to a JavaScript file. The source string will be read
from this file.
"""
super(_PathSource, self).__init__(source.GetFileContents(path))
self._path = path
def GetPath(self):
"""Returns the path."""
return self._path
def main():
logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
level=logging.INFO)
options, args = _GetOptionsParser().parse_args()
# Make our output pipe.
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
sources = set()
logging.info('Scanning paths...')
for path in options.roots:
for js_path in treescan.ScanTreeForJsFiles(path):
sources.add(_PathSource(js_path))
# Add scripts specified on the command line.
for js_path in args:
sources.add(_PathSource(js_path))
logging.info('%s sources scanned.', len(sources))
# Though deps output doesn't need to query the tree, we still build it
# to validate dependencies.
  logging.info('Building dependency tree...')
tree = depstree.DepsTree(sources)
input_namespaces = set()
inputs = options.inputs or []
for input_path in inputs:
js_input = _GetInputByPath(input_path, sources)
if not js_input:
logging.error('No source matched input %s', input_path)
sys.exit(1)
input_namespaces.update(js_input.provides)
input_namespaces.update(options.namespaces)
if not input_namespaces:
logging.error('No namespaces found. At least one namespace must be '
'specified with the --namespace or --input flags.')
sys.exit(2)
# The Closure Library base file must go first.
base = _GetClosureBaseFile(sources)
deps = [base] + tree.GetDependencies(input_namespaces)
output_mode = options.output_mode
if output_mode == 'list':
out.writelines([js_source.GetPath() + '\n' for js_source in deps])
elif output_mode == 'script':
out.writelines([js_source.GetSource() for js_source in deps])
elif output_mode == 'compiled':
# Make sure a .jar is specified.
if not options.compiler_jar:
      logging.error('--compiler_jar flag must be specified if --output_mode '
                    'is "compiled"')
sys.exit(2)
compiled_source = jscompiler.Compile(
options.compiler_jar,
[js_source.GetPath() for js_source in deps],
options.compiler_flags)
if compiled_source is None:
logging.error('JavaScript compilation failed.')
sys.exit(1)
else:
logging.info('JavaScript compilation succeeded.')
out.write(compiled_source)
else:
    logging.error('Invalid value for --output_mode flag.')
sys.exit(2)
if __name__ == '__main__':
main()
|
|
from Cocoa import NSStringPboardType, NSDragOperationMove, NSArray
import vanilla
from defconAppKit.windows.baseWindow import BaseWindowController
from defconAppKit.windows.progressWindow import ProgressWindow
from lib.scripting.codeEditor import CodeEditor
from robofab.world import AllFonts
class ProcessFonts(BaseWindowController):
def __init__(self, message="Choose Fonts", function=None, show_results=True, width=400, height=300):
"""Open a window containing a list of all open fonts, to select some fonts and process them with a supplied function.
message: The title to display in the title bar, e. g. the name of your script
function: A function that will be called for each selected font with the RFont as its argument
show_results: Boolean to indicate if your function returns a result which should be displayed in the result box
width: The initial width of the window (optional)
height: The initial height of the window (optional)
Select and double-click rows in the result list to copy them to the pasteboard."""
self.w = vanilla.Window(
(width, height),
message,
(400, 300),
)
self.function = function
column_descriptions = [
{
"title": "Font",
"typingSensitive": True,
"editable": False,
},
{
"title": "Result",
},
]
self.w.message = vanilla.TextBox(
(10, 10, -10, 30),
"Select fonts to process:",
)
self.w.font_list = vanilla.List(
(10, 40, -10, 100),
[],
columnDescriptions = column_descriptions,
drawFocusRing = True,
allowsMultipleSelection = True,
doubleClickCallback = self.copy_result,
selectionCallback = self.show_result,
selfDropSettings = {
"type": NSStringPboardType,
"operation": NSDragOperationMove,
"callback": self.list_drop,
},
dragSettings = {
"type": NSStringPboardType,
"dropDataFormat": None,
"callback": self.list_drag,
},
)
self.w.result_box = CodeEditor((10, 150, -10, -42), "", lexer="text")
self.w.copy_button = vanilla.Button(
(10, -32, 110, -10),
"Copy results",
callback = self.copy_result,
)
self.w.cancel_button = vanilla.Button(
(-180, -32, -100, -10),
"Cancel",
callback = self.cancel,
)
self.w.ok_button = vanilla.Button(
(-90, -32, -10, -10),
"Process",
callback = self.ok,
)
self.setUpBaseWindowBehavior()
self._drag_src_rows = []
self.update_font_list()
self.w.open()
def cancel(self, sender=None):
self.w.close()
def ok(self, sender=None):
self.w.cancel_button.enable(False)
self.w.ok_button.enable(False)
self.w.copy_button.enable(False)
fonts = self.w.font_list.getSelection()
progress = ProgressWindow(
"",
tickCount = len(fonts),
parentWindow = self.w,
)
results = []
        all_fonts = AllFonts()
        for i in range(len(all_fonts)):
            font = all_fonts[i]
if i in fonts:
progress.update("Processing %s %s ..." % (font.info.familyName, font.info.styleName))
result = self.function(font)
if result is None:
result = "Unknown"
results.append(
{
"Font": "%s %s" % (font.info.familyName, font.info.styleName),
"Result": result,
}
)
else:
results.append(
{
"Font": "%s %s" % (font.info.familyName, font.info.styleName),
"Result": self.w.font_list.get()[i]["Result"],
}
)
progress.close()
self.w.font_list.set(results)
self.w.font_list.setSelection(fonts)
self.w.cancel_button.setTitle("Close")
self.w.cancel_button.enable(True)
self.w.ok_button.enable(True)
self.w.copy_button.enable(True)
def copy_result(self, sender):
        from AppKit import NSPasteboard
s = u""
results = self.w.font_list.getSelection()
for i in results:
s += self.w.font_list.get()[i]["Font"] + "\n\n"
s += self.w.font_list.get()[i]["Result"] + "\n\n\n"
pb = NSPasteboard.generalPasteboard()
pb.clearContents()
a = NSArray.arrayWithObject_(s.strip("\n"))
pb.writeObjects_(a)
def show_result(self, sender=None):
results = self.w.font_list.getSelection()
if len(results) > 0:
self.w.result_box.set(self.w.font_list.get()[results[0]]["Result"])
selection_empty = False
else:
self.w.result_box.set("")
selection_empty = True
if selection_empty:
self.w.ok_button.enable(False)
self.w.copy_button.enable(False)
else:
self.w.ok_button.enable(True)
self.w.copy_button.enable(True)
def update_font_list(self):
        self.w.font_list.set(
            [
                {
                    "Font": "%s %s" % (
                        font.info.familyName,
                        font.info.styleName
                    ),
                    "Result": ""
                } for font in AllFonts()
]
)
def list_drag(self, sender=None, drop_info=None):
self._drag_src_rows = drop_info
print "Drag Source Rows:", drop_info
data = [self.w.font_list[i] for i in drop_info]
return data
def list_drop(self, sender=None, drop_info=None):
if drop_info is not None:
if drop_info["isProposal"]:
# TODO: check if drop is acceptable
return True
else:
print "DEBUG: dropped item in position %i" % drop_info["rowIndex"]
print " Data: %s" % drop_info["data"]
print drop_info
# TODO: accept the drop (actually do something)
self.insert_data(drop_info["data"], drop_info["rowIndex"])
return True
def delete_rows(self, row_index_list):
        # Delete from the highest index down so earlier deletions don't
        # shift the remaining indexes.
        for row_index in sorted(row_index_list, reverse=True):
            del self.w.font_list[row_index]
def insert_data(self, data, row_index):
print "insert_data"
#print type(data), data
for i in range(len(data)-1, -1, -1):
print "Insert:", row_index, i, data[i]
self.w.font_list.insert(row_index, data[i])
def windowCloseCallback(self, sender):
super(ProcessFonts, self).windowCloseCallback(sender)
if __name__ == "__main__":
ProcessFonts()
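# A minimal usage sketch (hypothetical callback): the supplied function
# receives each selected RFont, and its return value is shown in the
# "Result" column.
#     def count_glyphs(font):
#         return "%d glyphs" % len(font)
#     ProcessFonts("Count Glyphs", count_glyphs)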
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A type for representing values that may or may not exist."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.data.util import structure
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("experimental.Optional", "data.experimental.Optional")
@deprecation.deprecated_endpoints("data.experimental.Optional")
@six.add_metaclass(abc.ABCMeta)
class Optional(composite_tensor.CompositeTensor):
"""Represents a value that may or may not be present.
A `tf.experimental.Optional` can represent the result of an operation that may
fail as a value, rather than raising an exception and halting execution. For
example, `tf.data.Iterator.get_next_as_optional()` returns a
`tf.experimental.Optional` that either contains the next element of an
iterator if one exists, or an "empty" value that indicates the end of the
sequence has been reached.
`tf.experimental.Optional` can only be used with values that are convertible
to `tf.Tensor` or `tf.CompositeTensor`.
One can create a `tf.experimental.Optional` from a value using the
`from_value()` method:
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
or without a value using the `empty()` method:
>>> optional = tf.experimental.Optional.empty(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)
"""
@abc.abstractmethod
def has_value(self, name=None):
"""Returns a tensor that evaluates to `True` if this optional has a value.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.bool`.
"""
raise NotImplementedError("Optional.has_value()")
@abc.abstractmethod
def get_value(self, name=None):
"""Returns the value wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates to
`False`), this operation will raise `tf.errors.InvalidArgumentError` at
runtime.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
Args:
name: (Optional.) A name for the created operation.
Returns:
The wrapped value.
"""
raise NotImplementedError("Optional.get_value()")
@abc.abstractproperty
def element_spec(self):
"""The type specification of an element of this optional.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.element_spec)
tf.TensorSpec(shape=(), dtype=tf.int32, name=None)
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this optional, specifying the type of individual components.
"""
raise NotImplementedError("Optional.element_spec")
@staticmethod
def empty(element_spec):
"""Returns an `Optional` that has no value.
NOTE: This method takes an argument that defines the structure of the value
that would be contained in the returned `Optional` if it had a value.
>>> optional = tf.experimental.Optional.empty(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)
Args:
element_spec: A nested structure of `tf.TypeSpec` objects matching the
structure of an element of this optional.
Returns:
A `tf.experimental.Optional` with no value.
"""
return _OptionalImpl(gen_dataset_ops.optional_none(), element_spec)
@staticmethod
def from_value(value):
"""Returns a `tf.experimental.Optional` that wraps the given value.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
Args:
value: A value to wrap. The value must be convertible to `tf.Tensor` or
`tf.CompositeTensor`.
Returns:
A `tf.experimental.Optional` that wraps `value`.
"""
with ops.name_scope("optional") as scope:
with ops.name_scope("value"):
element_spec = structure.type_spec_from_value(value)
encoded_value = structure.to_tensor_list(element_spec, value)
return _OptionalImpl(
gen_dataset_ops.optional_from_value(encoded_value, name=scope),
element_spec)
class _OptionalImpl(Optional):
"""Concrete implementation of `tf.experimental.Optional`.
NOTE(mrry): This implementation is kept private, to avoid defining
`Optional.__init__()` in the public API.
"""
def __init__(self, variant_tensor, element_spec):
self._variant_tensor = variant_tensor
self._element_spec = element_spec
def has_value(self, name=None):
return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)
def get_value(self, name=None):
# TODO(b/110122868): Consolidate the restructuring logic with similar logic
# in `Iterator.get_next()` and `StructuredFunctionWrapper`.
with ops.name_scope(name, "OptionalGetValue",
[self._variant_tensor]) as scope:
return structure.from_tensor_list(
self._element_spec,
gen_dataset_ops.optional_get_value(
self._variant_tensor,
name=scope,
output_types=structure.get_flat_tensor_types(
self._element_spec),
output_shapes=structure.get_flat_tensor_shapes(
self._element_spec)))
@property
def element_spec(self):
return self._element_spec
@property
def _type_spec(self):
return OptionalSpec.from_value(self)
@tf_export(
"OptionalSpec", v1=["OptionalSpec", "data.experimental.OptionalStructure"])
class OptionalSpec(type_spec.TypeSpec):
"""Type specification for `tf.experimental.Optional`.
For instance, `tf.OptionalSpec` can be used to define a tf.function that takes
`tf.experimental.Optional` as an input argument:
>>> @tf.function(input_signature=[tf.OptionalSpec(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])
... def maybe_square(optional):
... if optional.has_value():
... x = optional.get_value()
... return x * x
... return -1
>>> optional = tf.experimental.Optional.from_value(5)
>>> print(maybe_square(optional))
tf.Tensor(25, shape=(), dtype=int32)
Attributes:
element_spec: A nested structure of `TypeSpec` objects that represents the
type specification of the optional element.
"""
__slots__ = ["_element_spec"]
def __init__(self, element_spec):
self._element_spec = element_spec
@property
def value_type(self):
return _OptionalImpl
def _serialize(self):
return (self._element_spec,)
@property
def _component_specs(self):
return [tensor_spec.TensorSpec((), dtypes.variant)]
def _to_components(self, value):
return [value._variant_tensor] # pylint: disable=protected-access
def _from_components(self, flat_value):
# pylint: disable=protected-access
return _OptionalImpl(flat_value[0], self._element_spec)
@staticmethod
def from_value(value):
return OptionalSpec(value.element_spec)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
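# A minimal sketch (illustrative, using the private component API above) of
# how an Optional round-trips through its spec:
#   opt = Optional.from_value(42)
#   spec = OptionalSpec.from_value(opt)
#   rebuilt = spec._from_components(spec._to_components(opt))
#   # rebuilt.get_value() then evaluates to 42.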
|
|
# Copyright (c) 2013 Huawei Technologies Co., Ltd.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common class for Huawei HVS storage drivers."""
import base64
import cookielib
import json
import time
import urllib2
import uuid
from xml.etree import ElementTree as ET
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QOS_KEY = ["Qos-high", "Qos-normal", "Qos-low"]
TIER_KEY = ["Tier-high", "Tier-normal", "Tier-low"]
class HVSCommon(object):
"""Common class for Huawei OceanStor HVS storage system."""
def __init__(self, configuration):
self.configuration = configuration
self.cookie = cookielib.CookieJar()
self.url = None
self.xml_conf = self.configuration.cinder_huawei_conf_file
def call(self, url=False, data=None, method=None):
"""Send requests to HVS server.
Send HTTPS call, get response in JSON.
Convert response into Python Object and return it.
"""
LOG.debug('HVS Request URL: %(url)s' % {'url': url})
LOG.debug('HVS Request Data: %(data)s' % {'data': data})
headers = {"Connection": "keep-alive",
"Content-Type": "application/json"}
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
urllib2.install_opener(opener)
try:
urllib2.socket.setdefaulttimeout(720)
req = urllib2.Request(url, data, headers)
if method:
req.get_method = lambda: method
res = urllib2.urlopen(req).read().decode("utf-8")
LOG.debug('HVS Response Data: %(res)s' % {'res': res})
except Exception as err:
err_msg = _('Bad response from server: %s') % err
LOG.error(err_msg)
raise err
try:
res_json = json.loads(res)
except Exception as err:
LOG.error(_('JSON transfer error'))
raise err
return res_json
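    # Illustrative call pattern used by the helpers below (assumes login has
    # populated self.url):
    #     result = self.call(self.url + "/lun", json.dumps(lun_param), "GET")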
def login(self):
"""Log in HVS array.
        If login fails, the driver sleeps for 30 seconds to avoid
        hammering the server with reconnection attempts.
"""
login_info = self._get_login_info()
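        # The device id is not known before login; "xx" appears to act as a
        # placeholder path segment for this initial sessions request.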
url = login_info['HVSURL'] + "xx/sessions"
data = json.dumps({"username": login_info['UserName'],
"password": login_info['UserPassword'],
"scope": "0"})
result = self.call(url, data)
if (result['error']['code'] != 0) or ("data" not in result):
time.sleep(30)
msg = _("Login error, reason is %s") % result
LOG.error(msg)
raise exception.CinderException(msg)
deviceid = result['data']['deviceid']
self.url = login_info['HVSURL'] + deviceid
return deviceid
def _init_tier_parameters(self, parameters, lunparam):
"""Init the LUN parameters through the volume type "performance"."""
if "tier" in parameters:
smart_tier = parameters['tier']
if smart_tier == 'Tier_high':
lunparam['INITIALDISTRIBUTEPOLICY'] = "1"
elif smart_tier == 'Tier_normal':
lunparam['INITIALDISTRIBUTEPOLICY'] = "2"
elif smart_tier == 'Tier_low':
lunparam['INITIALDISTRIBUTEPOLICY'] = "3"
else:
lunparam['INITIALDISTRIBUTEPOLICY'] = "2"
def _init_lun_parameters(self, name, parameters):
"""Init basic LUN parameters."""
lunparam = {"TYPE": "11",
"NAME": name,
"PARENTTYPE": "216",
"PARENTID": parameters['pool_id'],
"DESCRIPTION": "",
"ALLOCTYPE": parameters['LUNType'],
"CAPACITY": parameters['volume_size'],
"WRITEPOLICY": parameters['WriteType'],
"MIRRORPOLICY": parameters['MirrorSwitch'],
"PREFETCHPOLICY": parameters['PrefetchType'],
"PREFETCHVALUE": parameters['PrefetchValue'],
"DATATRANSFERPOLICY": "1",
"INITIALDISTRIBUTEPOLICY": "0"}
return lunparam
def _init_qos_parameters(self, parameters, lun_param):
"""Init the LUN parameters through the volume type "Qos-xxx"."""
policy_id = None
policy_info = None
if "qos" in parameters:
policy_info = self._find_qos_policy_info(parameters['qos'])
if policy_info:
policy_id = policy_info['ID']
lun_param['IOClASSID'] = policy_info['ID']
qos_level = parameters['qos_level']
if qos_level == 'Qos-high':
lun_param['IOPRIORITY'] = "3"
elif qos_level == 'Qos-normal':
lun_param['IOPRIORITY'] = "2"
elif qos_level == 'Qos-low':
lun_param['IOPRIORITY'] = "1"
else:
lun_param['IOPRIORITY'] = "2"
return (policy_info, policy_id)
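    # Illustrative volume-type mapping (hypothetical extra specs): with
    # {'qos': 'my_policy', 'qos_level': 'Qos-high'}, the LUN is attached to
    # the array-side QoS policy named 'my_policy' and given IOPRIORITY "3".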
def _assert_rest_result(self, result, err_str):
error_code = result['error']['code']
if error_code != 0:
msg = _('%(err)s\nresult: %(res)s') % {'err': err_str,
'res': result}
LOG.error(msg)
raise exception.CinderException(msg)
def _assert_data_in_result(self, result, msg):
if "data" not in result:
err_msg = _('%s "data" was not in result.') % msg
LOG.error(err_msg)
raise exception.CinderException(err_msg)
def _create_volume(self, lun_param):
url = self.url + "/lun"
data = json.dumps(lun_param)
result = self.call(url, data)
msg = 'Create volume error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def create_volume(self, volume):
volume_name = self._encode_name(volume['id'])
config_params = self._parse_volume_type(volume)
# Prepare lun parameters, including qos parameter and tier parameter.
lun_param = self._init_lun_parameters(volume_name, config_params)
self._init_tier_parameters(config_params, lun_param)
policy_info, policy_id = self._init_qos_parameters(config_params,
lun_param)
# Create LUN in array
lunid = self._create_volume(lun_param)
# Enable qos, need to add lun into qos policy
if "qos" in config_params:
lun_list = policy_info['LUNLIST']
lun_list.append(lunid)
if policy_id:
self._update_qos_policy_lunlist(lun_list, policy_id)
else:
LOG.warn(_("Can't find the Qos policy in array"))
# Create lun group and add LUN into to lun group
lungroup_id = self._create_lungroup(volume_name)
self._associate_lun_to_lungroup(lungroup_id, lunid)
return lunid
def _get_volume_size(self, poolinfo, volume):
"""Calculate the volume size.
        The HVS system reports and accepts volume sizes in 512-byte
        sectors, so we convert the requested size in GB to a sector
        count. For example, a 10 GB volume is
        10 * units.Gi / 512 = 20971520 sectors.
        """
        volume_size = units.Gi / 512  # Default: 1 GB expressed in sectors.
if int(volume['size']) != 0:
volume_size = int(volume['size']) * units.Gi / 512
return volume_size
def delete_volume(self, volume):
"""Delete a volume.
        Three steps: first, remove the LUN from its QoS policy; second,
        remove it from the LUN group (and delete the group); third,
        delete the LUN itself.
"""
name = self._encode_name(volume['id'])
lun_id = self._get_volume_by_name(name)
lungroup_id = self._find_lungroup(name)
if lun_id and lungroup_id:
self._delete_lun_from_qos_policy(volume, lun_id)
self._delete_associated_lun_from_lungroup(lungroup_id, lun_id)
self._delete_lungroup(lungroup_id)
self._delete_lun(lun_id)
else:
LOG.warn(_("Can't find lun or lun group in array"))
def _delete_lun_from_qos_policy(self, volume, lun_id):
"""Remove lun from qos policy."""
parameters = self._parse_volume_type(volume)
if "qos" in parameters:
qos = parameters['qos']
policy_info = self._find_qos_policy_info(qos)
if policy_info:
lun_list = policy_info['LUNLIST']
for item in lun_list:
if lun_id == item:
lun_list.remove(item)
self._update_qos_policy_lunlist(lun_list, policy_info['ID'])
def _delete_lun(self, lun_id):
url = self.url + "/lun/" + lun_id
data = json.dumps({"TYPE": "11",
"ID": lun_id})
result = self.call(url, data, "DELETE")
self._assert_rest_result(result, 'delete lun error')
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes)
newuuid = vol_encoded.replace("=", "")
return newuuid
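    # Illustration: the 36-character dashed volume id is re-read as a UUID
    # and urlsafe-base64 encoded; stripping the trailing '==' padding leaves
    # a 22-character name that fits the array's name-length limit.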
def _find_pool_info(self):
root = huawei_utils.parse_xml_file(self.xml_conf)
pool_name = root.findtext('LUN/StoragePool')
if not pool_name:
err_msg = _("Invalid resource pool: %s") % pool_name
LOG.error(err_msg)
raise exception.InvalidInput(err_msg)
url = self.url + "/storagepool"
result = self.call(url, None)
self._assert_rest_result(result, 'Query resource pool error')
poolinfo = {}
if "data" in result:
for item in result['data']:
if pool_name.strip() == item['NAME']:
poolinfo['ID'] = item['ID']
poolinfo['CAPACITY'] = item['USERFREECAPACITY']
poolinfo['TOTALCAPACITY'] = item['USERTOTALCAPACITY']
break
if not poolinfo:
msg = (_('Get pool info error, pool name is:%s') % pool_name)
LOG.error(msg)
raise exception.CinderException(msg)
return poolinfo
def _get_volume_by_name(self, name):
url = self.url + "/lun"
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get volume by name error!')
volume_id = None
if "data" in result:
for item in result['data']:
if name == item['NAME']:
volume_id = item['ID']
break
return volume_id
def _active_snapshot(self, snapshot_id):
activeurl = self.url + "/snapshot/activate"
data = json.dumps({"SNAPSHOTLIST": [snapshot_id]})
result = self.call(activeurl, data)
self._assert_rest_result(result, 'Active snapshot error.')
def _create_snapshot(self, snapshot):
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
LOG.debug('create_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.'
% {'snapshot': snapshot_name,
'volume': volume_name})
lun_id = self._get_volume_by_name(volume_name)
url = self.url + "/snapshot"
data = json.dumps({"TYPE": "27",
"NAME": snapshot_name,
"PARENTTYPE": "11",
"PARENTID": lun_id})
result = self.call(url, data)
msg = 'Create snapshot error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def create_snapshot(self, snapshot):
snapshot_id = self._create_snapshot(snapshot)
self._active_snapshot(snapshot_id)
def _stop_snapshot(self, snapshot):
snapshot_name = self._encode_name(snapshot['id'])
volume_name = self._encode_name(snapshot['volume_id'])
LOG.debug('_stop_snapshot:snapshot name:%(snapshot)s, '
'volume name:%(volume)s.'
% {'snapshot': snapshot_name,
'volume': volume_name})
snapshotid = self._get_snapshotid_by_name(snapshot_name)
stopdata = json.dumps({"ID": snapshotid})
url = self.url + "/snapshot/stop"
result = self.call(url, stopdata, "PUT")
self._assert_rest_result(result, 'Stop snapshot error.')
return snapshotid
def _delete_snapshot(self, snapshotid):
url = self.url + "/snapshot/%s" % snapshotid
data = json.dumps({"TYPE": "27", "ID": snapshotid})
result = self.call(url, data, "DELETE")
self._assert_rest_result(result, 'Delete snapshot error.')
def delete_snapshot(self, snapshot):
snapshotid = self._stop_snapshot(snapshot)
self._delete_snapshot(snapshotid)
def _get_snapshotid_by_name(self, name):
url = self.url + "/snapshot"
data = json.dumps({"TYPE": "27"})
result = self.call(url, data, "GET")
self._assert_rest_result(result, 'Get snapshot id error.')
snapshot_id = None
if "data" in result:
for item in result['data']:
if name == item['NAME']:
snapshot_id = item['ID']
break
return snapshot_id
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
luncopy_id = self._create_luncopy(copy_name,
src_lun, tgt_lun)
try:
self._start_luncopy(luncopy_id)
self._wait_for_luncopy(luncopy_id)
except Exception:
with excutils.save_and_reraise_exception():
self._delete_luncopy(luncopy_id)
self.delete_volume(volume)
self._delete_luncopy(luncopy_id)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
We use LUNcopy to copy a new volume from snapshot.
The time needed increases as volume size does.
"""
snapshot_name = self._encode_name(snapshot['id'])
src_lun_id = self._get_snapshotid_by_name(snapshot_name)
tgt_lun_id = self.create_volume(volume)
luncopy_name = self._encode_name(volume['id'])
self._copy_volume(volume, luncopy_name, src_lun_id, tgt_lun_id)
def create_cloned_volume(self, volume, src_vref):
"""Clone a new volume from an existing volume."""
volume_name = self._encode_name(src_vref['id'])
src_lun_id = self._get_volume_by_name(volume_name)
tgt_lun_id = self.create_volume(volume)
luncopy_name = self._encode_name(volume['id'])
self._copy_volume(volume, luncopy_name, src_lun_id, tgt_lun_id)
def _create_luncopy(self, luncopyname, srclunid, tgtlunid):
"""Create a luncopy."""
url = self.url + "/luncopy"
data = json.dumps({"TYPE": "219",
"NAME": luncopyname,
"DESCRIPTION": luncopyname,
"COPYSPEED": "2",
"LUNCOPYTYPE": "1",
"SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID"
% srclunid),
"TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID"
% tgtlunid)})
result = self.call(url, data)
msg = 'Create lun copy error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _add_host_into_hostgroup(self, host_name, host_ip):
"""Associate host to hostgroup.
If host group doesn't exist, create one.
"""
hostgroup_id = self._find_hostgroup(host_name)
if hostgroup_id is None:
hostgroup_id = self._create_hostgroup(host_name)
hostid = self._find_host(host_name)
if hostid is None:
os_type = huawei_utils.get_conf_host_os_type(host_ip,
self.xml_conf)
hostid = self._add_host(host_name, os_type)
self._associate_host_to_hostgroup(hostgroup_id, hostid)
return hostid, hostgroup_id
def _mapping_hostgroup_and_lungroup(self, volume_name,
hostgroup_id, host_id):
"""Add hostgroup and lungroup to view."""
lungroup_id = self._find_lungroup(volume_name)
lun_id = self._get_volume_by_name(volume_name)
view_id = self._find_mapping_view(volume_name)
LOG.debug('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s'
'view_id: %(view_id)s'
% {'lun_group': lungroup_id,
'view_id': view_id})
try:
if view_id is None:
view_id = self._add_mapping_view(volume_name, host_id)
self._associate_hostgroup_to_view(view_id, hostgroup_id)
self._associate_lungroup_to_view(view_id, lungroup_id)
else:
if not self._hostgroup_associated(view_id, hostgroup_id):
self._associate_hostgroup_to_view(view_id, hostgroup_id)
if not self._lungroup_associated(view_id, lungroup_id):
self._associate_lungroup_to_view(view_id, lungroup_id)
except Exception:
with excutils.save_and_reraise_exception():
                self._delete_hostgroup_mapping_view(view_id, hostgroup_id)
self._delete_lungroup_mapping_view(view_id, lungroup_id)
self._delete_mapping_view(view_id)
return lun_id
def _ensure_initiator_added(self, initiator_name, hostid):
added = self._initiator_is_added_to_array(initiator_name)
if not added:
self._add_initiator_to_array(initiator_name)
else:
if self._is_initiator_associated_to_host(initiator_name) is False:
self._associate_initiator_to_host(initiator_name, hostid)
def initialize_connection_iscsi(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
LOG.debug('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.'
% {'initiator_name': initiator_name,
'volume': volume_name})
(iscsi_iqn, target_ip) = self._get_iscsi_params(connector)
        # Create the host group if it doesn't exist.
hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'],
connector['ip'])
self._ensure_initiator_added(initiator_name, hostid)
# Mapping lungroup and hostgroup to view
lun_id = self._mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id, hostid)
hostlunid = self._find_host_lun_id(hostid, lun_id)
LOG.debug("host lun id is %s" % hostlunid)
# Return iSCSI properties.
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = ('%s:%s' % (target_ip, '3260'))
properties['target_iqn'] = iscsi_iqn
properties['target_lun'] = int(hostlunid)
properties['volume_id'] = volume['id']
return {'driver_volume_type': 'iscsi', 'data': properties}
def initialize_connection_fc(self, volume, connector):
wwns = connector['wwpns']
volume_name = self._encode_name(volume['id'])
LOG.debug('initiator name:%(initiator_name)s, '
'volume name:%(volume)s.'
% {'initiator_name': wwns,
'volume': volume_name})
# Create host group if not exist
hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'],
connector['ip'])
free_wwns = self._get_connected_free_wwns()
LOG.debug("the free wwns %s" % free_wwns)
for wwn in wwns:
if wwn in free_wwns:
self._add_fc_port_to_host(hostid, wwn)
lun_id = self._mapping_hostgroup_and_lungroup(volume_name,
hostgroup_id, hostid)
host_lun_id = self._find_host_lun_id(hostid, lun_id)
tgt_port_wwns = []
for wwn in wwns:
tgtwwpns = self._get_fc_target_wwpns(wwn)
if tgtwwpns:
tgt_port_wwns.append(tgtwwpns)
# Return FC properties.
properties = {}
properties['target_discovered'] = False
properties['target_wwn'] = tgt_port_wwns
properties['target_lun'] = int(host_lun_id)
properties['volume_id'] = volume['id']
LOG.debug("the fc server properties is:%s" % properties)
return {'driver_volume_type': 'fibre_channel',
'data': properties}
def _get_iscsi_tgt_port(self):
url = self.url + "/iscsidevicename"
result = self.call(url, None)
msg = 'Get iSCSI target port error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data'][0]['CMO_ISCSI_DEVICE_NAME']
def _find_hostgroup(self, groupname):
"""Get the given hostgroup id."""
url = self.url + "/hostgroup"
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get host group information error.')
host_group_id = None
if "data" in result:
for item in result['data']:
if groupname == item['NAME']:
host_group_id = item['ID']
break
return host_group_id
def _find_lungroup(self, lungroupname):
"""Get the given hostgroup id."""
url = self.url + "/lungroup"
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Get lun group information error.')
lun_group_id = None
if 'data' in result:
for item in result['data']:
if lungroupname == item['NAME']:
lun_group_id = item['ID']
break
return lun_group_id
def _create_hostgroup(self, hostgroupname):
url = self.url + "/hostgroup"
data = json.dumps({"TYPE": "14", "NAME": hostgroupname})
result = self.call(url, data)
msg = 'Create host group error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _create_lungroup(self, lungroupname):
url = self.url + "/lungroup"
data = json.dumps({"DESCRIPTION": lungroupname,
"NAME": lungroupname})
result = self.call(url, data)
msg = 'Create lun group error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']['ID']
def _delete_lungroup(self, lungroupid):
url = self.url + "/LUNGroup/" + lungroupid
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Delete lun group error.')
def _lungroup_associated(self, viewid, lungroupid):
        url_suffix = ("/mappingview/associate?TYPE=245&"
                      "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroupid)
        url = self.url + url_suffix
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Check lun group associated error.')
if "data" in result:
for item in result['data']:
if viewid == item['ID']:
return True
return False
def _hostgroup_associated(self, viewid, hostgroupid):
        url_suffix = ("/mappingview/associate?TYPE=245&"
                      "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroupid)
        url = self.url + url_suffix
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Check host group associated error.')
if "data" in result:
for item in result['data']:
if viewid == item['ID']:
return True
return False
def _find_host_lun_id(self, hostid, lunid):
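        # Brief fixed delay, presumably to let the array publish the new
        # mapping before we query the host's LUN associations.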
time.sleep(2)
url = self.url + ("/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21"
"&ASSOCIATEOBJID=%s" % (hostid))
result = self.call(url, None, "GET")
self._assert_rest_result(result, 'Find host lun id error.')
host_lun_id = 1
if "data" in result:
for item in result['data']:
if lunid == item['ID']:
                    associate_data = item['ASSOCIATEMETADATA']
try:
hostassoinfo = json.loads(associate_data)
host_lun_id = hostassoinfo['HostLUNID']
break
except Exception as err:
msg = _("JSON transfer data error. %s") % err
LOG.error(msg)
raise err
return host_lun_id
def _find_host(self, hostname):
"""Get the given host ID."""
url = self.url + "/host"
data = json.dumps({"TYPE": "21"})
result = self.call(url, data, "GET")
self._assert_rest_result(result, 'Find host in host group error.')
host_id = None
if "data" in result:
for item in result['data']:
if hostname == item['NAME']:
host_id = item['ID']
break
return host_id
    def _add_host(self, hostname, os_type):
        """Add a new host."""
        url = self.url + "/host"
        data = json.dumps({"TYPE": "21",
                           "NAME": hostname,
                           "OPERATIONSYSTEM": os_type})
result = self.call(url, data)
self._assert_rest_result(result, 'Add new host error.')
if "data" in result:
return result['data']['ID']
else:
return None
def _associate_host_to_hostgroup(self, hostgroupid, hostid):
url = self.url + "/host/associate"
data = json.dumps({"ID": hostgroupid,
"ASSOCIATEOBJTYPE": "21",
"ASSOCIATEOBJID": hostid})
result = self.call(url, data)
self._assert_rest_result(result, 'Associate host to host group error.')
def _associate_lun_to_lungroup(self, lungroupid, lunid):
"""Associate lun to lun group."""
url = self.url + "/lungroup/associate"
data = json.dumps({"ID": lungroupid,
"ASSOCIATEOBJTYPE": "11",
"ASSOCIATEOBJID": lunid})
result = self.call(url, data)
self._assert_rest_result(result, 'Associate lun to lun group error.')
def _delete_associated_lun_from_lungroup(self, lungroupid, lunid):
"""Remove lun from lun group."""
url = self.url + ("/lungroup/associate?ID=%s"
"&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=%s"
% (lungroupid, lunid))
result = self.call(url, None, 'DELETE')
self._assert_rest_result(result,
'Delete associated lun from lun group error')
def _initiator_is_added_to_array(self, ininame):
"""Check whether the initiator is already added in array."""
url = self.url + "/iscsi_initiator"
data = json.dumps({"TYPE": "222", "ID": ininame})
result = self.call(url, data, "GET")
self._assert_rest_result(result,
'Check initiator added to array error.')
if "data" in result:
for item in result['data']:
if item["ID"] == ininame:
return True
return False
def _is_initiator_associated_to_host(self, ininame):
"""Check whether the initiator is associated to the host."""
url = self.url + "/iscsi_initiator"
data = json.dumps({"TYPE": "222", "ID": ininame})
result = self.call(url, data, "GET")
self._assert_rest_result(result,
'Check initiator associated to host error.')
if "data" in result:
for item in result['data']:
if item['ID'] == ininame and item['ISFREE'] == "true":
return False
return True
def _add_initiator_to_array(self, ininame):
"""Add a new initiator to storage device."""
url = self.url + "/iscsi_initiator/"
data = json.dumps({"TYPE": "222",
"ID": ininame,
"USECHAP": "False"})
result = self.call(url, data)
self._assert_rest_result(result, 'Add initiator to array error.')
def _associate_initiator_to_host(self, ininame, hostid):
"""Associate initiator with the host."""
url = self.url + "/iscsi_initiator/" + ininame
data = json.dumps({"TYPE": "222",
"ID": ininame,
"USECHAP": "False",
"PARENTTYPE": "21",
"PARENTID": hostid})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Associate initiator to host error.')
def _find_mapping_view(self, name):
"""Find mapping view."""
url = self.url + "/mappingview"
data = json.dumps({"TYPE": "245"})
result = self.call(url, data, "GET")
msg = 'Find map view error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
viewid = None
for item in result['data']:
if name == item['NAME']:
viewid = item['ID']
break
return viewid
def _add_mapping_view(self, name, host_id):
url = self.url + "/mappingview"
data = json.dumps({"NAME": name, "TYPE": "245"})
result = self.call(url, data)
self._assert_rest_result(result, 'Add map view error.')
return result['data']['ID']
def _associate_hostgroup_to_view(self, viewID, hostGroupID):
url = self.url + "/MAPPINGVIEW/CREATE_ASSOCIATE"
data = json.dumps({"ASSOCIATEOBJTYPE": "14",
"ASSOCIATEOBJID": hostGroupID,
"TYPE": "245",
"ID": viewID})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Associate host to view error.')
def _associate_lungroup_to_view(self, viewID, lunGroupID):
url = self.url + "/MAPPINGVIEW/CREATE_ASSOCIATE"
data = json.dumps({"ASSOCIATEOBJTYPE": "256",
"ASSOCIATEOBJID": lunGroupID,
"TYPE": "245",
"ID": viewID})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Associate lun group to view error.')
def _delete_lungroup_mapping_view(self, view_id, lungroup_id):
"""remove lun group associate from the mapping view."""
url = self.url + "/mappingview/REMOVE_ASSOCIATE"
data = json.dumps({"ASSOCIATEOBJTYPE": "256",
"ASSOCIATEOBJID": lungroup_id,
"TYPE": "245",
"ID": view_id})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Delete lun group from view error.')
    def _delete_hostgroup_mapping_view(self, view_id, hostgroup_id):
        """Remove the host group association from the mapping view."""
url = self.url + "/mappingview/REMOVE_ASSOCIATE"
data = json.dumps({"ASSOCIATEOBJTYPE": "14",
"ASSOCIATEOBJID": hostgroup_id,
"TYPE": "245",
"ID": view_id})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Delete host group from view error.')
def _delete_mapping_view(self, view_id):
"""remove mapping view from the storage."""
url = self.url + "/mappingview/" + view_id
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Delete map view error.')
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
initiator_name = connector['initiator']
volume_name = self._encode_name(volume['id'])
host_name = connector['host']
LOG.debug('terminate_connection:volume name: %(volume)s, '
'initiator name: %(ini)s.'
% {'volume': volume_name,
'ini': initiator_name})
view_id = self._find_mapping_view(volume_name)
hostgroup_id = self._find_hostgroup(host_name)
lungroup_id = self._find_lungroup(volume_name)
if view_id is not None:
            self._delete_hostgroup_mapping_view(view_id, hostgroup_id)
self._delete_lungroup_mapping_view(view_id, lungroup_id)
self._delete_mapping_view(view_id)
def login_out(self):
"""logout the session."""
url = self.url + "/sessions"
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Log out of session error.')
def _start_luncopy(self, luncopyid):
"""Start a LUNcopy."""
url = self.url + "/LUNCOPY/start"
data = json.dumps({"TYPE": "219", "ID": luncopyid})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Start lun copy error.')
def _get_capacity(self):
"""Get free capacity and total capacity of the pools."""
poolinfo = self._find_pool_info()
pool_capacity = {'total_capacity': 0.0,
                         'free_capacity': 0.0}
if poolinfo:
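            # Capacities are reported in 512-byte sectors; dividing by
            # 1024 * 1024 * 2 converts sectors to GiB
            # (sectors * 512 / 1024**3 == sectors / 2097152).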
total = int(poolinfo['TOTALCAPACITY']) / 1024.0 / 1024.0 / 2
free = int(poolinfo['CAPACITY']) / 1024.0 / 1024.0 / 2
pool_capacity['total_capacity'] = total
pool_capacity['free_capacity'] = free
return pool_capacity
def _get_lun_conf_params(self):
"""Get parameters from config file for creating lun."""
# Default lun set information
lunsetinfo = {'LUNType': 'Thick',
'StripUnitSize': '64',
'WriteType': '1',
'MirrorSwitch': '1',
'PrefetchType': '3',
'PrefetchValue': '0',
'PrefetchTimes': '0'}
root = huawei_utils.parse_xml_file(self.xml_conf)
luntype = root.findtext('LUN/LUNType')
if luntype:
if luntype.strip() in ['Thick', 'Thin']:
lunsetinfo['LUNType'] = luntype.strip()
if luntype.strip() == 'Thick':
lunsetinfo['LUNType'] = 0
if luntype.strip() == 'Thin':
lunsetinfo['LUNType'] = 1
            else:
err_msg = (_('Config file is wrong. LUNType must be "Thin"'
' or "Thick". LUNType:%(fetchtype)s')
% {'fetchtype': luntype})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
stripunitsize = root.findtext('LUN/StripUnitSize')
if stripunitsize is not None:
lunsetinfo['StripUnitSize'] = stripunitsize.strip()
writetype = root.findtext('LUN/WriteType')
if writetype is not None:
lunsetinfo['WriteType'] = writetype.strip()
mirrorswitch = root.findtext('LUN/MirrorSwitch')
if mirrorswitch is not None:
lunsetinfo['MirrorSwitch'] = mirrorswitch.strip()
        prefetch = root.find('LUN/Prefetch')
        if prefetch is not None and prefetch.attrib['Type']:
            fetchtype = prefetch.attrib['Type']
if fetchtype in ['0', '1', '2', '3']:
lunsetinfo['PrefetchType'] = fetchtype.strip()
typevalue = prefetch.attrib['Value'].strip()
if lunsetinfo['PrefetchType'] == '1':
lunsetinfo['PrefetchValue'] = typevalue
elif lunsetinfo['PrefetchType'] == '2':
lunsetinfo['PrefetchValue'] = typevalue
else:
                err_msg = (_('PrefetchType config is wrong. PrefetchType'
                             ' must be in 0,1,2,3. fetchtype is: %(fetchtype)s')
% {'fetchtype': fetchtype})
LOG.error(err_msg)
raise exception.CinderException(err_msg)
else:
LOG.debug('Use default prefetch fetchtype. '
'Prefetch fetchtype:Intelligent.')
return lunsetinfo
def _wait_for_luncopy(self, luncopyid):
"""Wait for LUNcopy to complete."""
while True:
luncopy_info = self._get_luncopy_info(luncopyid)
if luncopy_info['status'] == '40':
break
elif luncopy_info['state'] != '1':
                err_msg = (_('_wait_for_luncopy: LUNcopy status is not '
                             'normal. LUNcopy name: %(luncopyname)s')
% {'luncopyname': luncopyid})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
time.sleep(10)
def _get_luncopy_info(self, luncopyid):
"""Get LUNcopy information."""
url = self.url + "/LUNCOPY?range=[0-100000]"
data = json.dumps({"TYPE": "219", })
result = self.call(url, data, "GET")
self._assert_rest_result(result, 'Get lun copy information error.')
luncopyinfo = {}
if "data" in result:
for item in result['data']:
if luncopyid == item['ID']:
luncopyinfo['name'] = item['NAME']
luncopyinfo['id'] = item['ID']
luncopyinfo['state'] = item['HEALTHSTATUS']
luncopyinfo['status'] = item['RUNNINGSTATUS']
break
return luncopyinfo
def _delete_luncopy(self, luncopyid):
"""Delete a LUNcopy."""
url = self.url + "/LUNCOPY/%s" % luncopyid
result = self.call(url, None, "DELETE")
self._assert_rest_result(result, 'Delete lun copy error.')
def _get_connected_free_wwns(self):
"""Get free connected FC port WWNs.
        If no new FC ports are connected, return an empty list.
"""
url = self.url + "/fc_initiator?ISFREE=true&range=[0-1000]"
result = self.call(url, None, "GET")
msg = 'Get connected free FC wwn error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
wwns = []
for item in result['data']:
wwns.append(item['ID'])
return wwns
def _add_fc_port_to_host(self, hostid, wwn, multipathtype=0):
"""Add a FC port to the host."""
url = self.url + "/fc_initiator/" + wwn
data = json.dumps({"TYPE": "223",
"ID": wwn,
"PARENTTYPE": 21,
"PARENTID": hostid})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Add FC port to host error.')
def _get_iscsi_port_info(self, ip):
"""Get iscsi port info in order to build the iscsi target iqn."""
url = self.url + "/eth_port"
result = self.call(url, None, "GET")
msg = 'Get iSCSI port information error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
iscsi_port_info = None
for item in result['data']:
if ip == item['IPV4ADDR']:
iscsi_port_info = item['LOCATION']
break
if not iscsi_port_info:
msg = (_('_get_iscsi_port_info: Failed to get iscsi port info '
'through config IP %(ip)s, please check config file.')
% {'ip': ip})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return iscsi_port_info
def _get_iscsi_conf(self):
"""Get iSCSI info from config file."""
iscsiinfo = {}
root = huawei_utils.parse_xml_file(self.xml_conf)
iscsiinfo['DefaultTargetIP'] = \
root.findtext('iSCSI/DefaultTargetIP').strip()
        initiator_list = []
        for dic in root.findall('iSCSI/Initiator'):
            # Build a fresh dict per initiator so entries do not
            # overwrite each other, stripping attribute values.
            tmp_dic = {}
            for k, v in dic.items():
                tmp_dic[k] = v.strip()
            initiator_list.append(tmp_dic)
iscsiinfo['Initiator'] = initiator_list
return iscsiinfo
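    # A minimal sketch of the <iSCSI> section this method expects, inferred
    # from the lookups here and in _get_iscsi_params (values illustrative):
    #
    #   <iSCSI>
    #       <DefaultTargetIP>192.168.100.1</DefaultTargetIP>
    #       <Initiator Name="iqn.1993-08.org.debian:01:abcdef"
    #                  TargetIP="192.168.100.2"/>
    #   </iSCSI>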
def _get_tgt_iqn(self, iscsiip):
"""Get target iSCSI iqn."""
LOG.debug('_get_tgt_iqn: iSCSI IP is %s.' % iscsiip)
ip_info = self._get_iscsi_port_info(iscsiip)
iqn_prefix = self._get_iscsi_tgt_port()
split_list = ip_info.split(".")
newstr = split_list[1] + split_list[2]
        if newstr[0] == 'A':
            ctr = "0"
        elif newstr[0] == 'B':
            ctr = "1"
        else:
            msg = (_('_get_tgt_iqn: Unexpected port location %s.') % ip_info)
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
interface = '0' + newstr[1]
port = '0' + newstr[3]
iqn_suffix = ctr + '02' + interface + port
for i in range(0, len(iqn_suffix)):
if iqn_suffix[i] != '0':
iqn_suffix = iqn_suffix[i:]
break
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
LOG.debug('_get_tgt_iqn: iSCSI target iqn is %s' % iqn)
return iqn
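    # Illustrative walk-through (the LOCATION format is assumed): for an
    # ip_info such as "CTE0.A1.P3", split_list[1] + split_list[2] yields
    # "A1P3": controller "0" (from 'A'), interface "01", port "03", so the
    # suffix "0020103" is trimmed of leading zeros to "20103".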
def _get_fc_target_wwpns(self, wwn):
url = (self.url +
"/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn)
result = self.call(url, None, "GET")
msg = 'Get FC target wwpn error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
fc_wwpns = None
for item in result['data']:
if wwn == item['INITIATOR_PORT_WWN']:
fc_wwpns = item['TARGET_PORT_WWN']
break
return fc_wwpns
def _parse_volume_type(self, volume):
type_id = volume['volume_type_id']
params = self._get_lun_conf_params()
LOG.debug('_parse_volume_type: type id: %(type_id)s '
'config parameter is: %(params)s'
% {'type_id': type_id,
'params': params})
poolinfo = self._find_pool_info()
volume_size = self._get_volume_size(poolinfo, volume)
params['volume_size'] = volume_size
params['pool_id'] = poolinfo['ID']
if type_id is not None:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for key, value in specs.iteritems():
key_split = key.split(':')
if len(key_split) > 1:
if key_split[0] == 'drivers':
key = key_split[1]
else:
continue
else:
key = key_split[0]
if key in QOS_KEY:
params["qos"] = value.strip()
params["qos_level"] = key
elif key in TIER_KEY:
params["tier"] = value.strip()
elif key in params.keys():
params[key] = value.strip()
else:
conf = self.configuration.cinder_huawei_conf_file
LOG.warn(_('_parse_volume_type: Unacceptable parameter '
'%(key)s. Please check this key in extra_specs '
'and make it consistent with the configuration '
'file %(conf)s.') % {'key': key, 'conf': conf})
LOG.debug("The config parameters are: %s" % params)
return params
def update_volume_stats(self, refresh=False):
capacity = self._get_capacity()
data = {}
data['vendor_name'] = 'Huawei'
data['total_capacity_gb'] = capacity['total_capacity']
data['free_capacity_gb'] = capacity['free_capacity']
data['reserved_percentage'] = 0
data['QoS_support'] = True
data['Tier_support'] = True
return data
def _find_qos_policy_info(self, policy_name):
url = self.url + "/ioclass"
result = self.call(url, None, "GET")
msg = 'Get qos policy error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
qos_info = {}
for item in result['data']:
if policy_name == item['NAME']:
qos_info['ID'] = item['ID']
lun_list = json.loads(item['LUNLIST'])
qos_info['LUNLIST'] = lun_list
break
return qos_info
def _update_qos_policy_lunlist(self, lunlist, policy_id):
url = self.url + "/ioclass/" + policy_id
data = json.dumps({"TYPE": "230",
"ID": policy_id,
"LUNLIST": lunlist})
result = self.call(url, data, "PUT")
        self._assert_rest_result(result, 'Update qos policy error.')
def _get_login_info(self):
"""Get login IP, username and password from config file."""
logininfo = {}
filename = self.configuration.cinder_huawei_conf_file
tree = ET.parse(filename)
root = tree.getroot()
logininfo['HVSURL'] = root.findtext('Storage/HVSURL').strip()
need_encode = False
for key in ['UserName', 'UserPassword']:
node = root.find('Storage/%s' % key)
node_text = node.text
# Prefix !$$$ means encoded already.
            if node_text.startswith('!$$$'):
logininfo[key] = base64.b64decode(node_text[4:])
else:
logininfo[key] = node_text
node.text = '!$$$' + base64.b64encode(node_text)
need_encode = True
if need_encode:
self._change_file_mode(filename)
try:
tree.write(filename, 'UTF-8')
except Exception as err:
LOG.warn(_('%s') % err)
return logininfo
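    # Illustrative only: once read in plain text, credentials are rewritten
    # base64-encoded behind a '!$$$' prefix, e.g.
    #   'admin' -> '!$$$' + base64.b64encode('admin') == '!$$$YWRtaW4='
    # and transparently decoded on subsequent reads.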
def _change_file_mode(self, filepath):
utils.execute('chmod', '777', filepath, run_as_root=True)
def _check_conf_file(self):
"""Check the config file, make sure the essential items are set."""
root = huawei_utils.parse_xml_file(self.xml_conf)
check_list = ['Storage/HVSURL', 'Storage/UserName',
'Storage/UserPassword']
for item in check_list:
if not huawei_utils.is_xml_item_exist(root, item):
err_msg = (_('_check_conf_file: Config file invalid. '
'%s must be set.') % item)
LOG.error(err_msg)
raise exception.InvalidInput(reason=err_msg)
# make sure storage pool is set
if not huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool'):
err_msg = _('_check_conf_file: Config file invalid. '
'StoragePool must be set.')
LOG.error(err_msg)
raise exception.InvalidInput(reason=err_msg)
# make sure host os type valid
if huawei_utils.is_xml_item_exist(root, 'Host', 'OSType'):
os_list = huawei_utils.os_type.keys()
if not huawei_utils.is_xml_item_valid(root, 'Host', os_list,
'OSType'):
err_msg = (_('_check_conf_file: Config file invalid. '
'Host OSType invalid.\n'
'The valid values are: %(os_list)s')
% {'os_list': os_list})
LOG.error(err_msg)
raise exception.InvalidInput(reason=err_msg)
def _get_iscsi_params(self, connector):
"""Get target iSCSI params, including iqn, IP."""
initiator = connector['initiator']
iscsi_conf = self._get_iscsi_conf()
target_ip = None
for ini in iscsi_conf['Initiator']:
if ini['Name'] == initiator:
target_ip = ini['TargetIP']
break
# If didn't specify target IP for some initiator, use default IP.
if not target_ip:
if iscsi_conf['DefaultTargetIP']:
target_ip = iscsi_conf['DefaultTargetIP']
else:
msg = (_('_get_iscsi_params: Failed to get target IP '
'for initiator %(ini)s, please check config file.')
% {'ini': initiator})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
target_iqn = self._get_tgt_iqn(target_ip)
return (target_iqn, target_ip)
def extend_volume(self, volume, new_size):
name = self._encode_name(volume['id'])
lun_id = self._get_volume_by_name(name)
if lun_id:
url = self.url + "/lun/expand"
capacity = int(new_size) * units.Gi / 512
data = json.dumps({"TYPE": "11",
"ID": lun_id,
"CAPACITY": capacity})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Extend lun error.')
else:
            LOG.warn(_('Cannot find lun in array.'))
|
|
#
#
# =================================================================
# =================================================================
"""VirtualIOServer interface."""
import xml.etree.ElementTree as ElementTree
from xml.etree.ElementTree import Element
from eventlet import greenthread
import os
import errno
from paxes_cinder.k2aclient import _
from paxes_cinder.k2aclient import base
from paxes_cinder.k2aclient.v1 import k2uom
from paxes_cinder.k2aclient.v1 import k2web
from paxes_cinder.k2aclient.exceptions import K2JobFailure
def _create_luarecovery_doc(device_list,
ireliableITL=True,
iversion="1"):
"""Create an luarecovery_doc
:param devlice_list: a list of (ivendor, ideviceID, itls)
:param ivendor: This string must be upper case.
The following are expected vendor strings:
all other cases the attribute should be
set to "OTHER":
1. IBM
2. EMC
3. NETAPP
4. HDS
5. HP
:param ideviceID: a valid page 83h descriptor of the
storage appliance
:param device_list: a list of (iwwpn, twwpn, lua)
:param iwwpn: world wide port name of a VIOS initiator
:param twwpn: world wide port name of a target port on
storage appliance WWPN
:param lua: Logical unit address of a particular piece of
storage (device) that can be accessed from the
target port.
:param ireliableITL: False if the ITL nexus is based on a current
VIOS adapter inventory and True if its based
on an ITL nexus at the time the logical unit
was created.
:param version: string containing requested version of the xml doc,
must be "1"
"""
root = Element("XML_LIST")
# general
general = Element("general")
root.append(general)
version = Element("version")
version.text = iversion
general.append(version)
# reliableITL
reliableITL = Element("reliableITL")
reliableITL.text = "false"
if ireliableITL:
reliableITL.text = "true"
root.append(reliableITL)
# deviceList
deviceList = Element("deviceList")
root.append(deviceList)
for idevice in device_list:
(ivendor, ideviceID, itls) = idevice
device = Element("device")
deviceList.append(device)
# vendor
vendor = Element("vendor")
vendor.text = ivendor
device.append(vendor)
# deviceID
deviceID = Element("deviceID")
deviceID.text = ideviceID
device.append(deviceID)
itlList = Element("itlList")
device.append(itlList)
first = True
for itl in itls:
if first:
number = Element("number")
number.text = "%d" % (len(itls))
itlList.append(number)
first = False
(iIwwpn, iTwwpn, ilua) = itl
itl = Element("itl")
itlList.append(itl)
# Iwwpn
Iwwpn = Element("Iwwpn")
Iwwpn.text = iIwwpn
itl.append(Iwwpn)
# Twwpn
Twwpn = Element("Twwpn")
Twwpn.text = iTwwpn
itl.append(Twwpn)
# Lua
lua = Element("lua")
lua.text = ilua
itl.append(lua)
return root
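# A minimal usage sketch (all identifiers below are made up): build the
# recovery document for one IBM device with a single ITL nexus and
# serialize it for the job request.
#
#   itls = [("c05076aabbcc0000", "5005076802233cac", "8100000000000000")]
#   doc = _create_luarecovery_doc([("IBM", "<page-83h-descriptor>", itls)])
#   xml = ElementTree.tostring(doc)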
def _prepdir(targetdir):
try:
os.makedirs(targetdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(targetdir):
pass
else:
msg = _("during directory preparation,"
" cant create directory: "
" >%s<") % targetdir
raise Exception(msg)
if not os.access(targetdir, os.W_OK):
msg = _("during directory preparation,"
" cant write to directory: "
" >%s<") % targetdir
raise Exception(msg)
class VirtualIOServerManager(base.ManagerWithFind):
"""Manage :class:`VirtualIOServer` resources."""
resource_class = k2uom.VirtualIOServer
def list(self, managedsystem, xa=None):
"""Get a list of all VirtualIOServers for a
particular ManagedSystem accessed through a
particular hmc.
:rtype: list of :class:`VirtualIOServer`.
"""
return self._list("/rest/api/uom/ManagedSystem/%s/VirtualIOServer" %
managedsystem, xa=xa)
def listasroot(self, xa=None):
"""Get a list of all LogicalPartitions
accessed through a particular hmc.
:rtype: list of :class:`LogicalPartition`.
"""
return self._list("/rest/api/uom/VirtualIOServer", xa=xa)
def get(self, managedsystem, virtualioserver, xag=[], xa=None):
"""Get a specific VirtualIOServer.
:param virtualioserver: The ID of the :class:`VirtualIOServer` to get.
:rtype: :class:`VirtualIOServer`
"""
return self._get("/rest/api/uom/ManagedSystem/%s/VirtualIOServer/%s" %
(managedsystem, virtualioserver,), xag=xag, xa=xa)
def getasroot(self, virtualioserver, xag=[], xa=None):
"""Get a specific VirtualIOServer.
:param virtualioserver: The ID of the :class:`VirtualIOServer`.
:rtype: :class:`VirtualIOServer`
"""
return self._get("/rest/api/uom/VirtualIOServer/%s" %
virtualioserver, xag=xag, xa=xa)
########
# non CRUD
def lua_recovery(self, vios, vendor, device_id, itls, xa=None):
"""For the specified device, prep VIOS for file upload.
:param vios: the target VIOS
:param vendor: This string must be upper case.
The following are expected vendor strings:
all other cases the attribute should be
set to "OTHER":
1. IBM
2. EMC
3. NETAPP
4. HDS
5. HP
:param device_id: a valid page 83h descriptor of the
storage appliance
:param itl_list: a list of (iwwpn, twwpn, lua)
:param iwwpn: world wide port name of a VIOS initiator
:param twwpn: world wide port name of a target port on
storage appliance WWPN
:param lua: Logical unit address of a particular piece of
storage (device) that can be accessed from the
target port.
        :returns: a tuple - (jobstatus, version, device_list, job_id),
            where device_list is a list of (status, pvName, uniqueID)
jobstatus: The K2 Job return status
version: "1"
status: "1" This means that a device was found from an ITL nexus
and it was determined to be in use by the VIOS. The most
likely cause is that a virtual device was not removed
from a previous deploy. The VIOS took no action in this
case.
"2" This means that the ITL nexus is not consider reliable
and some error occurred that prevents the VIOS from
determining making a decision on safely removing a
device. The VIOS took no action in this case.
"3" This means the information provided and action taken by
the VIOS was sufficient to find the storage.
pvName, and uniqueID elements will follow in the
data stream.
"4" This means some new storage element was found at the
ITL nexus but VIOS could not determine if it is the
storage of interest to the management tool.
"5" The VIOS could not determine if this is logical unit
of interest to the management tool however did
find a device from the ITL nexus list
"6" The VIOS could not find a device from the itl nexus list.
It could be some SCSI or transport error occurred
during configuration or the ITL nexus list was incorrect.
pvName: If not None this element is the current physical volume name
uniqueID: If not None, this uniquely identifies storage
throughout the SAN and is built based on vendor
prescribed method.
"""
# if necessary, authenticate
if self.api.client.k2operator is None:
self.api.client.authenticate()
jrequest = self.api.web_job.getjob(vios, 'LUARecovery', xa=xa)
device_list = [(vendor, device_id, itls)]
idoc = _create_luarecovery_doc(device_list)
cdata_template = "<![CDATA[%s]]>"
inputXML = cdata_template % ElementTree.tostring(idoc)
jp = k2web.JobParameter()
jp.parameter_name = "inputXML"
jp.parameter_value = inputXML
jpc = k2web.JobParameter_Collection()
jpc.job_parameter.append(jp)
jrequest.job_parameters = jpc
# for jp in jrequest.job_parameters.job_parameter:
# print (jp.parameter_name, jp.parameter_value)
jresponse = self.api.web_job.runjob(vios, jrequest, xa=xa)
k2respi = jresponse._k2resp
        while jresponse.status in ('NOT_STARTED', 'RUNNING'):
greenthread.sleep(1)
jresponse = self.api.web_job.readjob(jresponse.job_id, xa=xa)
if not jresponse.status.startswith("COMPLETED"):
diagfspeci = self.api.exclogger.emit("JOB", "lua_recovery",
k2respi)
diagfspec = self.api.exclogger.emit("JOB", "lua_recovery",
jresponse._k2resp)
msg = _("k2aclient:"
" during lua_recovery,"
" for device_id: >%(device_id)s<, experienced"
" job failure,"
" job_id: >%(jresponse.job_id)s<,"
" status: >%(jresponse.status)s<,"
" input K2 job diagnostics have been"
" written to: >%(diagfspeci)s<,"
" response k2 job diagnostics"
" have been written to: >%(diagfspec)s<")
raise K2JobFailure(msg %
{"device_id": device_id,
"jresponse.job_id": jresponse.job_id,
"jresponse.status": jresponse.status,
"diagfspeci": diagfspeci,
"diagfspec": diagfspec, },
jresponse._k2resp,
diagfspeci=diagfspeci,
diagfspec=diagfspec)
# find "OutputXML" element
# note: apparently also "StdError" and "StdOut" entries
result = None
for jp in jresponse.results.job_parameter:
if jp.parameter_name == "OutputXML":
result = jp.parameter_value
# print (jp.parameter_name, jp.parameter_value)
version = None
device_list = []
if result is not None:
root = ElementTree.fromstring(result)
for child in root:
if child.tag == "version":
version = child.text
elif child.tag == "deviceList":
for gchild in child:
status = None
pv_name = None
unique_id = None
for ggchild in gchild:
if ggchild.tag == "status":
status = ggchild.text
elif ggchild.tag == "pvName":
pv_name = ggchild.text
elif ggchild.tag == "uniqueID":
unique_id = ggchild.text
device_list.append((status, pv_name, unique_id))
return (jresponse.status, version, device_list, jresponse.job_id)
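    # Illustrative only (manager/arguments below are hypothetical):
    #
    #   jobstatus, version, devices, job_id = vios_mgr.lua_recovery(
    #       vios, "IBM", device_id, itls)
    #   for status, pv_name, unique_id in devices:
    #       if status == "3":  # storage found; pvName/uniqueID are set
    #           ...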
|
|
from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from notifications.admin import Notification
from .forms import CustomUserChangeForm, CustomUserCreationForm, NotificationForm
from .models import User as CustomUser, Tipo
from notifications import notify
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class CustomUserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff',
                                       'is_superuser', 'tipos',
                                       'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2')}
),
)
form = CustomUserChangeForm
add_form = CustomUserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'tipos', 'groups')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(CustomUserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(CustomUserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
from django.conf.urls import patterns
return patterns('',
(r'^(\d+)/password/$',
self.admin_site.admin_view(self.user_change_password))
) + super(CustomUserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(CustomUserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
#@transaction.commit_on_success
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(CustomUserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = get_object_or_404(self.queryset(request), pk=id)
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
return HttpResponseRedirect('..')
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': '_popup' in request.REQUEST,
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
return TemplateResponse(request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context, current_app=self.admin_site.name)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and '_popup' not in request.POST:
request.POST['_continue'] = 1
return super(CustomUserAdmin, self).response_add(request, obj,
post_url_continue)
from .models import User
from django.contrib.contenttypes.models import ContentType
class NotificationsAdmin(admin.ModelAdmin):
    list_display = ('id', 'actor', 'recipient', 'verb', 'timestamp')
list_filter = ('verb',)
search_fields = ('actor', 'recipient', 'verb',)
#raw_id_fields = ('author', )
form = NotificationForm
class Meta:
model = Notification
        ordering = ('timestamp',)
def save_model(self, request, obj, form, change):
"""
        Create the notifications either for one user or for all users.
        """
        # 'todos' ("all") is an optional boolean field: broadcast when set.
        todos = form.cleaned_data.get('todos', False)
if todos:
            # Broadcast: notify every user
            author = form.cleaned_data['author']
            titulo = form.cleaned_data['titulo']
            descripcion = form.cleaned_data['descripcion']
            for usuario in User.objects.all():
                notify.send(
                    author,
                    recipient=usuario,
                    verb=titulo,
                    timestamp=obj.timestamp,
                    description=descripcion
                )
else:
            # Notification for one particular user
            author = form.cleaned_data['author']
            titulo = form.cleaned_data['titulo']
            descripcion = form.cleaned_data['descripcion']
            destinario = form.cleaned_data['destinario']
            obj.actor_content_type = ContentType.objects.get_for_model(User)
            obj.actor = author
            obj.verb = titulo
            obj.description = descripcion
            obj.recipient = destinario
            obj.save()
return obj
class NotificationsAdmin2(admin.ModelAdmin):
"""
    DEPRECATED
    ----------
    Does not allow many modifications and is limited.
"""
list_display = ['id']
ordering = ['id']
    raw_id_fields = ('recipient', 'actor_object_id')
class Meta:
model = Notification
ordering = ('-timestamp', )
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.unregister(Notification)
admin.site.register(Notification, NotificationsAdmin)
admin.site.register(Tipo)
|
|
# -*- coding: utf-8 -*-
"""
flaskbb.forum.views
~~~~~~~~~~~~~~~~~~~~
This module handles the forum logic like creating and viewing
topics and posts.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import datetime
from flask import (Blueprint, redirect, url_for, current_app,
request, flash)
from flask_login import login_required, current_user
from flask_babelex import gettext as _
from flaskbb.extensions import db
from flaskbb.utils.settings import flaskbb_config
from flaskbb.utils.helpers import (get_online_users, time_diff, format_quote,
render_template)
from flaskbb.utils.permissions import (can_post_reply, can_post_topic,
can_delete_topic, can_delete_post,
can_edit_post, can_moderate)
from flaskbb.forum.models import (Category, Forum, Topic, Post, ForumsRead,
TopicsRead)
from flaskbb.forum.forms import (QuickreplyForm, ReplyForm, NewTopicForm,
ReportForm, UserSearchForm, SearchPageForm)
from flaskbb.user.models import User
forum = Blueprint("forum", __name__)
@forum.route("/")
def index():
categories = Category.get_all(user=current_user)
# Fetch a few stats about the forum
user_count = User.query.count()
topic_count = Topic.query.count()
post_count = Post.query.count()
newest_user = User.query.order_by(User.id.desc()).first()
# Check if we use redis or not
if not current_app.config["REDIS_ENABLED"]:
online_users = User.query.filter(User.lastseen >= time_diff()).count()
# Because we do not have server side sessions, we cannot check if there
# are online guests
online_guests = None
else:
online_users = len(get_online_users())
online_guests = len(get_online_users(guest=True))
return render_template("forum/index.html",
categories=categories,
user_count=user_count,
topic_count=topic_count,
post_count=post_count,
newest_user=newest_user,
online_users=online_users,
online_guests=online_guests)
@forum.route("/category/<int:category_id>")
@forum.route("/category/<int:category_id>-<slug>")
def view_category(category_id, slug=None):
category, forums = Category.\
get_forums(category_id=category_id, user=current_user)
return render_template("forum/category.html", forums=forums,
category=category)
@forum.route("/forum/<int:forum_id>")
@forum.route("/forum/<int:forum_id>-<slug>")
def view_forum(forum_id, slug=None):
page = request.args.get('page', 1, type=int)
forum_instance, forumsread = Forum.get_forum(forum_id=forum_id,
user=current_user)
if forum_instance.external:
return redirect(forum_instance.external)
topics = Forum.get_topics(
forum_id=forum_instance.id, user=current_user, page=page,
per_page=flaskbb_config["TOPICS_PER_PAGE"]
)
return render_template(
"forum/forum.html", forum=forum_instance,
topics=topics, forumsread=forumsread,
)
@forum.route("/topic/<int:topic_id>", methods=["POST", "GET"])
@forum.route("/topic/<int:topic_id>-<slug>", methods=["POST", "GET"])
def view_topic(topic_id, slug=None):
page = request.args.get('page', 1, type=int)
# Fetch some information about the topic
    topic = Topic.query.filter_by(id=topic_id).first_or_404()
# Count the topic views
topic.views += 1
topic.save()
# fetch the posts in the topic
posts = Post.query.\
join(User, Post.user_id == User.id).\
filter(Post.topic_id == topic.id).\
add_entity(User).\
order_by(Post.id.asc()).\
paginate(page, flaskbb_config['POSTS_PER_PAGE'], False)
# Update the topicsread status if the user hasn't read it
forumsread = None
if current_user.is_authenticated():
forumsread = ForumsRead.query.\
filter_by(user_id=current_user.id,
forum_id=topic.forum.id).first()
topic.update_read(current_user, topic.forum, forumsread)
form = None
if can_post_reply(user=current_user, topic=topic):
form = QuickreplyForm()
if form.validate_on_submit():
post = form.save(current_user, topic)
return view_post(post.id)
return render_template("forum/topic.html", topic=topic, posts=posts,
last_seen=time_diff(), form=form)
@forum.route("/post/<int:post_id>")
def view_post(post_id):
post = Post.query.filter_by(id=post_id).first_or_404()
    count = post.topic.post_count
    per_page = flaskbb_config["POSTS_PER_PAGE"]
    # Ceiling division: land on the page holding the topic's last post
    # without overshooting when count is an exact multiple of per_page.
    page = (count - 1) // per_page + 1
return redirect(post.topic.url + "?page=%d#pid%s" % (page, post.id))
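# Worked example for the paging above: with POSTS_PER_PAGE = 10, a topic
# with 25 posts ends on page (25 - 1) // 10 + 1 == 3, while exactly 20
# posts yields page 2 instead of overshooting to an empty page 3.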
@forum.route("/<int:forum_id>/topic/new", methods=["POST", "GET"])
@forum.route("/<int:forum_id>-<slug>/topic/new", methods=["POST", "GET"])
@login_required
def new_topic(forum_id, slug=None):
forum_instance = Forum.query.filter_by(id=forum_id).first_or_404()
    if not can_post_topic(user=current_user, forum=forum_instance):
        flash(_("You do not have the permissions to create a new topic."),
              "danger")
        return redirect(forum_instance.url)
form = NewTopicForm()
if request.method == "POST":
if "preview" in request.form and form.validate():
return render_template(
"forum/new_topic.html", forum=forum_instance,
form=form, preview=form.content.data
)
if "submit" in request.form and form.validate():
topic = form.save(current_user, forum_instance)
# redirect to the new topic
return redirect(url_for('forum.view_topic', topic_id=topic.id))
return render_template(
"forum/new_topic.html", forum=forum_instance, form=form
)
@forum.route("/topic/<int:topic_id>/delete")
@forum.route("/topic/<int:topic_id>-<slug>/delete")
@login_required
def delete_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
if not can_delete_topic(user=current_user, topic=topic):
flash(_("You do not have the permissions to delete this topic."),
"danger")
return redirect(topic.forum.url)
involved_users = User.query.filter(Post.topic_id == topic.id,
User.id == Post.user_id).all()
topic.delete(users=involved_users)
return redirect(url_for("forum.view_forum", forum_id=topic.forum_id))
@forum.route("/topic/<int:topic_id>/lock")
@forum.route("/topic/<int:topic_id>-<slug>/lock")
@login_required
def lock_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
# TODO: Bulk lock
if not can_moderate(user=current_user, forum=topic.forum):
flash(_("You do not have the permissions to lock this topic."),
"danger")
return redirect(topic.url)
topic.locked = True
topic.save()
return redirect(topic.url)
@forum.route("/topic/<int:topic_id>/unlock")
@forum.route("/topic/<int:topic_id>-<slug>/unlock")
@login_required
def unlock_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
# TODO: Bulk unlock
# Unlock is basically the same as lock
if not can_moderate(user=current_user, forum=topic.forum):
flash(_("You do not have the permissions to unlock this topic."),
"danger")
return redirect(topic.url)
topic.locked = False
topic.save()
return redirect(topic.url)
@forum.route("/topic/<int:topic_id>/highlight")
@forum.route("/topic/<int:topic_id>-<slug>/highlight")
@login_required
def highlight_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
if not can_moderate(user=current_user, forum=topic.forum):
flash(_("You do not have the permissions to highlight this topic."),
"danger")
return redirect(topic.url)
topic.important = True
topic.save()
return redirect(topic.url)
@forum.route("/topic/<int:topic_id>/trivialize")
@forum.route("/topic/<int:topic_id>-<slug>/trivialize")
@login_required
def trivialize_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
    # Trivialize is basically the inverse of highlight
if not can_moderate(user=current_user, forum=topic.forum):
flash(_("You do not have the permissions to trivialize this topic."),
"danger")
return redirect(topic.url)
topic.important = False
topic.save()
return redirect(topic.url)
@forum.route("/topic/<int:topic_id>/move/<int:forum_id>")
@forum.route(
"/topic/<int:topic_id>-<topic_slug>/move/<int:forum_id>-<forum_slug>"
)
@login_required
def move_topic(topic_id, forum_id, topic_slug=None, forum_slug=None):
forum_instance = Forum.query.filter_by(id=forum_id).first_or_404()
topic = Topic.query.filter_by(id=topic_id).first_or_404()
# TODO: Bulk move
if not can_moderate(user=current_user, forum=topic.forum):
flash(_("You do not have the permissions to move this topic."),
"danger")
return redirect(forum_instance.url)
if not topic.move(forum_instance):
flash(_("Could not move the topic to forum %(title)s.",
title=forum_instance.title), "danger")
return redirect(topic.url)
flash(_("Topic was moved to forum %(title)s.",
title=forum_instance.title), "success")
return redirect(topic.url)
@forum.route("/topic/<int:old_id>/merge/<int:new_id>")
@forum.route("/topic/<int:old_id>-<old_slug>/merge/<int:new_id>-<new_slug>")
@login_required
def merge_topic(old_id, new_id, old_slug=None, new_slug=None):
_old_topic = Topic.query.filter_by(id=old_id).first_or_404()
_new_topic = Topic.query.filter_by(id=new_id).first_or_404()
# TODO: Bulk merge
# Looks to me that the user should have permissions on both forums, right?
if not can_moderate(user=current_user, forum=_old_topic.forum):
flash(_("You do not have the permissions to merge this topic."),
"danger")
return redirect(_old_topic.url)
if not _old_topic.merge(_new_topic):
flash(_("Could not merge the topics."), "danger")
return redirect(_old_topic.url)
flash(_("Topics succesfully merged."), "success")
return redirect(_new_topic.url)
@forum.route("/topic/<int:topic_id>/post/new", methods=["POST", "GET"])
@forum.route("/topic/<int:topic_id>-<slug>/post/new", methods=["POST", "GET"])
@login_required
def new_post(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
if not can_post_reply(user=current_user, topic=topic):
flash(_("You do not have the permissions to post in this topic."),
"danger")
return redirect(topic.forum.url)
form = ReplyForm()
if form.validate_on_submit():
if "preview" in request.form:
return render_template(
"forum/new_post.html", topic=topic,
form=form, preview=form.content.data
)
else:
post = form.save(current_user, topic)
return view_post(post.id)
return render_template("forum/new_post.html", topic=topic, form=form)
@forum.route(
"/topic/<int:topic_id>/post/<int:post_id>/reply", methods=["POST", "GET"]
)
@login_required
def reply_post(topic_id, post_id):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
post = Post.query.filter_by(id=post_id).first_or_404()
if not can_post_reply(user=current_user, topic=topic):
flash(_("You do not have the permissions to post in this topic."),
"danger")
return redirect(topic.forum.url)
form = ReplyForm()
if form.validate_on_submit():
if "preview" in request.form:
return render_template(
"forum/new_post.html", topic=topic,
form=form, preview=form.content.data
)
else:
form.save(current_user, topic)
return redirect(post.topic.url)
else:
form.content.data = format_quote(post)
return render_template("forum/new_post.html", topic=post.topic, form=form)
@forum.route("/post/<int:post_id>/edit", methods=["POST", "GET"])
@login_required
def edit_post(post_id):
post = Post.query.filter_by(id=post_id).first_or_404()
if not can_edit_post(user=current_user, post=post):
flash(_("You do not have the permissions to edit this post."), "danger")
return redirect(post.topic.url)
form = ReplyForm()
if form.validate_on_submit():
if "preview" in request.form:
return render_template(
"forum/new_post.html", topic=post.topic,
form=form, preview=form.content.data
)
else:
form.populate_obj(post)
post.date_modified = datetime.datetime.utcnow()
post.modified_by = current_user.username
post.save()
return redirect(post.topic.url)
else:
form.content.data = post.content
return render_template("forum/new_post.html", topic=post.topic, form=form)
@forum.route("/post/<int:post_id>/delete")
@login_required
def delete_post(post_id, slug=None):
post = Post.query.filter_by(id=post_id).first_or_404()
# TODO: Bulk delete
if not can_delete_post(user=current_user, post=post):
flash(_("You do not have the permissions to delete this post."),
"danger")
return redirect(post.topic.url)
first_post = post.first_post
topic_url = post.topic.url
forum_url = post.topic.forum.url
post.delete()
# If the post was the first post in the topic, redirect to the forums
if first_post:
return redirect(forum_url)
return redirect(topic_url)
@forum.route("/post/<int:post_id>/report", methods=["GET", "POST"])
@login_required
def report_post(post_id):
post = Post.query.filter_by(id=post_id).first_or_404()
form = ReportForm()
if form.validate_on_submit():
form.save(current_user, post)
flash(_("Thanks for reporting."), "success")
return render_template("forum/report_post.html", form=form)
@forum.route("/post/<int:post_id>/raw", methods=["POST", "GET"])
@login_required
def raw_post(post_id):
post = Post.query.filter_by(id=post_id).first_or_404()
return format_quote(post)
@forum.route("/markread")
@forum.route("/<int:forum_id>/markread")
@forum.route("/<int:forum_id>-<slug>/markread")
@login_required
def markread(forum_id=None, slug=None):
# Mark a single forum as read
if forum_id:
forum_instance = Forum.query.filter_by(id=forum_id).first_or_404()
forumsread = ForumsRead.query.filter_by(
user_id=current_user.id, forum_id=forum_instance.id
).first()
TopicsRead.query.filter_by(user_id=current_user.id,
forum_id=forum_instance.id).delete()
if not forumsread:
forumsread = ForumsRead()
forumsread.user_id = current_user.id
forumsread.forum_id = forum_instance.id
forumsread.last_read = datetime.datetime.utcnow()
forumsread.cleared = datetime.datetime.utcnow()
db.session.add(forumsread)
db.session.commit()
flash(_("Forum %(forum)s marked as read.", forum=forum_instance.title),
"success")
return redirect(forum_instance.url)
# Mark all forums as read
ForumsRead.query.filter_by(user_id=current_user.id).delete()
TopicsRead.query.filter_by(user_id=current_user.id).delete()
forums = Forum.query.all()
forumsread_list = []
for forum_instance in forums:
forumsread = ForumsRead()
forumsread.user_id = current_user.id
forumsread.forum_id = forum_instance.id
forumsread.last_read = datetime.datetime.utcnow()
forumsread.cleared = datetime.datetime.utcnow()
forumsread_list.append(forumsread)
db.session.add_all(forumsread_list)
db.session.commit()
flash(_("All forums marked as read."), "success")
return redirect(url_for("forum.index"))
@forum.route("/who_is_online")
def who_is_online():
if current_app.config['REDIS_ENABLED']:
online_users = get_online_users()
else:
online_users = User.query.filter(User.lastseen >= time_diff()).all()
return render_template("forum/online_users.html",
online_users=online_users)
@forum.route("/memberlist", methods=['GET', 'POST'])
def memberlist():
page = request.args.get('page', 1, type=int)
search_form = UserSearchForm()
if search_form.validate():
users = search_form.get_results().\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("forum/memberlist.html", users=users,
search_form=search_form)
else:
users = User.query.\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("forum/memberlist.html", users=users,
search_form=search_form)
@forum.route("/topictracker")
@login_required
def topictracker():
page = request.args.get("page", 1, type=int)
topics = current_user.tracked_topics.\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == current_user.id)).\
add_entity(TopicsRead).\
order_by(Topic.last_updated.desc()).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], True)
return render_template("forum/topictracker.html", topics=topics)
@forum.route("/topictracker/<int:topic_id>/add")
@forum.route("/topictracker/<int:topic_id>-<slug>/add")
@login_required
def track_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
current_user.track_topic(topic)
current_user.save()
return redirect(topic.url)
@forum.route("/topictracker/<int:topic_id>/delete")
@forum.route("/topictracker/<int:topic_id>-<slug>/delete")
@login_required
def untrack_topic(topic_id, slug=None):
topic = Topic.query.filter_by(id=topic_id).first_or_404()
current_user.untrack_topic(topic)
current_user.save()
return redirect(topic.url)
@forum.route("/search", methods=['GET', 'POST'])
def search():
form = SearchPageForm()
if form.validate_on_submit():
result = form.get_results()
return render_template('forum/search_result.html', form=form,
result=result)
return render_template('forum/search_form.html', form=form)
|
|
import errno
import os
import requests
import sys
import signal
import webbrowser
from contextlib import closing
from distutils.version import StrictVersion
from functools import partial
from itertools import chain
from time import sleep
from streamlink import (Streamlink, StreamError, PluginError,
NoPluginError)
from streamlink.cache import Cache
from streamlink.stream import StreamProcess
from streamlink.plugins.twitch import TWITCH_CLIENT_ID
from .argparser import parser
from .compat import stdout, is_win32
from .console import ConsoleOutput
from .constants import CONFIG_FILES, PLUGINS_DIR, STREAM_SYNONYMS
from .output import FileOutput, PlayerOutput
from .utils import NamedPipe, HTTPServer, ignored, progress, stream_to_url
ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET)
QUIET_OPTIONS = ("json", "stream_url", "subprocess_cmdline", "quiet")
args = console = streamlink = plugin = stream_fd = output = None
def check_file_output(filename, force):
"""Checks if file already exists and ask the user if it should
be overwritten if it does."""
console.logger.debug("Checking file output")
if os.path.isfile(filename) and not force:
answer = console.ask("File {0} already exists! Overwrite it? [y/N] ",
filename)
if answer.lower() != "y":
sys.exit()
return FileOutput(filename)
def create_output():
"""Decides where to write the stream.
Depending on arguments it can be one of these:
- The stdout pipe
- A subprocess' stdin pipe
- A named pipe that the subprocess reads from
- A regular file
"""
if args.output:
if args.output == "-":
out = FileOutput(fd=stdout)
else:
out = check_file_output(args.output, args.force)
elif args.stdout:
out = FileOutput(fd=stdout)
else:
http = namedpipe = None
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
if args.player_fifo:
pipename = "streamlinkpipe-{0}".format(os.getpid())
console.logger.info("Creating pipe {0}", pipename)
try:
namedpipe = NamedPipe(pipename)
except IOError as err:
console.exit("Failed to create pipe: {0}", err)
elif args.player_http:
http = create_http_server()
console.logger.info("Starting player: {0}", args.player)
out = PlayerOutput(args.player, args=args.player_args,
quiet=not args.verbose_player,
kill=not args.player_no_close,
namedpipe=namedpipe, http=http)
return out
def create_http_server(host=None, port=0):
"""Creates a HTTP server listening on a given host and port.
If host is empty, listen on all available interfaces, and if port is 0,
listen on a random high port.
"""
try:
http = HTTPServer()
http.bind(host=host, port=port)
except OSError as err:
console.exit("Failed to create HTTP server: {0}", err)
return http
def iter_http_requests(server, player):
"""Repeatedly accept HTTP connections on a server.
    Forever if serving externally, or for as long as the player is
    running otherwise.
"""
while not player or player.running:
try:
yield server.open(timeout=2.5)
except OSError:
continue
def output_stream_http(plugin, initial_streams, external=False, port=0):
"""Continuously output the stream over HTTP."""
global output
if not external:
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
server = create_http_server()
player = output = PlayerOutput(args.player, args=args.player_args,
filename=server.url,
quiet=not args.verbose_player)
try:
console.logger.info("Starting player: {0}", args.player)
if player:
player.open()
except OSError as err:
console.exit("Failed to start player: {0} ({1})",
args.player, err)
else:
server = create_http_server(host=None, port=port)
player = None
console.logger.info("Starting server, access with one of:")
for url in server.urls:
console.logger.info(" " + url)
for req in iter_http_requests(server, player):
user_agent = req.headers.get("User-Agent") or "unknown player"
console.logger.info("Got HTTP request from {0}".format(user_agent))
stream_fd = prebuffer = None
while not stream_fd and (not player or player.running):
try:
streams = initial_streams or fetch_streams(plugin)
initial_streams = None
for stream_name in (resolve_stream_name(streams, s) for s in args.stream):
if stream_name in streams:
stream = streams[stream_name]
break
else:
console.logger.info("Stream not available, will re-fetch "
"streams in 10 sec")
sleep(10)
continue
except PluginError as err:
console.logger.error(u"Unable to fetch new streams: {0}", err)
continue
try:
console.logger.info("Opening stream: {0} ({1})", stream_name,
type(stream).shortname())
stream_fd, prebuffer = open_stream(stream)
except StreamError as err:
console.logger.error("{0}", err)
if stream_fd and prebuffer:
console.logger.debug("Writing stream to player")
read_stream(stream_fd, server, prebuffer)
server.close(True)
    if player:
        player.close()
server.close()
def output_stream_passthrough(stream):
"""Prepares a filename to be passed to the player."""
global output
filename = '"{0}"'.format(stream_to_url(stream))
output = PlayerOutput(args.player, args=args.player_args,
filename=filename, call=True,
quiet=not args.verbose_player)
try:
console.logger.info("Starting player: {0}", args.player)
output.open()
except OSError as err:
console.exit("Failed to start player: {0} ({1})", args.player, err)
return False
return True
def open_stream(stream):
"""Opens a stream and reads 8192 bytes from it.
This is useful to check if a stream actually has data
before opening the output.
"""
global stream_fd
# Attempts to open the stream
try:
stream_fd = stream.open()
except StreamError as err:
raise StreamError("Could not open stream: {0}".format(err))
# Read 8192 bytes before proceeding to check for errors.
# This is to avoid opening the output unnecessarily.
try:
console.logger.debug("Pre-buffering 8192 bytes")
prebuffer = stream_fd.read(8192)
except IOError as err:
raise StreamError("Failed to read data from stream: {0}".format(err))
if not prebuffer:
raise StreamError("No data returned from stream")
return stream_fd, prebuffer
def output_stream(stream):
"""Open stream, create output and finally write the stream to output."""
global output
for i in range(args.retry_open):
try:
stream_fd, prebuffer = open_stream(stream)
break
except StreamError as err:
console.logger.error("{0}", err)
else:
return
output = create_output()
try:
output.open()
except (IOError, OSError) as err:
if isinstance(output, PlayerOutput):
console.exit("Failed to start player: {0} ({1})",
args.player, err)
else:
console.exit("Failed to open output: {0} ({1})",
args.output, err)
with closing(output):
console.logger.debug("Writing stream to output")
read_stream(stream_fd, output, prebuffer)
return True
def read_stream(stream, output, prebuffer, chunk_size=8192):
"""Reads data from stream and then writes it to the output."""
is_player = isinstance(output, PlayerOutput)
is_http = isinstance(output, HTTPServer)
is_fifo = is_player and output.namedpipe
show_progress = isinstance(output, FileOutput) and output.fd is not stdout
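    # Two-argument iter() keeps calling stream.read(chunk_size) until the
    # sentinel b"" (EOF) is returned; chain() prepends the chunk already
    # consumed while pre-buffering in open_stream().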
stream_iterator = chain(
[prebuffer],
iter(partial(stream.read, chunk_size), b"")
)
if show_progress:
stream_iterator = progress(stream_iterator,
prefix=os.path.basename(args.output))
try:
for data in stream_iterator:
# We need to check if the player process still exists when
# using named pipes on Windows since the named pipe is not
# automatically closed by the player.
if is_win32 and is_fifo:
output.player.poll()
if output.player.returncode is not None:
console.logger.info("Player closed")
break
try:
output.write(data)
except IOError as err:
if is_player and err.errno in ACCEPTABLE_ERRNO:
console.logger.info("Player closed")
elif is_http and err.errno in ACCEPTABLE_ERRNO:
console.logger.info("HTTP connection closed")
else:
console.logger.error("Error when writing to output: {0}", err)
break
except IOError as err:
console.logger.error("Error when reading from stream: {0}", err)
stream.close()
console.logger.info("Stream ended")
def handle_stream(plugin, streams, stream_name):
"""Decides what to do with the selected stream.
Depending on arguments it can be one of these:
- Output internal command-line
    - Output JSON representation
- Continuously output the stream over HTTP
- Output stream data to selected output
"""
stream_name = resolve_stream_name(streams, stream_name)
stream = streams[stream_name]
# Print internal command-line if this stream
# uses a subprocess.
if args.subprocess_cmdline:
if isinstance(stream, StreamProcess):
try:
cmdline = stream.cmdline()
except StreamError as err:
console.exit("{0}", err)
console.msg("{0}", cmdline)
else:
console.exit("The stream specified cannot be translated to a command")
# Print JSON representation of the stream
elif console.json:
console.msg_json(stream)
elif args.stream_url:
try:
console.msg("{0}", stream.to_url())
except TypeError:
console.exit("The stream specified cannot be translated to a URL")
# Output the stream
else:
# Find any streams with a '_alt' suffix and attempt
# to use these in case the main stream is not usable.
alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
sorted(streams.keys())))
file_output = args.output or args.stdout
for stream_name in [stream_name] + alt_streams:
stream = streams[stream_name]
stream_type = type(stream).shortname()
if stream_type in args.player_passthrough and not file_output:
console.logger.info("Opening stream: {0} ({1})", stream_name,
stream_type)
success = output_stream_passthrough(stream)
elif args.player_external_http:
return output_stream_http(plugin, streams, external=True,
port=args.player_external_http_port)
elif args.player_continuous_http and not file_output:
return output_stream_http(plugin, streams)
else:
console.logger.info("Opening stream: {0} ({1})", stream_name,
stream_type)
success = output_stream(stream)
if success:
break
def fetch_streams(plugin):
"""Fetches streams using correct parameters."""
return plugin.get_streams(stream_types=args.stream_types,
sorting_excludes=args.stream_sorting_excludes)
def fetch_streams_infinite(plugin, interval):
"""Attempts to fetch streams until some are returned."""
try:
streams = fetch_streams(plugin)
except PluginError as err:
console.logger.error(u"{0}", err)
streams = None
if not streams:
console.logger.info("Waiting for streams, retrying every {0} "
"second(s)", interval)
while not streams:
sleep(interval)
try:
streams = fetch_streams(plugin)
except PluginError as err:
console.logger.error(u"{0}", err)
return streams
def resolve_stream_name(streams, stream_name):
"""Returns the real stream name of a synonym."""
if stream_name in STREAM_SYNONYMS and stream_name in streams:
for name, stream in streams.items():
if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
return name
return stream_name
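# Illustrative only: if streams maps both "best" and "720p" to the same
# stream object, resolve_stream_name(streams, "best") returns "720p", the
# non-synonym name for that stream.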
def format_valid_streams(plugin, streams):
"""Formats a dict of streams.
Filters out synonyms and displays them next to
the stream they point to.
Streams are sorted according to their quality
(based on plugin.stream_weight).
"""
delimiter = ", "
validstreams = []
for name, stream in sorted(streams.items(),
key=lambda stream: plugin.stream_weight(stream[0])):
if name in STREAM_SYNONYMS:
continue
        def synonymfilter(n):
            return stream is streams[n] and n != name
synonyms = list(filter(synonymfilter, streams.keys()))
if len(synonyms) > 0:
joined = delimiter.join(synonyms)
name = "{0} ({1})".format(name, joined)
validstreams.append(name)
return delimiter.join(validstreams)
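# Illustrative only: for qualities 480p and 720p where "worst" and "best"
# are synonyms of them, this renders roughly "480p (worst), 720p (best)".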
def handle_url():
"""The URL handler.
Attempts to resolve the URL to a plugin and then attempts
to fetch a list of available streams.
Proceeds to handle stream if user specified a valid one,
otherwise output list of valid streams.
"""
try:
plugin = streamlink.resolve_url(args.url)
console.logger.info("Found matching plugin {0} for URL {1}",
plugin.module, args.url)
if args.retry_streams:
streams = fetch_streams_infinite(plugin, args.retry_streams)
else:
streams = fetch_streams(plugin)
except NoPluginError:
console.exit("No plugin can handle URL: {0}", args.url)
except PluginError as err:
console.exit(u"{0}", err)
if not streams:
console.exit("No playable streams found on this URL: {0}", args.url)
if args.best_stream_default:
args.default_stream = ["best"]
if args.default_stream and not args.stream and not args.json:
args.stream = args.default_stream
if args.stream:
validstreams = format_valid_streams(plugin, streams)
for stream_name in args.stream:
if stream_name in streams:
console.logger.info("Available streams: {0}", validstreams)
handle_stream(plugin, streams, stream_name)
return
err = ("The specified stream(s) '{0}' could not be "
"found".format(", ".join(args.stream)))
if console.json:
console.msg_json(dict(streams=streams, plugin=plugin.module,
error=err))
else:
console.exit("{0}.\n Available streams: {1}",
err, validstreams)
else:
if console.json:
console.msg_json(dict(streams=streams, plugin=plugin.module))
else:
validstreams = format_valid_streams(plugin, streams)
console.msg("Available streams: {0}", validstreams)
def print_plugins():
"""Outputs a list of all plugins Streamlink has loaded."""
pluginlist = list(streamlink.get_plugins().keys())
pluginlist_formatted = ", ".join(sorted(pluginlist))
if console.json:
console.msg_json(pluginlist)
else:
console.msg("Loaded plugins: {0}", pluginlist_formatted)
def authenticate_twitch_oauth():
"""Opens a web browser to allow the user to grant Streamlink
access to their Twitch account."""
client_id = TWITCH_CLIENT_ID
redirect_uri = "https://streamlink.github.io/twitch_oauth.html"
url = ("https://api.twitch.tv/kraken/oauth2/authorize/"
"?response_type=token&client_id={0}&redirect_uri="
"{1}&scope=user_read+user_subscriptions").format(client_id, redirect_uri)
console.msg("Attempting to open a browser to let you authenticate "
"Streamlink with Twitch")
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
console.exit("Unable to open a web browser, try accessing this URL "
"manually instead:\n{0}".format(url))
def load_plugins(dirs):
"""Attempts to load plugins from a list of directories."""
dirs = [os.path.expanduser(d) for d in dirs]
for directory in dirs:
if os.path.isdir(directory):
streamlink.load_plugins(directory)
else:
console.logger.warning("Plugin path {0} does not exist or is not "
"a directory!", directory)
def setup_args(config_files=[]):
"""Parses arguments."""
global args
arglist = sys.argv[1:]
# Load arguments from config files
for config_file in filter(os.path.isfile, config_files):
arglist.insert(0, "@" + config_file)
args = parser.parse_args(arglist)
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
def setup_config_args():
config_files = []
if args.url:
with ignored(NoPluginError):
plugin = streamlink.resolve_url(args.url)
config_files += ["{0}.{1}".format(fn, plugin.module) for fn in CONFIG_FILES]
if args.config:
# We want the config specified last to get highest priority
config_files += list(reversed(args.config))
else:
# Only load first available default config
for config_file in filter(os.path.isfile, CONFIG_FILES):
config_files.append(config_file)
break
if config_files:
setup_args(config_files)
def setup_console():
"""Console setup."""
global console
    # All console-related operations are handled via the ConsoleOutput class
console = ConsoleOutput(sys.stdout, streamlink)
# Console output should be on stderr if we are outputting
# a stream to stdout.
if args.stdout or args.output == "-":
console.set_output(sys.stderr)
# We don't want log output when we are printing JSON or a command-line.
if not any(getattr(args, attr) for attr in QUIET_OPTIONS):
console.set_level(args.loglevel)
if args.quiet_player:
console.logger.warning("The option --quiet-player is deprecated since "
"version 1.4.3 as hiding player output is now "
"the default.")
if args.best_stream_default:
console.logger.warning("The option --best-stream-default is deprecated "
"since version 1.9.0, use '--default-stream best' "
"instead.")
console.json = args.json
# Handle SIGTERM just like SIGINT
signal.signal(signal.SIGTERM, signal.default_int_handler)
def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
console.logger.warning("The option --http-cookies is deprecated since "
"version 1.11.0, use --http-cookie instead.")
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
console.logger.warning("The option --http-headers is deprecated since "
"version 1.11.0, use --http-header instead.")
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
console.logger.warning("The option --http-query-params is deprecated since "
"version 1.11.0, use --http-query-param instead.")
streamlink.set_option("http-query-params", args.http_query_params)
def setup_plugins():
"""Loads any additional plugins."""
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if args.plugin_dirs:
load_plugins(args.plugin_dirs)
def setup_streamlink():
"""Creates the Streamlink session."""
global streamlink
streamlink = Streamlink()
def setup_options():
"""Sets Streamlink options."""
if args.hls_live_edge:
streamlink.set_option("hls-live-edge", args.hls_live_edge)
if args.hls_segment_attempts:
streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
if args.hls_playlist_reload_attempts:
streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
if args.hls_segment_threads:
streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
if args.hls_segment_timeout:
streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
if args.hls_timeout:
streamlink.set_option("hls-timeout", args.hls_timeout)
if args.hls_audio_select:
streamlink.set_option("hls-audio-select", args.hls_audio_select)
if args.hds_live_edge:
streamlink.set_option("hds-live-edge", args.hds_live_edge)
if args.hds_segment_attempts:
streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
if args.hds_segment_threads:
streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
if args.hds_segment_timeout:
streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
if args.hds_timeout:
streamlink.set_option("hds-timeout", args.hds_timeout)
if args.http_stream_timeout:
streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
if args.ringbuffer_size:
streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
if args.rtmp_proxy:
streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
if args.rtmp_rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
if args.rtmp_timeout:
streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
if args.stream_segment_attempts:
streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
if args.stream_segment_threads:
streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
if args.stream_segment_timeout:
streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
if args.stream_timeout:
streamlink.set_option("stream-timeout", args.stream_timeout)
if args.ffmpeg_ffmpeg:
streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
if args.ffmpeg_video_transcode:
streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)
streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
streamlink.set_option("locale", args.locale)
# Deprecated options
if args.hds_fragment_buffer:
console.logger.warning("The option --hds-fragment-buffer is deprecated "
"and will be removed in the future. Use "
"--ringbuffer-size instead")
def setup_plugin_options():
"""Sets Streamlink plugin options."""
if args.twitch_cookie:
streamlink.set_plugin_option("twitch", "cookie",
args.twitch_cookie)
if args.twitch_oauth_token:
streamlink.set_plugin_option("twitch", "oauth_token",
args.twitch_oauth_token)
if args.twitch_disable_hosting:
streamlink.set_plugin_option("twitch", "disable_hosting",
args.twitch_disable_hosting)
if args.ustream_password:
streamlink.set_plugin_option("ustreamtv", "password",
args.ustream_password)
if args.crunchyroll_username:
streamlink.set_plugin_option("crunchyroll", "username",
args.crunchyroll_username)
if args.crunchyroll_username and not args.crunchyroll_password:
crunchyroll_password = console.askpass("Enter Crunchyroll password: ")
else:
crunchyroll_password = args.crunchyroll_password
if crunchyroll_password:
streamlink.set_plugin_option("crunchyroll", "password",
crunchyroll_password)
if args.crunchyroll_purge_credentials:
streamlink.set_plugin_option("crunchyroll", "purge_credentials",
args.crunchyroll_purge_credentials)
if args.crunchyroll_session_id:
streamlink.set_plugin_option("crunchyroll", "session_id",
args.crunchyroll_session_id)
if args.crunchyroll_locale:
streamlink.set_plugin_option("crunchyroll", "locale",
args.crunchyroll_locale)
if args.livestation_email:
streamlink.set_plugin_option("livestation", "email",
args.livestation_email)
if args.livestation_password:
streamlink.set_plugin_option("livestation", "password",
args.livestation_password)
if args.btv_username:
streamlink.set_plugin_option("btv", "username", args.btv_username)
if args.btv_username and not args.btv_password:
btv_password = console.askpass("Enter BTV password: ")
else:
btv_password = args.btv_password
if btv_password:
streamlink.set_plugin_option("btv", "password", btv_password)
if args.schoolism_email:
streamlink.set_plugin_option("schoolism", "email", args.schoolism_email)
if args.schoolism_email and not args.schoolism_password:
schoolism_password = console.askpass("Enter Schoolism password: ")
else:
schoolism_password = args.schoolism_password
if schoolism_password:
streamlink.set_plugin_option("schoolism", "password", schoolism_password)
if args.schoolism_part:
streamlink.set_plugin_option("schoolism", "part", args.schoolism_part)
if args.daisuki_mux_subtitles:
streamlink.set_plugin_option("daisuki", "mux_subtitles", args.daisuki_mux_subtitles)
if args.rtve_mux_subtitles:
streamlink.set_plugin_option("rtve", "mux_subtitles", args.rtve_mux_subtitles)
if args.funimation_mux_subtitles:
streamlink.set_plugin_option("funimationnow", "mux_subtitles", True)
if args.funimation_language:
# map en->english, ja->japanese
lang = {"en": "english", "ja": "japanese"}.get(args.funimation_language.lower(),
args.funimation_language.lower())
streamlink.set_plugin_option("funimationnow", "language", lang)
if args.tvplayer_email:
streamlink.set_plugin_option("tvplayer", "email", args.tvplayer_email)
if args.tvplayer_email and not args.tvplayer_password:
tvplayer_password = console.askpass("Enter TVPlayer password: ")
else:
tvplayer_password = args.tvplayer_password
if tvplayer_password:
streamlink.set_plugin_option("tvplayer", "password", tvplayer_password)
if args.pluzz_mux_subtitles:
streamlink.set_plugin_option("pluzz", "mux_subtitles", args.pluzz_mux_subtitles)
if args.wwenetwork_email:
streamlink.set_plugin_option("wwenetwork", "email", args.wwenetwork_email)
if args.wwenetwork_email and not args.wwenetwork_password:
wwenetwork_password = console.askpass("Enter WWE Network password: ")
else:
wwenetwork_password = args.wwenetwork_password
if wwenetwork_password:
streamlink.set_plugin_option("wwenetwork", "password", wwenetwork_password)
if args.animelab_email:
streamlink.set_plugin_option("animelab", "email", args.animelab_email)
if args.animelab_email and not args.animelab_password:
animelab_password = console.askpass("Enter AnimeLab password: ")
else:
animelab_password = args.animelab_password
if animelab_password:
streamlink.set_plugin_option("animelab", "password", animelab_password)
if args.npo_subtitles:
streamlink.set_plugin_option("npo", "subtitles", args.npo_subtitles)
# Deprecated options
if args.jtv_legacy_names:
console.logger.warning("The option --jtv/twitch-legacy-names is "
"deprecated and will be removed in the future.")
if args.jtv_cookie:
console.logger.warning("The option --jtv-cookie is deprecated and "
"will be removed in the future.")
if args.jtv_password:
console.logger.warning("The option --jtv-password is deprecated "
"and will be removed in the future.")
if args.gomtv_username:
console.logger.warning("The option --gomtv-username is deprecated "
"and will be removed in the future.")
if args.gomtv_password:
console.logger.warning("The option --gomtv-password is deprecated "
"and will be removed in the future.")
if args.gomtv_cookie:
console.logger.warning("The option --gomtv-cookie is deprecated "
"and will be removed in the future.")
def check_root():
if hasattr(os, "getuid"):
if os.geteuid() == 0:
console.logger.info("streamlink is running as root! Be careful!")
def check_version(force=False):
cache = Cache(filename="cli.json")
latest_version = cache.get("latest_version")
if force or not latest_version:
res = requests.get("https://pypi.python.org/pypi/streamlink/json")
data = res.json()
latest_version = data.get("info").get("version")
cache.set("latest_version", latest_version, (60 * 60 * 24))
version_info_printed = cache.get("version_info_printed")
if not force and version_info_printed:
return
installed_version = StrictVersion(streamlink.version)
latest_version = StrictVersion(latest_version)
if latest_version > installed_version:
console.logger.info("A new version of Streamlink ({0}) is "
"available!".format(latest_version))
cache.set("version_info_printed", True, (60 * 60 * 6))
elif force:
console.logger.info("Your Streamlink version ({0}) is up to date!",
installed_version)
if force:
sys.exit()
def main():
setup_args()
setup_streamlink()
setup_plugins()
setup_config_args()
setup_console()
setup_http_session()
check_root()
if args.version_check or (not args.no_version_check and args.auto_version_check):
with ignored(Exception):
check_version(force=args.version_check)
if args.plugins:
print_plugins()
elif args.can_handle_url:
try:
streamlink.resolve_url(args.can_handle_url)
except NoPluginError:
sys.exit(1)
else:
sys.exit(0)
elif args.can_handle_url_no_redirect:
try:
streamlink.resolve_url_no_redirect(args.can_handle_url_no_redirect)
except NoPluginError:
sys.exit(1)
else:
sys.exit(0)
elif args.url:
try:
setup_options()
setup_plugin_options()
handle_url()
except KeyboardInterrupt:
# Close output
if output:
output.close()
console.msg("Interrupted! Exiting...")
finally:
if stream_fd:
try:
console.logger.info("Closing currently open stream...")
stream_fd.close()
except KeyboardInterrupt:
sys.exit()
elif args.twitch_oauth_authenticate:
authenticate_twitch_oauth()
elif args.help:
parser.print_help()
else:
usage = parser.format_usage()
msg = (
"{usage}\nUse -h/--help to see the available options or "
"read the manual at https://streamlink.github.io"
).format(usage=usage)
console.msg(msg)
|
|
"""
Results for VARMAX tests
Results from Stata using script `test_varmax_stata.do`.
See also Stata time series documentation, in particular `dfactor`.
Data from:
http://www.jmulti.de/download/datasets/e1.dat
Author: Chad Fulton
License: Simplified-BSD
"""
lutkepohl_dfm = {
'params': [
.0063728, .00660177, .00636009, # Factor loadings
.00203899, .00009016, .00005348, # Idiosyncratic variances
.33101874, .63927819, # Factor transitions
],
'bse_oim': [
.002006, .0012514, .0012128, # Factor loadings
.0003359, .0000184, .0000141, # Idiosyncratic variances
.1196637, .1218577, # Factor transitions
],
'loglike': 594.0902026190786,
'aic': -1172.18,
'bic': -1153.641,
}
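# Hedged sketch of how a test might consume one of these dictionaries
# (it mirrors the usual statsmodels pattern of evaluating a model at the
# Stata-estimated parameters; `mod` is an illustrative model object, not
# defined in this file):
#
#     from numpy.testing import assert_allclose
#     res = mod.filter(lutkepohl_dfm['params'])
#     assert_allclose(res.llf, lutkepohl_dfm['loglike'], atol=1e-6)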
lutkepohl_dfm2 = {
'params': [
.03411188, .03478764, # Factor loadings: y1
.03553366, .0344871, # Factor loadings: y2
.03536757, .03433391, # Factor loadings: y3
.00224401, .00014678, .00010922, # Idiosyncratic variances
.08845946, .08862982, # Factor transitions: Phi, row 1
.08754759, .08758589 # Phi, row 2
],
'bse_oim': None,
'loglike': 496.379832917306,
'aic': -974.7597,
'bic': -953.9023,
}
lutkepohl_dfm_exog1 = {
'params': [
-.01254697, -.00734604, -.00671296, # Factor loadings
.01803325, .02066737, .01983089, # Beta.constant
.00198667, .00008426, .00005684, # Idiosyncratic variances
.31140829, # Factor transition
],
'var_oim': [
.00004224, 2.730e-06, 3.625e-06,
.00003087, 2.626e-06, 2.013e-06,
1.170e-07, 5.133e-10, 3.929e-10,
.07412117
],
'loglike': 596.9781590009525,
'aic': -1173.956,
'bic': -1150.781,
}
lutkepohl_dfm_exog2 = {
'params': [
.01249096, .00731147, .00680776, # Factor loadings
.02187812, -.00009851, # Betas, y1
.02302646, -.00006045, # Betas, y2
.02009233, -6.683e-06, # Betas, y3
.0019856, .00008378, .00005581, # Idiosyncratic variances
.2995768, # Factor transition
],
'var_oim': [
.00004278, 2.659e-06, 3.766e-06,
.00013003, 6.536e-08,
.00001079, 5.424e-09,
8.393e-06, 4.217e-09,
1.168e-07, 5.140e-10, 4.181e-10,
.07578382,
],
'loglike': 597.4550537198315,
'aic': -1168.91,
'bic': -1138.783,
}
lutkepohl_dfm_gen = {
'params': [
.00312295, .00332555, .00318837, # Factor loadings
# .00195462, # Covariance, lower triangle
# 3.642e-06, .00010047,
# .00007018, .00002565, .00006118
# Note: the following are the Cholesky of the covariance
# matrix defined just above
.04421108, # Cholesky, lower triangle
.00008238, .01002313,
.00158738, .00254603, .00722343,
.987374, # Factor transition
-.25613562, .00392166, .44859028, # Error transition parameters
.01635544, -.249141, .08170863,
-.02280001, .02059063, -.41808254
],
'var_oim': [
1.418e-06, 1.030e-06, 9.314e-07, # Factor loadings
None, # Cholesky, lower triangle
None, None,
None, None, None,
.00021421, # Factor transition
.01307587, .29167522, .43204063, # Error transition parameters
.00076899, .01742173, .0220161,
.00055435, .01456365, .01707167
],
'loglike': 607.7715711926285,
'aic': -1177.543,
'bic': -1133.511,
}
lutkepohl_dfm_ar2 = {
'params': [
.00419132, .0044007, .00422976, # Factor loadings
.00188101, .0000786, .0000418, # Idiosyncratic variance
.97855802, # Factor transition
-.28856258, -.14910552, # Error transition parameters
-.41544832, -.26706536,
-.72661178, -.27278821,
],
'var_oim': [
1.176e-06, 7.304e-07, 6.726e-07, # Factor loadings
9.517e-08, 2.300e-10, 1.389e-10, # Idiosyncratic variance
.00041159, # Factor transition
.0131511, .01296008, # Error transition parameters
.01748435, .01616862,
.03262051, .02546648,
],
'loglike': 607.4203109232711,
'aic': -1188.841,
'bic': -1158.713,
}
lutkepohl_dfm_scalar = {
'params': [
.04424851, .00114077, .00275081, # Factor loadings
.01812298, .02071169, .01987196, # Beta.constant
.00012067, # Idiosyncratic variance
-.19915198, # Factor transition
],
'var_oim': [
.00001479, 1.664e-06, 1.671e-06,
.00001985, 1.621e-06, 1.679e-06,
1.941e-10,
.01409482
],
'loglike': 588.7677809701966,
'aic': -1161.536,
'bic': -1142.996,
}
lutkepohl_sfm = {
'params': [
.02177607, .02089956, .02239669, # Factor loadings
.00201477, .00013623, 7.452e-16 # Idiosyncratic variance
],
'var_oim': [
.00003003, 4.729e-06, 3.344e-06,
1.083e-07, 4.950e-10, 0
],
'loglike': 532.2215594949788,
'aic': -1054.443,
'bic': -1042.856,
}
lutkepohl_sur = {
'params': [
.02169026, -.00009184, # Betas, y1
.0229165, -.00005654, # Betas, y2
.01998994, -3.049e-06, # Betas, y3
# .00215703, # Covariance, lower triangle
# .0000484, .00014252,
# .00012772, .00005642, .00010673,
# Note: the following are the Cholesky of the covariance
# matrix defined just above
.04644384, # Cholesky, lower triangle
.00104212, .0118926,
.00274999, .00450315, .00888196,
],
'var_oim': [
.0001221, 6.137e-08,
8.067e-06, 4.055e-09,
6.042e-06, 3.036e-09,
None,
None, None,
None, None, None
],
'loglike': 597.6181259116113,
'aic': -1171.236,
'bic': -1143.426,
}
lutkepohl_sur_auto = {
'params': [
.02243063, -.00011112, # Betas, y1
.02286952, -.0000554, # Betas, y2
.0020338, .00013843, # Idiosyncratic variance
-.21127833, .50884609, # Error transition parameters
.04292935, .00855789,
],
'var_oim': [
.00008357, 4.209e-08,
8.402e-06, 4.222e-09,
1.103e-07, 5.110e-10,
.01259537, .19382105,
.00085936, .01321035,
],
'loglike': 352.7250284160132,
'aic': -685.4501,
'bic': -662.2752
}
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: kwc
import rosdistro
from rosdistro.manifest_provider import get_release_tag
from rospkg import distro as rospkg_distro
import yaml
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
BRANCH_RELEASE = 'release'
BRANCH_DEVEL = 'devel'
class InvalidData(Exception):
pass
def build_rosinstall(repo_name, uri, vcs_type, version, prefix):
"""
Build a rosinstall file given some basic information
"""
rosinstall = []
repo_name = repo_name if not prefix else '/'.join([prefix, repo_name])
if version:
rosinstall.append({vcs_type: {'local-name': repo_name,
'uri': uri, 'version': version}})
else:
rosinstall.append({vcs_type: {'local-name': repo_name, 'uri': uri}})
# return yaml.dump(rosinstall, default_flow_style=False)
return rosinstall
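# Usage sketch (values are illustrative, not a real repository):
#
#     >>> build_rosinstall('ros_comm', 'https://github.com/ros/ros_comm.git',
#     ...                  'git', 'release/ros_comm/1.9.50', None)
#     [{'git': {'local-name': 'ros_comm',
#               'uri': 'https://github.com/ros/ros_comm.git',
#               'version': 'release/ros_comm/1.9.50'}}]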
def get_wet_info(wet_distro, name):
"""
Get information about wet packages or stacks
"""
repos = wet_distro['repositories']
for repo in repos:
info = repos[repo]
if repo == name or name in info.get('packages', []):
return (repo, info)
return None
def get_dry_info(dry_distro, name):
"""
Get information about dry stacks
"""
dry_stacks = dry_distro.get_stacks(True)
if name in dry_stacks:
stack = dry_stacks[name]
if stack.vcs_config.type == 'svn':
return (name,
stack.vcs_config.release_tag, stack.vcs_config.type, None)
else:
return (name,
stack.vcs_config.anon_repo_uri, stack.vcs_config.type,
stack.vcs_config.release_tag)
return None
def get_release_rosinstall(name, wet_distro, dry_distro, prefix):
"""
Check if the name is in the wet distro
"""
info = get_wet_info(wet_distro, name)
if info:
repo_name, repo_info = info
if repo_name == name and 'packages' in repo_info:
rosinstall = []
pkg_prefix = '/'.join([prefix, repo_name]) if prefix else repo_name
for pkg in repo_info['packages'].keys():
rosinstall.extend(build_rosinstall(pkg, repo_info['url'], 'git', '/'.join(
['release', pkg, repo_info['version'].split('-')[0]]), pkg_prefix))
return rosinstall
else:
return build_rosinstall(
name,
repo_info['url'],
'git',
'/'.join(['release', name, repo_info['version'].split('-')[0]]),
prefix)
# Check if the name is in the dry distro
info = get_dry_info(dry_distro, name)
if info:
name, uri, vcs_type, version = info
return build_rosinstall(name, uri, vcs_type, version, prefix)
return None
def get_manifest_yaml(name, distro):
    # Fetch the documentation manifest generated by the doc indexer on ros.org
url = 'http://ros.org/doc/%s/api/%s/manifest.yaml' % (distro, name)
try:
return yaml.load(urlopen(url))
    except Exception:
raise IOError("Could not load a documentation manifest for %s-%s from ros.org\n\
Have you selected a valid distro? Did you spell everything correctly? Is your package indexed on ros.org?\n\
I'm looking here: %s for a yaml file." % (distro, name, url))
def _get_fuerte_release():
"""
Please delete me when fuerte is not supported anymore
See REP137 about rosdistro files
"""
url = 'https://raw.github.com/ros/rosdistro/master/releases/fuerte.yaml'
try:
fuerte_distro = yaml.load(urlopen(url))
    except Exception:
raise IOError("Could not load the fuerte rosdistro file from github.\n"
"Are you sure you've selected a valid distro?\n"
"I'm looking for the following file %s" % url)
return fuerte_distro
def _get_fuerte_rosinstall(name, prefix=None):
"""
Please delete me when fuerte is not supported anymore
See REP137 about rosdistro files
"""
dry_distro = rospkg_distro.load_distro(rospkg_distro.distro_uri('fuerte'))
wet_distro = _get_fuerte_release()
# Check to see if the name just exists in one of our rosdistro files
rosinstall = get_release_rosinstall(name, wet_distro, dry_distro, prefix)
if rosinstall:
return rosinstall
# If we didn't find the name, we need to try to find a stack for it
doc_yaml = get_manifest_yaml(name, 'fuerte')
for metapackage in doc_yaml.get('metapackages', []):
meta_yaml = get_manifest_yaml(metapackage, 'fuerte')
if meta_yaml['package_type'] == 'stack':
rosinstall = get_release_rosinstall(
metapackage, wet_distro, dry_distro, prefix)
if rosinstall:
return rosinstall
return None
def _get_electric_rosinstall(name, prefix=None):
"""
Please delete me when you don't care at all about electric anymore
"""
dry_distro = rospkg_distro.load_distro(rospkg_distro.distro_uri('electric'))
if _is_dry(dry_distro, name):
return get_dry_rosinstall(dry_distro, name, prefix=prefix)
# If we didn't find the name, we need to try to find a stack for it
doc_yaml = get_manifest_yaml(name, 'electric')
for metapackage in doc_yaml.get('metapackages', []):
meta_yaml = get_manifest_yaml(metapackage, 'electric')
if meta_yaml['package_type'] == 'stack':
if _is_dry(dry_distro, metapackage):
return get_dry_rosinstall(dry_distro, metapackage, prefix=prefix)
return None
def _get_rosdistro_release(distro):
index = rosdistro.get_index(rosdistro.get_index_url())
return rosdistro.get_distribution_file(index, distro)
def _find_repo(release_file, name):
for r in release_file.repositories:
repo = release_file.repositories[r]
if name in repo.package_names:
return repo
return None
def _is_wet(release_file, name):
return _find_repo(release_file, name) is not None
def _is_dry(dry_distro, name):
return get_dry_info(dry_distro, name) is not None
def get_wet_rosinstall(release_file, name, prefix=None):
repo = _find_repo(release_file, name)
if repo is None: # wait, what?
return None
return build_rosinstall(name, repo.url, 'git', get_release_tag(repo, name), prefix)
def get_dry_rosinstall(dry_distro, name, prefix=None):
info = get_dry_info(dry_distro, name)
if info:
name, uri, vcs_type, version = info
return build_rosinstall(name, uri, vcs_type, version, prefix)
return None
def get_release_info(name, distro, prefix=None):
"""
Steps to check for a released version of the package
1) Look in the wet distro file for the package/stack name, if it's there, return the repo
2) Look in the dry distro file for the package/stack name, if it's there, return the repo
3) Look in the manifest.yaml generated by the documentation indexer to take a best guess at
what stack a given package belongs to
4) Look in the distro files again to see if the stack name is there, if it is, return the repo
"""
# fuerte is different.
if distro == 'fuerte':
return _get_fuerte_rosinstall(name, prefix=prefix)
# electric is ancient.
if distro == 'electric':
return _get_electric_rosinstall(name, prefix=prefix)
wet_distro = _get_rosdistro_release(distro)
dry_distro = rospkg_distro.load_distro(rospkg_distro.distro_uri(distro))
# Check to see if the name just exists in one of our rosdistro files
if _is_wet(wet_distro, name):
return get_wet_rosinstall(wet_distro, name, prefix=prefix)
if _is_dry(dry_distro, name):
return get_dry_rosinstall(dry_distro, name, prefix=prefix)
# If we didn't find the name, we need to try to find a stack for it
doc_yaml = get_manifest_yaml(name, distro)
for metapackage in doc_yaml.get('metapackages', []):
meta_yaml = get_manifest_yaml(metapackage, distro)
if meta_yaml['package_type'] == 'stack':
if _is_dry(dry_distro, metapackage):
return get_dry_rosinstall(dry_distro, metapackage, prefix=prefix)
return None
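# Usage sketch (package and distro names are illustrative; the return value
# is a rosinstall list in the build_rosinstall() format above, or None if
# nothing was found):
#
#     >>> get_release_info('roscpp', 'indigo')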
def get_doc_info(name, distro, prefix=None):
doc_yaml = get_manifest_yaml(name, distro)
return build_rosinstall(
doc_yaml['repo_name'], doc_yaml['vcs_uri'], doc_yaml['vcs'],
doc_yaml.get('vcs_version', ''), prefix)
def get_doc_type(name, distro):
return get_manifest_yaml(name, distro)['package_type']
def get_doc_www(name, distro):
return get_manifest_yaml(name, distro)['url']
def get_doc_description(name, distro):
return get_manifest_yaml(name, distro)['description']
|
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# 1st set up logging
import logging
import logging.config
import time as _time
import tempfile as _tempfile
import os as _os
from .queue_channel import QueueHandler, MutableQueueListener
import urllib as _urllib
import re as _re
from zipfile import ZipFile as _ZipFile
import bz2 as _bz2
import tarfile as _tarfile
import itertools as _itertools
import uuid as _uuid
import datetime as _datetime
import logging as _logging
import sys as _sys
from .sframe_generation import generate_random_sframe
from .sframe_generation import generate_random_regression_sframe
from .sframe_generation import generate_random_classification_sframe
from .type_checks import _raise_error_if_not_function
from .type_checks import _raise_error_if_not_of_type
from .type_checks import _is_non_string_iterable
from .type_checks import _is_string
def _i_am_a_lambda_worker():
if _re.match(".*lambda_worker.*", _sys.argv[0]) is not None:
return True
return False
try:
from queue import Queue as queue
except ImportError:
from Queue import Queue as queue
try:
import configparser as _ConfigParser
except ImportError:
import ConfigParser as _ConfigParser
__LOGGER__ = _logging.getLogger(__name__)
# overuse the same logger so we have one logging config
root_package_name = __import__(__name__.split('.')[0]).__name__
logger = logging.getLogger(root_package_name)
client_log_file = _os.path.join(_tempfile.gettempdir(),
root_package_name +
'_client_%d_%d.log' % (_time.time(), _os.getpid()))
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
# XXX: temp!
'format': '%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s'
},
'brief': {
# XXX: temp!
'format': '%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s'
#'format': '[%(levelname)s] %(message)s'
}
},
'handlers': {
'default': {
'class': 'logging.StreamHandler',
'formatter': 'brief'
},
'file': {
'class':'logging.FileHandler',
'formatter':'standard',
'filename':client_log_file,
'encoding': 'UTF-8',
            'delay': False,  # bool, not the truthy string 'False'
}
},
'loggers': {
'': {
'handlers': ['default', 'file'],
            'propagate': True
}
}
})
# Set module specific log levels
logging.getLogger('librato').setLevel(logging.CRITICAL)
logging.getLogger('requests').setLevel(logging.CRITICAL)
if _i_am_a_lambda_worker():
logging.getLogger(root_package_name).setLevel(logging.WARNING)
logging.getLogger(__name__).setLevel(logging.WARNING)
else:
logging.getLogger(root_package_name).setLevel(logging.INFO)
logging.getLogger(__name__).setLevel(logging.INFO)
#amend the logging configuration with a handler streaming to a message queue
q = queue(-1)
ql = MutableQueueListener(q)
qh = QueueHandler(q)
logging.root.addHandler(qh)
ql.start()
def stop_queue_listener():
ql.stop()
def _attach_log_handler(handler):
ql.addHandler(handler)
def _detach_log_handler(handler):
ql.removeHandler(handler)
def _convert_slashes(path):
"""
Converts all windows-style slashes to unix-style slashes
"""
return path.replace('\\', '/')
def _get_aws_credentials():
"""
Returns the values stored in the AWS credential environment variables.
Returns the value stored in the AWS_ACCESS_KEY_ID environment variable and
the value stored in the AWS_SECRET_ACCESS_KEY environment variable.
Returns
-------
out : tuple [string]
The first string of the tuple is the value of the AWS_ACCESS_KEY_ID
environment variable. The second string of the tuple is the value of the
AWS_SECRET_ACCESS_KEY environment variable.
See Also
--------
set_credentials
Examples
--------
>>> graphlab.aws.get_credentials()
('RBZH792CTQPP7T435BGQ', '7x2hMqplWsLpU/qQCN6xAPKcmWo46TlPJXYTvKcv')
"""
    if 'AWS_ACCESS_KEY_ID' not in _os.environ:
        raise KeyError('No access key found. Please set the environment variable AWS_ACCESS_KEY_ID, or use graphlab.aws.set_credentials()')
    if 'AWS_SECRET_ACCESS_KEY' not in _os.environ:
        raise KeyError('No secret key found. Please set the environment variable AWS_SECRET_ACCESS_KEY, or use graphlab.aws.set_credentials()')
return (_os.environ['AWS_ACCESS_KEY_ID'], _os.environ['AWS_SECRET_ACCESS_KEY'])
def _try_inject_s3_credentials(url):
"""
Inject aws credentials into s3 url as s3://[aws_id]:[aws_key]:[bucket/][objectkey]
If s3 url already contains secret key/id pairs, just return as is.
"""
assert url.startswith('s3://')
path = url[5:]
# Check if the path already contains credentials
tokens = path.split(':')
    # If there are two ':', it's possible that we have already injected credentials
if len(tokens) == 3:
# Edge case: there are exactly two ':'s in the object key which is a false alarm.
# We prevent this by checking that '/' is not in the assumed key and id.
if ('/' not in tokens[0]) and ('/' not in tokens[1]):
return url
# S3 url does not contain secret key/id pair, query the environment variables
(k, v) = _get_aws_credentials()
return 's3://' + k + ':' + v + ':' + path
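# Example (assumes both AWS environment variables are set; the values shown
# are illustrative, not real credentials):
#
#     >>> _os.environ['AWS_ACCESS_KEY_ID'] = 'MY_ID'
#     >>> _os.environ['AWS_SECRET_ACCESS_KEY'] = 'MY_KEY'
#     >>> _try_inject_s3_credentials('s3://mybucket/path/to/object')
#     's3://MY_ID:MY_KEY:mybucket/path/to/object'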
def _make_internal_url(url):
"""
Process user input url string with proper normalization
For all urls:
Expands ~ to $HOME
For S3 urls:
Returns the s3 URL with credentials filled in using graphlab.aws.get_aws_credential().
For example: "s3://mybucket/foo" -> "s3://$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY:mybucket/foo".
For hdfs urls:
Error if hadoop classpath is not set
For local file urls:
        Converts slashes for Windows sanity
Parameters
----------
    url : string
        A URL (as described above).
Raises
------
ValueError
If a bad url is provided.
"""
if not url:
raise ValueError('Invalid url: %s' % url)
from .. import sys_util
from . import file_util
# Convert Windows paths to Unix-style slashes
url = _convert_slashes(url)
# Try to split the url into (protocol, path).
protocol = file_util.get_protocol(url)
is_local = False
if protocol in ['http', 'https']:
pass
elif protocol == 'hdfs':
if not sys_util.get_hadoop_class_path():
raise ValueError("HDFS URL is not supported because Hadoop not found. Please make hadoop available from PATH or set the environment variable HADOOP_HOME and try again.")
elif protocol == 's3':
return _try_inject_s3_credentials(url)
elif protocol == '':
is_local = True
elif (protocol == 'local' or protocol == 'remote'):
        # local and remote are legacy protocols for a separate server process
is_local = True
# This code assumes local and remote are same machine
        url = _re.sub(protocol + '://', '', url, count=1)
else:
raise ValueError('Invalid url protocol %s. Supported url protocols are: local, s3://, https:// and hdfs://' % protocol)
if is_local:
url = _os.path.abspath(_os.path.expanduser(url))
return url
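# Examples of the normalization above (output shown for a POSIX system with
# HOME=/home/user; paths are illustrative):
#
#     >>> _make_internal_url('~/data/foo.csv')
#     '/home/user/data/foo.csv'
#     >>> _make_internal_url('remote:///tmp/foo')  # legacy protocol stripped
#     '/tmp/foo'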
def _download_dataset(url_str, extract=True, force=False, output_dir="."):
"""Download a remote dataset and extract the contents.
Parameters
----------
url_str : string
The URL to download from
extract : bool
If true, tries to extract compressed file (zip/gz/bz2)
force : bool
If true, forces to retry the download even if the downloaded file already exists.
output_dir : string
The directory to dump the file. Defaults to current directory.
"""
fname = output_dir + "/" + url_str.split("/")[-1]
#download the file from the web
if not _os.path.isfile(fname) or force:
print("Downloading file from:", url_str)
_urllib.urlretrieve(url_str, fname)
if extract and fname[-3:] == "zip":
print("Decompressing zip archive", fname)
_ZipFile(fname).extractall(output_dir)
    elif extract and fname.endswith(".tar.gz"):
        print("Decompressing tar.gz archive", fname)
        # tarfile.open (not TarFile) is needed to handle the compression
        _tarfile.open(fname, "r:gz").extractall(output_dir)
    elif extract and fname.endswith(".tar.bz2"):
        print("Decompressing tar.bz2 archive", fname)
        _tarfile.open(fname, "r:bz2").extractall(output_dir)
elif extract and fname[-3:] == "bz2":
print("Decompressing bz2 archive:", fname)
outfile = open(fname.split(".bz2")[0], "w")
print("Output file:", outfile)
for line in _bz2.BZ2File(fname, "r"):
outfile.write(line)
outfile.close()
else:
print("File is already downloaded.")
def is_directory_archive(path):
"""
    Utility function that returns True if the path provided is a directory that has an SFrame or SGraph in it.
    SFrames are written to disk as a directory archive; this function identifies whether a given directory is such an archive
    for an SFrame.
Parameters
----------
path : string
Directory to evaluate.
Returns
-------
True if path provided is an archive location, False otherwise
"""
if path is None:
return False
if not _os.path.isdir(path):
return False
ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])
if not _os.path.exists(ini_path):
return False
if _os.path.isfile(ini_path):
return True
return False
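# Example (illustrative path): after saving an SFrame with
# sf.save('/tmp/my_frame'), that directory contains a 'dir_archive.ini'
# file, so:
#
#     >>> is_directory_archive('/tmp/my_frame')
#     True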
def get_archive_type(path):
"""
Returns the contents type for the provided archive path.
Parameters
----------
path : string
Directory to evaluate.
Returns
-------
Returns a string of: sframe, sgraph, raises TypeError for anything else
"""
if not is_directory_archive(path):
raise TypeError('Unable to determine the type of archive at path: %s' % path)
try:
ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])
parser = _ConfigParser.SafeConfigParser()
parser.read(ini_path)
contents = parser.get('metadata', 'contents')
return contents
except Exception as e:
raise TypeError('Unable to determine type of archive for path: %s' % path, e)
def get_environment_config():
"""
Returns all the GraphLab Create configuration variables that can only
be set via environment variables.
- *GRAPHLAB_FILEIO_WRITER_BUFFER_SIZE*
The file write buffer size.
- *GRAPHLAB_FILEIO_READER_BUFFER_SIZE*
The file read buffer size.
- *OMP_NUM_THREADS*
The maximum number of threads to use for parallel processing.
Parameters
----------
None
Returns
-------
Returns a dictionary of {key:value,..}
"""
from ..connect import main as _glconnect
unity = _glconnect.get_unity()
return unity.list_globals(False)
def get_runtime_config():
"""
Returns all the GraphLab Create configuration variables that can be set
at runtime. See :py:func:`graphlab.set_runtime_config()` to set these
values and for documentation on the effect of each variable.
Parameters
----------
None
Returns
-------
Returns a dictionary of {key:value,..}
"""
from ..connect import main as _glconnect
unity = _glconnect.get_unity()
return unity.list_globals(True)
def set_runtime_config(name, value):
"""
Configures system behavior at runtime. These configuration values are also
read from environment variables at program startup if available. See
:py:func:`graphlab.get_runtime_config()` to get the current values for
each variable.
Note that defaults may change across versions and the names
of performance tuning constants may also change as improved algorithms
are developed and implemented.
**Basic Configuration Variables**
- *GRAPHLAB_CACHE_FILE_LOCATIONS*
The directory in which intermediate SFrames/SArray are stored.
For instance "/var/tmp". Multiple directories can be specified separated
by a colon (ex: "/var/tmp:/tmp") in which case intermediate SFrames will
be striped across both directories (useful for specifying multiple disks).
Defaults to /var/tmp if the directory exists, /tmp otherwise.
- *GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY*
The maximum amount of memory which will be occupied by *all* intermediate
SFrames/SArrays. Once this limit is exceeded, SFrames/SArrays will be
flushed out to temporary storage (as specified by
`GRAPHLAB_CACHE_FILE_LOCATIONS`). On large systems increasing this as well
as `GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY_PER_FILE` can improve performance
significantly. Defaults to 2147483648 bytes (2GB).
- *GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY_PER_FILE*
The maximum amount of memory which will be occupied by any individual
intermediate SFrame/SArray. Once this limit is exceeded, the
SFrame/SArray will be flushed out to temporary storage (as specified by
`GRAPHLAB_CACHE_FILE_LOCATIONS`). On large systems, increasing this as well
as `GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY` can improve performance
significantly for large SFrames. Defaults to 134217728 bytes (128MB).
**SSL Configuration**
- *GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_FILE*
The location of an SSL certificate file used to validate HTTPS / S3
      connections. Defaults to the Python certifi package certificates.
- *GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_DIR*
The location of an SSL certificate directory used to validate HTTPS / S3
connections. Defaults to the operating system certificates.
- *GRAPHLAB_FILEIO_INSECURE_SSL_CERTIFICATE_CHECKS*
If set to a non-zero value, disables all SSL certificate validation.
Defaults to False.
**ODBC Configuration**
- *GRAPHLAB_LIBODBC_PREFIX*
A directory containing libodbc.so. Also see :func:`graphlab.set_libodbc_path`
and :func:`graphlab.connect_odbc`
- *GRAPHLAB_ODBC_BUFFER_MAX_ROWS*
The number of rows to read from ODBC in each batch. Increasing this
may give better performance at increased memory consumption. Defaults to
2000.
- *GRAPHLAB_ODBC_BUFFER_SIZE*
The maximum ODBC buffer size in bytes when reading. Increasing this may
give better performance at increased memory consumption. Defaults to 3GB.
**Sort Performance Configuration**
- *GRAPHLAB_SFRAME_SORT_PIVOT_ESTIMATION_SAMPLE_SIZE*
The number of random rows to sample from the SFrame to estimate the
sort pivots used to partition the sort. Defaults to 2000000.
- *GRAPHLAB_SFRAME_SORT_BUFFER_SIZE*
The maximum estimated memory consumption sort is allowed to use. Increasing
this will increase the size of each sort partition, and will increase
performance with increased memory consumption. Defaults to 2GB.
**Join Performance Configuration**
- *GRAPHLAB_SFRAME_JOIN_BUFFER_NUM_CELLS*
The maximum number of cells to buffer in memory. Increasing this will
increase the size of each join partition and will increase performance
with increased memory consumption.
If you have very large cells (very long strings for instance),
decreasing this value will help decrease memory consumption.
Defaults to 52428800.
**Groupby Aggregate Performance Configuration**
- *GRAPHLAB_SFRAME_GROUPBY_BUFFER_NUM_ROWS*
The number of groupby keys cached in memory. Increasing this will increase
performance with increased memory consumption. Defaults to 1048576.
**Advanced Configuration Variables**
- *GRAPHLAB_SFRAME_FILE_HANDLE_POOL_SIZE*
The maximum number of file handles to use when reading SFrames/SArrays.
Once this limit is exceeded, file handles will be recycled, reducing
performance. This limit should be rarely approached by most SFrame/SArray
      operations. Large SGraphs, however, may create a large number of SFrames
in which case increasing this limit may improve performance (You may
also need to increase the system file handle limit with "ulimit -n").
Defaults to 128.
    Parameters
    ----------
    name : string
        The name of the runtime configuration variable to set.
    value
        The value to set the variable to.
Returns
-------
Nothing
Raises
------
A RuntimeError if the key does not exist, or if the value cannot be
changed to the requested value.
"""
from ..connect import main as _glconnect
unity = _glconnect.get_unity()
ret = unity.set_global(name, value)
if ret != "":
        raise RuntimeError(ret)
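# Usage sketch (the key is taken from the docstring above; this caps the
# total intermediate SFrame/SArray cache at 4GB):
#
#     >>> import graphlab
#     >>> graphlab.set_runtime_config(
#     ...     'GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY', 4 * 1024 ** 3)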
_GLOB_RE = _re.compile("""[*?]""")
def _split_path_elements(url):
parts = _os.path.split(url)
m = _GLOB_RE.search(parts[-1])
if m:
return (parts[0], parts[1])
else:
return (url, "")
def crossproduct(d):
"""
Create an SFrame containing the crossproduct of all provided options.
Parameters
----------
d : dict
Each key is the name of an option, and each value is a list
of the possible values for that option.
Returns
-------
out : SFrame
There will be a column for each key in the provided dictionary,
and a row for each unique combination of all values.
Example
-------
settings = {'argument_1':[0, 1],
'argument_2':['a', 'b', 'c']}
print crossproduct(settings)
+------------+------------+
| argument_2 | argument_1 |
+------------+------------+
| a | 0 |
| a | 1 |
| b | 0 |
| b | 1 |
| c | 0 |
| c | 1 |
+------------+------------+
[6 rows x 2 columns]
"""
from .. import connect as _mt
_mt._get_metric_tracker().track('util.crossproduct')
from .. import SArray
d = [list(zip(list(d.keys()), x)) for x in _itertools.product(*list(d.values()))]
sa = [{k:v for (k,v) in x} for x in d]
return SArray(sa).unpack(column_name_prefix='')
def get_graphlab_object_type(url):
'''
Given url where a GraphLab Create object is persisted, return the GraphLab
Create object type: 'model', 'graph', 'sframe', or 'sarray'
'''
from ..connect import main as _glconnect
ret = _glconnect.get_unity().get_graphlab_object_type(_make_internal_url(url))
# to be consistent, we use sgraph instead of graph here
if ret == 'graph':
ret = 'sgraph'
return ret
def _assert_sframe_equal(sf1,
sf2,
check_column_names=True,
check_column_order=True,
check_row_order=True,
float_column_delta=None):
"""
Assert the two SFrames are equal.
The default behavior of this function uses the strictest possible
definition of equality, where all columns must be in the same order, with
the same names and have the same data in the same order. Each of these
stipulations can be relaxed individually and in concert with another, with
the exception of `check_column_order` and `check_column_names`, we must use
one of these to determine which columns to compare with one another.
Parameters
----------
sf1 : SFrame
sf2 : SFrame
check_column_names : bool
If true, assert if the data values in two columns are the same, but
they have different names. If False, column order is used to determine
which columns to compare.
check_column_order : bool
If true, assert if the data values in two columns are the same, but are
not in the same column position (one is the i-th column and the other
is the j-th column, i != j). If False, column names are used to
determine which columns to compare.
check_row_order : bool
If true, assert if all rows in the first SFrame exist in the second
SFrame, but they are not in the same order.
float_column_delta : float
The acceptable delta that two float values can be and still be
considered "equal". When this is None, only exact equality is accepted.
This is the default behavior since columns of all Nones are often of
float type. Applies to all float columns.
"""
from .. import SFrame as _SFrame
if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame):
raise TypeError("Cannot function on types other than SFrames.")
if not check_column_order and not check_column_names:
raise ValueError("Cannot ignore both column order and column names.")
sf1.__materialize__()
sf2.__materialize__()
if sf1.num_cols() != sf2.num_cols():
raise AssertionError("Number of columns mismatched: " +
str(sf1.num_cols()) + " != " + str(sf2.num_cols()))
s1_names = sf1.column_names()
s2_names = sf2.column_names()
sorted_s1_names = sorted(s1_names)
sorted_s2_names = sorted(s2_names)
if check_column_names:
if (check_column_order and (s1_names != s2_names)) or (sorted_s1_names != sorted_s2_names):
raise AssertionError("SFrame does not have same column names: " +
str(sf1.column_names()) + " != " + str(sf2.column_names()))
if sf1.num_rows() != sf2.num_rows():
raise AssertionError("Number of rows mismatched: " +
str(sf1.num_rows()) + " != " + str(sf2.num_rows()))
if not check_row_order and (sf1.num_rows() > 1):
sf1 = sf1.sort(s1_names)
sf2 = sf2.sort(s2_names)
names_to_check = None
if check_column_names:
names_to_check = list(zip(sorted_s1_names, sorted_s2_names))
else:
names_to_check = list(zip(s1_names, s2_names))
for i in names_to_check:
col1 = sf1[i[0]]
col2 = sf2[i[1]]
if col1.dtype() != col2.dtype():
raise AssertionError("Columns " + str(i) + " types mismatched.")
compare_ary = None
if col1.dtype() == float and float_column_delta is not None:
dt = float_column_delta
compare_ary = ((col1 > col2-dt) & (col1 < col2+dt))
else:
compare_ary = (sf1[i[0]] == sf2[i[1]])
if not compare_ary.all():
count = 0
for j in compare_ary:
if not j:
first_row = count
break
count += 1
raise AssertionError("Columns " + str(i) +
" are not equal! First differing element is at row " +
str(first_row) + ": " + str((col1[first_row],col2[first_row])))
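# Usage sketch (assumes graphlab is importable):
#
#     >>> from graphlab import SFrame
#     >>> a = SFrame({'x': [1, 2], 'y': ['a', 'b']})
#     >>> b = SFrame({'y': ['a', 'b'], 'x': [1, 2]})
#     >>> _assert_sframe_equal(a, b, check_column_order=False)  # no error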
def _get_temp_file_location():
'''
Returns user specified temporary file location.
The temporary location is specified through:
>>> graphlab.set_runtime_config('GRAPHLAB_CACHE_FILE_LOCATIONS', ...)
'''
from ..connect import main as _glconnect
unity = _glconnect.get_unity()
cache_dir = _convert_slashes(unity.get_current_cache_file_location())
if not _os.path.exists(cache_dir):
_os.makedirs(cache_dir)
return cache_dir
def _make_temp_directory(prefix):
'''
Generate a temporary directory that would not live beyond the lifetime of
unity_server.
Caller is expected to clean up the temp file as soon as the directory is no
    longer needed, but the directory will in any case be cleaned up when unity_server restarts.
'''
temp_dir = _make_temp_filename(prefix=str(prefix))
_os.makedirs(temp_dir)
return temp_dir
def _make_temp_filename(prefix):
'''
Generate a temporary file that would not live beyond the lifetime of
unity_server.
Caller is expected to clean up the temp file as soon as the file is no
longer needed. But temp files created using this method will be cleaned up
when unity_server restarts
'''
temp_location = _get_temp_file_location()
temp_file_name = '/'.join([temp_location, str(prefix)+str(_uuid.uuid4())])
return temp_file_name
# datetime utilities
_ZERO = _datetime.timedelta(0)
class _UTC(_datetime.tzinfo):
"""
A UTC datetime.tzinfo class modeled after the pytz library. It includes a
    __reduce__ method for pickling.
"""
    def localize(self, dt, is_dst=False):
        # Attach this tzinfo to a naive datetime (mirrors the pytz API;
        # fromutc() below relies on this method existing).
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)
    def fromutc(self, dt):
        if dt.tzinfo is None:
            return self.localize(dt)
        return super(_UTC, self).fromutc(dt)
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
def __reduce__(self):
return _UTC, ()
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
_utc = _UTC()
def _dt_to_utc_timestamp(t):
if t.tzname() == 'UTC':
return (t - _datetime.datetime(1970, 1, 1, tzinfo=_utc)).total_seconds()
elif not t.tzinfo:
return _time.mktime(t.timetuple())
else:
        raise ValueError('Only local time and UTC time are supported')
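# Examples (the first datetime is timezone-aware UTC, the second is naive
# local time and goes through mktime):
#
#     >>> _dt_to_utc_timestamp(_datetime.datetime(1970, 1, 1, tzinfo=_utc))
#     0.0
#     >>> _dt_to_utc_timestamp(_datetime.datetime.now())  # local-time path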
def _pickle_to_temp_location_or_memory(obj):
'''
If obj can be serialized directly into memory (via cloudpickle) this
will return the serialized bytes.
    Otherwise, gl_pickle is attempted: it generates a temporary directory,
    serializes the object into it, and returns the directory name. This
    directory will not have a lifespan greater than that of unity_server.
'''
from . import cloudpickle as cloudpickle
try:
# try cloudpickle first and see if that works
lambda_str = cloudpickle.dumps(obj)
return lambda_str
    except Exception:
        pass
    # nope, that did not work! let's try again with gl_pickle
filename = _make_temp_filename('pickle')
from .. import _gl_pickle
pickler = _gl_pickle.GLPickler(filename)
pickler.dump(obj)
pickler.close()
return filename
def get_log_location():
from ..connect import main as _glconnect
server = _glconnect.get_server()
if hasattr(server, 'unity_log'):
return server.unity_log
else:
return None
def get_client_log_location():
return client_log_file
def get_server_log_location():
return get_log_location()
def get_module_from_object(obj):
mod_str = obj.__class__.__module__.split('.')[0]
return _sys.modules[mod_str]
def infer_dbapi2_types(cursor, mod_info):
desc = cursor.description
result_set_types = [i[1] for i in desc]
    dbapi2_to_python = [ # a type code can match more than one, so ordered by
                         # preference (loop short-circuits when it finds a match)
(mod_info['DATETIME'], _datetime.datetime),
(mod_info['ROWID'],int),
(mod_info['NUMBER'],float),
]
ret_types = []
# Ugly nested loop because the standard only guarantees that a type code
# will compare equal to the module-defined types
for i in result_set_types:
type_found = False
for j in dbapi2_to_python:
if i is None or j[0] is None:
break
elif i == j[0]:
ret_types.append(j[1])
type_found = True
break
if not type_found:
ret_types.append(str)
return ret_types
def pytype_to_printf(in_type):
if in_type == int:
return 'd'
elif in_type == float:
return 'f'
else:
return 's'
|
|
import tempfile
import genonets.test.utils as utils
import genonets.test.compare_result_files as comparator
from genonets.cmdl_handler import CmdParser
from genonets.interface import Genonets
from genonets.constants import AnalysisConstants as Ac
class TestPaths:
@staticmethod
def run_test(cmd_args, ground_truth_dir, data_dir):
args = CmdParser(arguments=cmd_args).get_args()
gn = Genonets(args)
gn.create()
gn.analyze(analyses=[Ac.PATHS])
gn.save_network_results()
gn.save_genotype_results()
assert utils.num_files_matches(ground_truth_dir, data_dir)
assert comparator.compare_genotype_set_measures(
ground_truth_dir, data_dir
)
assert comparator.compare_genotype_measures(
ground_truth_dir, data_dir
)
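    # Each test below follows the same pattern: build a CLI argument list,
    # run the PATHS analysis into a fresh temporary directory, and compare
    # the generated result files against the recorded ground truth.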
@staticmethod
def test_1():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_1'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test1_input.tsv',
'--codon-alphabet=RNA',
'--genetic-code-file=genonets/test/data/inputs/paths/code_standard.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_2():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_2'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test2_input.tsv',
'--codon-alphabet=RNA',
'--genetic-code-file=genonets/test/data/inputs/paths/code_standard.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_3():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_3'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test3_input.tsv',
'--codon-alphabet=RNA',
'--genetic-code-file=genonets/test/data/inputs/paths/code_standard.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_4():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_4'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test4_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_5():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_5'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test5_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_6():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_6'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test6_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_7():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_7'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test7_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_8():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_8'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test8_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_9():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_9'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test9_input.tsv',
'--codon-alphabet=RNA',
'--genetic-code-file=genonets/test/data/inputs/paths/code_standard.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_10():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_10'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test10_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_11():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_11'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test11_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_12():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_12'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=Protein',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test12_input.tsv',
'--codon-alphabet=RNA',
'--genetic-code-file=genonets/test/data/inputs/paths/code_standard.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
@staticmethod
def test_13():
ground_truth_dir = 'genonets/test/data/ground_truth/paths/test_13'
with tempfile.TemporaryDirectory(prefix='test_epistasis_') as data_dir:
cmd_args = [
'--alphabet=DNA',
'--tau=0.0',
'--input-file=genonets/test/data/inputs/paths/test13_input.tsv',
f'--output-path={data_dir}'
]
TestPaths.run_test(cmd_args, ground_truth_dir, data_dir)
|
|
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the javascriptstatetracker module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
_FUNCTION_SCRIPT = """\
var a = 3;
function foo(aaa, bbb, ccc) {
var b = 4;
}
/**
* JSDoc comment.
*/
var bar = function(ddd, eee, fff) {
};
/**
* Verify that nested functions get their proper parameters recorded.
*/
var baz = function(ggg, hhh, iii) {
var qux = function(jjj, kkk, lll) {
};
// make sure that entering a new block does not change baz' parameters.
{};
};
"""
class FunctionTest(googletest.TestCase):
def testFunctionParse(self):
functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
self.assertEquals(4, len(functions))
# First function
function = functions[0]
self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(3, start_token.line_number)
self.assertEquals(0, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(5, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('foo', function.name)
self.assertIsNone(function.doc)
# Second function
function = functions[1]
self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(11, start_token.line_number)
self.assertEquals(10, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(13, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('bar', function.name)
self.assertIsNotNone(function.doc)
# Check function JSDoc
doc = function.doc
doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
comment_type = javascripttokens.JavaScriptTokenType.COMMENT
comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
self.assertEquals('JSDoc comment.',
tokenutil.TokensToString(comment_tokens).strip())
# Third function
function = functions[2]
self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(19, start_token.line_number)
self.assertEquals(10, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(24, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('baz', function.name)
self.assertIsNotNone(function.doc)
# Fourth function (inside third function)
function = functions[3]
self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(20, start_token.line_number)
self.assertEquals(12, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(21, end_token.line_number)
self.assertEquals(2, end_token.start_index)
self.assertEquals('qux', function.name)
self.assertIsNone(function.doc)
class CommentTest(googletest.TestCase):
def testGetDescription(self):
comment = self._ParseComment("""
/**
* Comment targeting goog.foo.
*
* This is the second line.
* @param {number} foo The count of foo.
*/
target;""")
self.assertEqual(
'Comment targeting goog.foo.\n\nThis is the second line.',
comment.description)
def testCommentGetTarget(self):
self.assertCommentTarget('goog.foo', """
/**
* Comment targeting goog.foo.
*/
goog.foo = 6;
""")
self.assertCommentTarget('bar', """
/**
* Comment targeting bar.
*/
var bar = "Karate!";
""")
self.assertCommentTarget('doThing', """
/**
* Comment targeting doThing.
*/
function doThing() {};
""")
self.assertCommentTarget('this.targetProperty', """
goog.bar.Baz = function() {
/**
* Comment targeting targetProperty.
*/
this.targetProperty = 3;
};
""")
self.assertCommentTarget('goog.bar.prop', """
/**
* Comment targeting goog.bar.prop.
*/
goog.bar.prop;
""")
self.assertCommentTarget('goog.aaa.bbb', """
/**
* Comment targeting goog.aaa.bbb.
*/
(goog.aaa.bbb)
""")
self.assertCommentTarget('theTarget', """
/**
* Comment targeting symbol preceded by newlines, whitespace,
* and parens -- things we ignore.
*/
(theTarget)
""")
self.assertCommentTarget(None, """
/**
* @fileoverview File overview.
*/
(notATarget)
""")
self.assertCommentTarget(None, """
/**
* Comment that doesn't find a target.
*/
""")
self.assertCommentTarget('theTarget.is.split.across.lines', """
/**
* Comment that addresses a symbol split across lines.
*/
(theTarget.is.split
.across.lines)
""")
self.assertCommentTarget('theTarget.is.split.across.lines', """
/**
* Comment that addresses a symbol split across lines.
*/
(theTarget.is.split.
across.lines)
""")
def _ParseComment(self, script):
"""Parse a script that contains one comment and return it."""
_, comments = testutil.ParseFunctionsAndComments(script)
self.assertEquals(1, len(comments))
return comments[0]
def assertCommentTarget(self, target, script):
comment = self._ParseComment(script)
self.assertEquals(target, comment.GetTargetIdentifier())
if __name__ == '__main__':
googletest.main()
|
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
CORE_LABELS = {
"ARM7TDMI-S": ["ARM7"],
"Cortex-M0" : ["M0", "CORTEX_M"],
"Cortex-M0+": ["M0P", "CORTEX_M"],
"Cortex-M1" : ["M1", "CORTEX_M"],
"Cortex-M3" : ["M3", "CORTEX_M"],
"Cortex-M4" : ["M4", "CORTEX_M"],
"Cortex-M4F" : ["M4", "CORTEX_M"],
"Cortex-M7" : ["M7", "CORTEX_M"],
"Cortex-M7F" : ["M7", "CORTEX_M"],
"Cortex-A9" : ["A9", "CORTEX_A"]
}
import os
import binascii
import struct
import shutil
from workspace_tools.patch import patch
from paths import TOOLS_BOOTLOADERS
class Target:
def __init__(self):
# ARM Core
self.core = None
# Is the disk provided by the interface chip of this board virtual?
self.is_disk_virtual = False
# list of toolchains that are supported by the mbed SDK for this target
self.supported_toolchains = None
# list of extra specific labels
self.extra_labels = []
# list of macros (-D)
self.macros = []
# Default online compiler:
self.default_toolchain = "ARM"
self.name = self.__class__.__name__
        # Code used to determine the device's platform.
        # This code is the prefix of the URL recorded in mbed.htm (on the mbed disk).
self.detect_code = []
def program_cycle_s(self):
return 4 if self.is_disk_virtual else 1.5
def get_labels(self):
return [self.name] + CORE_LABELS[self.core] + self.extra_labels
def init_hooks(self, hook, toolchain_name):
pass
### MCU Support ###
class CM4_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class CM4F_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4F_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
### NXP ###
# This class implements the post-link patching step needed by LPC targets
class LPCTarget(Target):
def __init__(self):
Target.__init__(self)
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("post", self.lpc_patch)
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC11C24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11CXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC1114(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U24_401']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["1040"]
class OC_MBUINO(LPC11U24):
def __init__(self):
LPC11U24.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.macros = ['TARGET_LPC11U24']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24_301(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC11U34_421(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class MICRONFCBOARD(LPC11U34_421):
def __init__(self):
LPC11U34_421.__init__(self)
self.macros = ['LPC11U34_421', 'APPNEARME_MICRONFCBOARD']
self.extra_labels = ['NXP', 'LPC11UXX', 'APPNEARME_MICRONFCBOARD']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class LPC11U35_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501_IBDAP(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_Y5_MBUG(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U37_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPCCAPPUCCINO(LPC11U37_501):
def __init__(self):
LPC11U37_501.__init__(self)
class ARCH_GPRS(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U37_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class LPC11U68(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC11U6X']
self.supported_toolchains = ["ARM", "uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1168"]
class LPC1347(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC13XX']
self.supported_toolchains = ["ARM", "GCC_ARM","IAR"]
class LPC1549(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC15XX']
self.supported_toolchains = ["uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1549"]
class LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'MBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.detect_code = ["1010"]
class ARCH_PRO(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class UBLOX_C027(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class XBED_LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'XBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.detect_code = ["1010"]
class LPC2368(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC23XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "GCC_CR"]
class LPC2460(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC2460']
self.supported_toolchains = ["GCC_ARM"]
class LPC810(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC812(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["1050"]
class LPC824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM","GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class SSCI824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC4088(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC408X']
self.supported_toolchains = ["ARM", "GCC_CR", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTarget.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
        # Pad the first part (internal flash) with 0xFF to 512 KiB
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTarget.lpc_patch(t_self, resources, elf, binf)
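        # Resulting single-file layout (sketch):
        #   [ ER_IROM1 (internal flash) | 0xFF padding to 512 KiB | ER_IROM2 (SPIFI flash) ]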
class LPC4088_DM(LPC4088):
pass
class LPC4330_M4(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR", "GCC_ARM"]
class LPC4330_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC4337(LPCTarget):
def __init__(self):
        LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4337']
self.supported_toolchains = ["ARM"]
class LPC1800(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC43XX']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC11U37H_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Freescale ###
class KL05Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL25Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "GCC_CW_EWL", "GCC_CW_NEWLIB", "GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0200"]
class KL26Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM","GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL43Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL46Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0220"]
class K20D50M(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
class TEENSY3_1(Target):
OUTPUT_EXT = 'hex'
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX', 'K20DX256']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset = 0)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class K22F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE']
self.macros = ["CPU_MK22FN512VLH12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0201"]
class K64F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F', 'FRDM']
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
self.detect_code = ["0240"]
class MTS_GAMBIT(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED", "TARGET_K64F"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
### STMicro ###
class NUCLEO_F030R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F030R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0725"]
class NUCLEO_F070RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F070RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0755"]
class NUCLEO_F072RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F072RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0730"]
class NUCLEO_F091RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F091RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0750"]
class NUCLEO_F103RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F103RB']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0700"]
class NUCLEO_F302R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F302R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0705"]
class NUCLEO_F303RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0745"]
class NUCLEO_F334R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0735"]
class NUCLEO_F401RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0720"]
class NUCLEO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0740"]
class NUCLEO_F446RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F446RE']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
class NUCLEO_L053R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053R8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0715"]
class NUCLEO_L073RZ(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L073RZ']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0760"]
class NUCLEO_L152RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0710"]
class STM32F3XX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['STM', 'STM32F3XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class STM32F407(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F4XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
class ARCH_MAX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.macros = ['LSI_VALUE=32000']
def program_cycle_s(self):
return 2
class DISCO_F051R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F051', 'STM32F051R8']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F100RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F100RB']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F303VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303', 'STM32F303VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F334C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0810"]
class DISCO_F407VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
class DISCO_F429ZI(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F429', 'STM32F429ZI']
self.supported_toolchains = ["GCC_ARM", "IAR"]
self.default_toolchain = "GCC_ARM"
class DISCO_L053C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F746NG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M7F"
self.extra_labels = ['STM', 'STM32F7', 'STM32F746', 'STM32F746NG']
self.supported_toolchains = ["ARM", "uARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["0815"]
class MTS_MDOT_F405RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F405RG']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=48000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
class MTS_MDOT_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=96000000', 'USE_PLL_HSE_EXTC=0', 'VECT_TAB_OFFSET=0x00010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
    # combine application binary with bootloader:
    # bootloader + 0xFF padding to 64kB + application + CRC32 checksum (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_MDOT_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
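        # Resulting image layout (sketch):
        #   [ bootloader | 0xFF padding to 64 KiB | application | CRC32 of the above (4 bytes, little-endian) ]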
class MTS_DRAGONFLY_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'VECT_TAB_OFFSET=0x08010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
    # combine application binary with bootloader:
    # bootloader + 0xFF padding to 64kB + application + CRC32 checksum (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_DRAGONFLY_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MOTE_L152RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["4100"]
class DISCO_F401VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401', 'STM32F401VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class UBLOX_C029(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F439', 'STM32F439ZI']
self.macros = ['HSE_VALUE=24000000', 'HSE_STARTUP_TIMEOUT=5000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class NZ32SC151(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L151RC']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
# After flashing device, how long to delay until we assume program is running
def program_cycle_s(self):
return 1.5
### Nordic ###
class NRF51822(Target):
# the following is a list of possible Nordic softdevices in decreasing order
# of preference.
EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's130_nrf51_1.0.0_softdevice.hex',
'offset' : 0x1C000
},
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_7.0.0_softdevice.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_6.0.0_softdevice.hex',
'offset' : 0x14000
}
]
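    # In each entry, 'offset' is the flash address at which application code is
    # expected to start; binary_hook below loads the raw application binary at
    # that offset before merging in the softdevice hex (assumption: the
    # softdevice occupies the flash below that offset).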
EXPECTED_BOOTLOADER_FILENAME = "nrf51822_bootloader.hex"
OUTPUT_EXT = 'hex'
MERGE_SOFT_DEVICE = True
MERGE_BOOTLOADER = False
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K"]
self.common_macros = ['NRF51']
self.macros = self.common_macros
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["1070"]
def program_cycle_s(self):
return 6
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM', 'IAR']:
hook.hook_add_binary("post", self.binary_hook)
    @staticmethod
    def binary_hook(t_self, resources, elf, binf):
        # Scan to find the actual paths of soft device and bootloader files
        sdf = None
        sd_entry = None
        blf = None
        for hexf in resources.hex_files:
            if hexf.find(t_self.target.EXPECTED_BOOTLOADER_FILENAME) != -1:
                blf = hexf
            else:
                for softdeviceAndOffsetEntry in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
                    if hexf.find(softdeviceAndOffsetEntry['name']) != -1:
                        # Remember the matched entry: after the loops finish, the
                        # loop variable may point at the last entry tried, not the match.
                        sdf = hexf
                        sd_entry = softdeviceAndOffsetEntry
                        break
        if sdf is None:
            t_self.debug("Hex file not found. Aborting.")
            return
        # Merge user code with softdevice
        from intelhex import IntelHex
        binh = IntelHex()
        binh.loadbin(binf, offset=sd_entry['offset'])
        if t_self.target.MERGE_SOFT_DEVICE is True:
            t_self.debug("Merge SoftDevice file %s" % sd_entry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % t_self.target.EXPECTED_BOOTLOADER_FILENAME)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class NRF51822_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K", "NRF51822"]
self.macros = ['TARGET_NRF51822', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class NRF51822_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "NRF51822_MKIT", "MCU_NRF51822", "MCU_NORDIC_16K", "NRF51822"]
self.macros = ['TARGET_NRF51822', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = False
class NRF51_DK(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K', 'NRF51_DK']
self.macros = ['TARGET_NRF51822', 'TARGET_NRF51_DK', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class NRF51_DK_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K', 'NRF51_DK']
self.macros = ['TARGET_NRF51822', 'TARGET_NRF51_DK', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.MERGE_SOFT_DEVICE = False
class NRF51_DONGLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_32K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class ARCH_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
self.supported_form_factors = ["ARDUINO"]
class SEEED_TINY_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class SEEED_TINY_BLE_BOOT(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'SEEED_TINY_BLE']
self.macros = ['TARGET_NRF51822', 'TARGET_SEEED_TINY_BLE', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class SEEED_TINY_BLE_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'SEEED_TINY_BLE']
self.macros = ['TARGET_NRF51822', 'TARGET_SEEED_TINY_BLE', 'TARGET_OTA_ENABLED']
self.macros += self.common_macros
self.MERGE_SOFT_DEVICE = False
class HRM1017(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class RBLAB_NRF51822(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
self.supported_form_factors = ["ARDUINO"]
class RBLAB_BLENANO(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class NRF51822_Y5_MBUG(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class XADOW_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class WALLBOT_BLE(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class DELTA_DFCM_NNN40(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K']
self.macros = ['TARGET_NRF51822']
self.macros += self.common_macros
class DELTA_DFCM_NNN40_OTA(NRF51822):
def __init__(self):
NRF51822.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NORDIC', 'MCU_NRF51822', 'MCU_NORDIC_16K', 'DELTA_DFCM_NNN40']
self.MERGE_SOFT_DEVICE = False
self.macros += self.common_macros
### ARM ###
class ARM_MPS2_Target(Target):
def __init__(self):
Target.__init__(self)
self.OUTPUT_EXT = 'axf'
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("replace", self.output_axf)
@staticmethod
def output_axf(t_self, resources, elf, bin):
shutil.copy(elf, bin)
t_self.debug("Passing ELF file %s" % bin)
class ARM_MPS2_M0(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0']
self.macros = ['CMSDK_CM0']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M0P(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0P']
self.macros = ['CMSDK_CM0plus']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M1(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M1"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M1']
self.macros = ['CMSDK_CM1']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M3(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M3']
self.macros = ['CMSDK_CM3']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M4(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M4']
self.macros = ['CMSDK_CM4']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M7(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M7']
self.macros = ['CMSDK_CM7']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2(ARM_MPS2_M4):
pass
### Renesas ###
class RZ_A1H(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"
def program_cycle_s(self):
return 2
### Maxim Integrated ###
class MAXWSNENV(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32610']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
class MAX32600MBED(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32600']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
### Silicon Labs ###
class EFM32GG_STK3700(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32GG990F1024']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32LG_STK3600(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32LG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32WG_STK3800(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32WG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32ZG_STK3200(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32ZG222F32']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
class EFM32HG_STK3400(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32HG322F64']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
### WIZnet ###
class WIZWIKI_W7500(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['WIZNET', 'W7500x', 'WIZwiki_W7500']
self.supported_toolchains = ["uARM", "ARM"]
self.default_toolchain = "ARM"
self.supported_form_factors = ["ARDUINO"]
### Atmel ###
class SAMD21J18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM21']
self.macros = ['__SAMD21J18A__']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class SAMR21G18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM21']
self.macros = ['__SAMR21G18A__']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
# Get a single instance for each target
TARGETS = [
### NXP ###
LPC11C24(),
LPC11U24(),
OC_MBUINO(), # LPC11U24
LPC11U24_301(),
LPC11U34_421(),
MICRONFCBOARD(), # LPC11U34_421
LPC11U35_401(),
LPC11U35_501(),
LPC11U35_501_IBDAP(),
XADOW_M0(), # LPC11U35_501
LPC11U35_Y5_MBUG(),
LPC11U37_501(),
LPCCAPPUCCINO(),# LPC11U37_501
ARCH_GPRS(), # LPC11U37_501
LPC11U68(),
LPC1114(),
LPC1347(),
LPC1549(),
LPC1768(),
ARCH_PRO(), # LPC1768
UBLOX_C027(), # LPC1768
XBED_LPC1768(), # LPC1768
LPC2368(),
LPC2460(),
LPC810(),
LPC812(),
LPC824(),
SSCI824(), # LPC824
LPC4088(),
LPC4088_DM(),
LPC4330_M4(),
LPC4330_M0(),
LPC4337(),
LPC11U37H_401(),
### Freescale ###
KL05Z(),
KL25Z(),
KL26Z(),
KL43Z(),
KL46Z(),
K20D50M(),
TEENSY3_1(),
K22F(),
K64F(),
MTS_GAMBIT(), # FRDM K64F
### STMicro ###
NUCLEO_F030R8(),
NUCLEO_F070RB(),
NUCLEO_F072RB(),
NUCLEO_F091RC(),
NUCLEO_F103RB(),
NUCLEO_F302R8(),
NUCLEO_F303RE(),
NUCLEO_F334R8(),
NUCLEO_F401RE(),
NUCLEO_F411RE(),
NUCLEO_F446RE(),
NUCLEO_L053R8(),
NUCLEO_L073RZ(),
NUCLEO_L152RE(),
STM32F3XX(),
STM32F407(),
DISCO_F051R8(),
DISCO_F100RB(),
DISCO_F303VC(),
DISCO_F334C8(),
DISCO_F746NG(),
DISCO_F407VG(), # STM32F407
ARCH_MAX(), # STM32F407
DISCO_F429ZI(),
DISCO_L053C8(),
MTS_MDOT_F405RG(),
MTS_MDOT_F411RE(),
MOTE_L152RC(),
MTS_DRAGONFLY_F411RE(),
DISCO_F401VC(),
UBLOX_C029(), # STM32F439
NZ32SC151(), # STM32L151
### Nordic ###
NRF51822(),
NRF51822_BOOT(), # nRF51822
NRF51822_OTA(), # nRF51822
NRF51_DK(),
NRF51_DK_BOOT(), # nRF51822
NRF51_DK_OTA(), # nRF51822
NRF51_DONGLE(),
ARCH_BLE(), # nRF51822
SEEED_TINY_BLE(), # nRF51822
SEEED_TINY_BLE_BOOT(),# nRF51822
SEEED_TINY_BLE_OTA(),# nRF51822
HRM1017(), # nRF51822
RBLAB_NRF51822(),# nRF51822
RBLAB_BLENANO(),# nRF51822
NRF51822_Y5_MBUG(),#nRF51822
WALLBOT_BLE(), # nRF51822
DELTA_DFCM_NNN40(), # nRF51822
DELTA_DFCM_NNN40_OTA(), # nRF51822
### ARM ###
ARM_MPS2_M0(),
ARM_MPS2_M0P(),
ARM_MPS2_M1(),
ARM_MPS2_M3(),
ARM_MPS2_M4(),
ARM_MPS2_M7(),
ARM_MPS2(),
### Renesas ###
RZ_A1H(),
### Maxim Integrated ###
MAXWSNENV(),
MAX32600MBED(),
### Silicon Labs ###
EFM32GG_STK3700(),
EFM32LG_STK3600(),
EFM32WG_STK3800(),
EFM32ZG_STK3200(),
EFM32HG_STK3400(),
### WIZnet ###
WIZWIKI_W7500(),
    ### Atmel ###
    SAMD21J18A(),
SAMR21G18A(),
]
# Map each target name to its unique instance
TARGET_MAP = {}
for t in TARGETS:
TARGET_MAP[t.name] = t
TARGET_NAMES = TARGET_MAP.keys()
# Some targets with different name have the same exporters
EXPORT_MAP = { }
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for target in TARGETS:
for detect_code in target.detect_code:
result[detect_code] = target.name
return result
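# Usage sketch (hypothetical; not part of the build flow): map a detect code
# read from a board's mbed.htm back to its platform name.
#
#     >>> get_target_detect_codes().get("0240")
#     'K64F'
#     >>> TARGET_MAP['LPC1768'].get_labels()
#     ['LPC1768', 'M3', 'CORTEX_M', 'NXP', 'LPC176X', 'MBED_LPC1768']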
|
|
########################################################
# Hi there, curious student.                           #
#                                                      #
# This submission script downloads some tests,         #
# runs the tests against your code,                    #
# and then sends the results to a server for grading.  #
#                                                      #
# Changing this script might cause your                #
# submissions to fail.                                 #
# You can use the option '--dry-run' to see the tests  #
# that would be run without actually running them.     #
########################################################
import os, sys, doctest, traceback, urllib.request, urllib.parse, urllib.error, base64, ast, re, imp, json
import hashlib
import pdb
SUBMIT_VERSION = '4.0'
RECEIPT_DIR = os.path.join('.', 'receipts')
session = 'matrix-002'
grader_url = 'class.coursera.org/%s/assignment' % session
static_url = 'courseratests.codingthematrix.com'
protocol = 'https'
dry_run = False
verbose = False
show_submission = False
show_feedback = False
login = None
password = None
################ FOR VERIFYING SIGNATURE ON TESTS ################
from collections import namedtuple
from hashlib import sha512
hashfn = sha512
PublicKey = namedtuple('PublicKey', ('N', 'e'))
def hash(lines, salt):
m = hashfn()
for line in lines:
m.update(str(line).encode())
m.update(str(salt).encode())
return m.digest()
def unsign(m, key): return pow(m, key.e, key.N)
def b2i(m): return int.from_bytes(m, 'little')
def verify_signature(lines, signature, key):
salt, sig = signature
hashed = hash(lines, salt)
return b2i(hashed) == unsign(sig, key)
def verify_signature_lines(lines, key):
i = iter(lines)
firstline = next(i)
salt_str, sign_str = firstline.split(' ')
(salt, sig) = int(salt_str), int(sign_str)
return verify_signature(i, (salt, sig), key)
def check_signature(response):
key = PublicKey(N=10810480223307555270754793974348137028346231911887128372498894236522333862768535904711981770850660563024357718007478468455827997422651868772756940504390694970913100697704378592429214786267348296187069428987220571233110244818841009602583740157557662581909170939997953247229188236715181458561434858401678704933253698171424919513416718407303681363275403114095516851428969948115240989059101545870985909066841768134526273721190057338992190632073739245354402667060338194897351550243889777461715790352313337931,e=356712077277075117461112781152011833907464773700347296194891295955027010053581043972802545021976353698381277440683503945344229100132712230552438667754457538982795034975143122540063545095552633393479508230675918312833788633196124838699032398201767280061)
return verify_signature_lines(response, key)
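# Verification sketch: the first line of a downloaded test file carries
# "<salt> <signature>"; the remaining lines are hashed together with the salt,
# and the file is accepted only if that hash equals signature**e mod N
# (textbook RSA verification against the course's public key above).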
def get_asgn_data(asgn_name):
try:
with urllib.request.urlopen('http://%s/%s.tests'%(static_url, asgn_name)) as tf:
response = tf.read().decode('utf8').split('\n')
except urllib.error.URLError:
print("Could not find tests for this assignment. Check your Internet connection.")
sys.exit(1)
except urllib.error.HTTPError:
print("Tests not available for assignment '%s'"%asgn_name)
sys.exit(1)
if check_signature(response):
return ast.literal_eval('\n'.join(response[1:]))
else:
print("Assignment data improperly signed!")
sys.exit(1)
########### END OF SIGNATURE-VERIFICATION CODE ###############
########### SOME AUXILIARY PROCEDURES FOR DOCTESTING #########
def test_format(obj, precision=2):
tf = lambda o: test_format(o, precision)
delimit = lambda o: ', '.join(o)
otype = type(obj)
if otype is str:
return repr(obj)
elif otype is float or otype is int:
if otype is int:
obj = float(obj)
if -0.000001 < obj < 0.000001:
obj = 0.0
fstr = '%%.%df' % precision
return fstr % obj
elif otype is set:
if len(obj) == 0:
return 'set()'
return '{%s}' % delimit(sorted(map(tf, obj)))
elif otype is dict:
return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
elif otype is list:
return '[%s]' % delimit(map(tf, obj))
elif otype is tuple:
return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) == 1 else '')
elif otype.__name__ in ['Vec','Mat']:
entries = tf({x:obj.f[x] for x in obj.f if tf(obj.f[x]) != tf(0)})
return '%s(%s, %s)' % (otype.__name__, tf(obj.D), entries)
else:
return str(obj)
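# Illustrative examples of the canonical formatting (derived from the rules
# above; a sketch, not executed here):
#   test_format(0.12345)   -> '0.12'
#   test_format({3, 1, 2}) -> '{1.00, 2.00, 3.00}'   (elements formatted, then sorted)
#   test_format((1,))      -> '(1.00,)'              (singleton tuples keep the comma)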
def find_lines(varname):
return [line for line in open(asgn_name+'.py') if line.startswith(varname)]
def find_line(varname):
ls = find_lines(varname)
if len(ls) != 1:
print("ERROR: stencil file should have exactly one line containing the string '%s'" % varname)
return None
return ls[0]
def use_comprehension(varname):
line = find_line(varname)
try:
parse_elements = ast.dump(ast.parse(line))
except SyntaxError:
raise SyntaxError("Sorry---for this task, comprehension must be on one line.")
return "comprehension" in parse_elements
def double_comprehension(varname):
line = find_line(varname)
try:
parse_elements = ast.dump(ast.parse(line))
except SyntaxError:
raise SyntaxError("Sorry---for this task, comprehension must be on one line.")
return parse_elements.count("comprehension") == 2
def line_contains_substr(varname, word):
line = find_line(varname)
return word in line
def substitute_in_assignment(varname, new_env):
assignment = find_line(varname)
g = globals().copy()
g.update(new_env)
return eval(compile(ast.Expression(ast.parse(assignment).body[0].value), '', 'eval'), g)
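# Example (hypothetical stencil line): if the stencil contains
#     answer = x + y
# then substitute_in_assignment('answer', {'x': 1, 'y': 2}) re-evaluates the
# right-hand side under the supplied bindings and returns 3.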
##### END AUXILIARY PROCEDURES FOR TESTS ################
def output(tests, test_vars):
dtst = doctest.DocTestParser().get_doctest(tests, test_vars, 0, '<string>', 0)
runner = ModifiedDocTestRunner()
runner.run(dtst)
return runner.results
class ModifiedDocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
self.results = []
return super(ModifiedDocTestRunner, self).__init__(*args, checker=OutputAccepter(), **kwargs)
def report_success(self, out, test, example, got):
self.results.append(got)
def report_unexpected_exception(self, out, test, example, exc_info):
exf = traceback.format_exception_only(exc_info[0], exc_info[1])[-1]
self.results.append(exf)
sys.stderr.write("TEST ERROR: "+exf) #added so as not to fail silently
class OutputAccepter(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
return True
def do_challenge(login, passwd, sid):
login, ch, state, ch_aux = get_challenge(login, sid)
if not all((login, ch, state)):
print('\n!! Challenge Failed: %s\n' % login)
sys.exit(1)
return challenge_response(login, passwd, ch), state
def get_challenge(email, sid):
values = {'email_address': email, "assignment_part_sid": sid, "response_encoding": "delim"}
data = urllib.parse.urlencode(values).encode('utf8')
req = urllib.request.Request(challenge_url, data)
with urllib.request.urlopen(req) as resp:
text = resp.read().decode('utf8').strip()
    # the response is '|'-delimited; the fields of interest are at indices 2, 4, 6 and 8
splits = text.split('|')
if len(splits) != 9:
print("Badly formatted challenge response: %s" % text)
sys.exit(1)
return splits[2], splits[4], splits[6], splits[8]
def challenge_response(login, passwd, ch):
sha1 = hashlib.sha1()
sha1.update("".join([ch, passwd]).encode('utf8'))
digest = sha1.hexdigest()
return digest
def submit(parts_string):
print('= Coding the Matrix Homework and Lab Submission')
print('Importing your stencil file')
try:
solution = __import__(asgn_name)
test_vars = vars(solution).copy()
except Exception as exc:
print(exc)
print("!! It seems that you have an error in your stencil file. Please make sure Python can import your stencil before submitting, as in...")
print("""
underwood:matrix klein$ python3
Python 3.4.1
>>> import """+asgn_name+"\n")
sys.exit(1)
    if 'coursera' not in test_vars:
print("This is not a Coursera stencil. Make sure your stencil is obtained from http://grading.codingthematrix.com/coursera/")
sys.exit(1)
print('Fetching problems')
source_files, problems = get_asgn_data(asgn_name)
test_vars['test_format'] = test_vars['tf'] = test_format
test_vars['find_lines'] = find_lines
test_vars['find_line'] = find_line
test_vars['use_comprehension'] = use_comprehension
test_vars['double_comprehension'] = double_comprehension
test_vars['line_contains_substr'] = line_contains_substr
test_vars['substitute_in_assignment'] = substitute_in_assignment
global login
if not login:
login = login_prompt()
global password
if not password:
password = password_prompt()
if not parts_string:
parts_string = parts_prompt(problems)
parts = parse_parts(parts_string, problems)
for sid, name, part_tests in parts:
print('== Submitting "%s"' % name)
coursera_sid = asgn_name + '#' + sid
#TODO: check challenge stuff
ch_resp, state = do_challenge(login, password, coursera_sid)
if dry_run:
print(part_tests)
else:
if 'DEV' in os.environ: sid += '-dev'
# to stop Coursera's strip() from doing anything, we surround in parens
results = output(part_tests, test_vars)
prog_out = '(%s)' % ''.join(map(str.rstrip, results))
src = source(source_files)
if verbose:
res_itr = iter(results)
for t in part_tests.split('\n'):
print(t)
if t[:3] == '>>>':
print(next(res_itr), end='')
if show_submission:
print('Submission:\n%s\n' % prog_out)
feedback = submit_solution(name, coursera_sid, prog_out, src, state, ch_resp)
print(feedback)
def login_prompt():
return input('username: ')
def password_prompt():
return input('password: ')
def parts_prompt(problems):
print('This assignment has the following parts:')
# change to list all the possible parts?
for i, (name, parts) in enumerate(problems):
if parts:
print(' %d) %s' % (i+1, name))
else:
print(' %d) [NOT AUTOGRADED] %s' % (i+1, name))
return input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
def parse_range(s, problems):
try:
s = s.split('-')
if len(s) == 1:
index = int(s[0])
            if index == 0:
return list(range(1, len(problems)+1))
else:
return [int(s[0])]
elif len(s) == 2:
            return list(range(int(s[0]), 1 + int(s[1])))
except:
pass
return [] # Invalid value
def parse_parts(string, problems):
pr = lambda s: parse_range(s, problems)
parts = map(pr, string.split(','))
flat_parts = sum(parts, [])
return sum((problems[i-1][1] for i in flat_parts if 0<i<=len(problems)), [])
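# Illustrative example of the range grammar handled above, assuming a
# hypothetical eight-problem assignment where each problem owns one part:
def _parse_parts_demo():
    problems = [('p%d' % i, [('sid%d' % i, 'p%d' % i, '>>> tests')])
                for i in range(1, 9)]
    # "1, 4-7" selects problems 1, 4, 5, 6 and 7; a bare "0" selects all.
    selected = parse_parts('1, 4-7', problems)
    assert len(selected) == 5
    return selected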
def submit_solution(name, sid, output, source_text, st, ch_resp):
b64ize = lambda s: str(base64.encodebytes(s.encode('utf-8')), 'ascii')
values = { 'challenge_response' : ch_resp
, 'assignment_part_sid' : sid
, 'email_address' : login
, 'submission' : b64ize(output)
, 'submission_aux' : b64ize(source_text)
, 'state' : st
}
submit_url = '%s://%s/submit' % (protocol, grader_url)
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(submit_url, data)
with urllib.request.urlopen(req) as response:
        return response.read().decode('utf-8')
def import_module(module):
mpath, mname = os.path.split(module)
mname = os.path.splitext(mname)[0]
return imp.load_module(mname, *imp.find_module(mname, [mpath]))
def source(source_files):
src = ['# submit version: %s\n' % SUBMIT_VERSION]
for fn in source_files:
src.append('# %s' % fn)
with open(fn) as source_f:
src.append(source_f.read())
src.append('')
return '\n'.join(src)
def strip(s): return s.strip() if isinstance(s, str) else s
def canonicalize_key(key_value_pair):
return tuple(map(lambda s:s.strip(), (key_value_pair[0].upper(), key_value_pair[1])))
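# Illustrative sketch of the profile.txt format parsed below: each line is
# "KEY value", split on the first run of whitespace, with the key
# upper-cased by canonicalize_key. The sample lines are made up.
def _profile_parse_demo():
    import re as _re
    lines = ['username alice\n', 'location  Providence, RI\n']
    return dict(canonicalize_key(_re.match(r"\s*([^\s]*)\s*(.*)\s*", line).groups())
                for line in lines)  # -> {'USERNAME': 'alice', 'LOCATION': 'Providence, RI'}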
if __name__ == '__main__':
try:
        with open("profile.txt") as f:
            profile = dict(canonicalize_key(re.match(r"\s*([^\s]*)\s*(.*)\s*", line).groups())
                           for line in f)
except (IOError, OSError):
print("No profile.txt found")
profile = {}
import argparse
parser = argparse.ArgumentParser()
helps = [ 'assignment name'
, 'numbers or ranges of problems/tasks to submit'
, 'your username (you can make one up)'
, 'your password (optional)'
, 'your geographical location (optional, used for mapping activity)'
, 'display tests without actually running them'
, 'specify where to send the results'
, 'use an encrypted connection to the grading server'
, 'use an unencrypted connection to the grading server'
]
ihelp = iter(helps)
parser.add_argument('assign', help=next(ihelp))
parser.add_argument('tasks', default=profile.get('TASKS',None), nargs='*', help=next(ihelp))
parser.add_argument('--username', '--login', default=profile.get('USERNAME',None), help=next(ihelp))
parser.add_argument('--password', default=profile.get('PASSWORD',None), help=next(ihelp))
parser.add_argument('--location', default=profile.get('LOCATION',None), help=next(ihelp))
parser.add_argument('--dry-run', default=False, action='store_true', help=next(ihelp))
parser.add_argument('--report', default=profile.get('REPORT',None), help=next(ihelp))
group = parser.add_mutually_exclusive_group()
group.add_argument('--https', dest="protocol", const="https", action="store_const", help=next(ihelp))
group.add_argument('--http', dest="protocol", const="http", action="store_const", help=next(ihelp))
parser.add_argument('--verbose', default=False, action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--show-submission', default=False, action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--show-feedback', default=False, action='store_true', help=argparse.SUPPRESS)
args = parser.parse_args()
asgn_name = os.path.splitext(args.assign)[0]
report = args.report
location = args.location
dry_run = args.dry_run
if args.protocol: protocol = args.protocol
challenge_url = '%s://class.coursera.org/%s/assignment/challenge' % (protocol, session)
print("CHALLENGE URL ", challenge_url)
verbose = args.verbose
show_submission = args.show_submission
show_feedback = args.show_feedback
login = args.username
password = args.password
submit(','.join(args.tasks))
|
|
# -*- coding: utf-8 -*-
import sys, traceback, time, threading, os, base64, logging
import spotify
import spotipy
from spotify.manager import SpotifySessionManager, SpotifyPlaylistManager, \
SpotifyContainerManager
from spotipy.core.basewrapper import BaseWrapper
import spotifyplaylist
from spotipy.backends.local.gstreamerwrapper import GStreamerWrapper
from spotipy.backends.spotify.spotifyaudiofile import SpotifyAudioFile
from spotipy.backends.spotify.spotifyartist import SpotifyArtist
from spotipy.backends.spotify.spotifyalbum import SpotifyAlbum
from spotipy.core.album import Album
from spotipy.core.artist import Artist
from spotipy.core.result import Result
from spotify import Link, SpotifyError, ToplistBrowser
#perform multiple fall-backs on availability of sound controllers
try:
from spotify.alsahelper import AlsaController
except ImportError:
try:
from spotify.osshelper import OssController as AlsaController
    except ImportError:
        logger = logging.getLogger("spotipy.backends.spotify.SpotifyWrapper")
        logger.warning("python-alsaaudio or python-oss must be installed for Spotify")
raise
## playlist callbacks ##
class DummyPlaylistManager(SpotifyPlaylistManager):
def tracks_added(self, p, t, i, u):
#print 'Tracks added to playlist %s' % p.name()
pass
def tracks_moved(self, p, t, i, u):
#print 'Tracks moved in playlist %s' % p.name()
pass
def tracks_removed(self, p, t, u):
#print 'Tracks removed from playlist %s' % p.name()
pass
## container callbacks ##
class DummyContainerManager(SpotifyContainerManager):
def __init__(self, session_manager):
self.loaded = False
self.session_manager = session_manager
def container_loaded(self, c, u):
self.loaded = True
self.session_manager.container_manager_loaded()
def playlist_added(self, c, p, i, u):
self.session_manager.playlist_manager.watch(p)
p.add_playlist_state_changed_callback(self.session_manager.playlist_state_changed, u)
def playlist_moved(self, c, p, oi, ni, u):
pass
def playlist_removed(self, c, p, i, u):
pass
class SpotifyWrapper(BaseWrapper, SpotifySessionManager, threading.Thread):
appkey_file = None
cache_location = spotipy.CACHE_PATH
settings_location = spotipy.SETTINGS_PATH
application_key = base64.decodestring(spotipy.SPOTIFY_KEY)
user_agent = 'spotipy'
__now_playing = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(SpotifyWrapper, cls).__new__(
cls, *args, **kwargs)
return cls._instance
@classmethod
def get_instance(cls):
return cls._instance
def __init__(self):
threading.Thread.__init__(self)
SpotifySessionManager.__init__(self, spotipy.settings.SPOTIFY_USERNAME, spotipy.settings.SPOTIFY_PASSWORD, True)
self.__logger = logging.getLogger("spotipy.backends.spotify.SpotifyWrapper")
self.__logger.info("SpotiPy uses SPOTIFY(R) CORE")
self.__audio = AlsaController()
self.ctr = None
self.__status = spotipy.PlayState.NULL
self.__watchers = []
self.setDaemon(True)
self.__track_end_listeners = []
self.__md_counter = 0
self.__md_count_to_ms_factor = 46.4
self.__playlist_cb = {}
self.__playlist_complete = False
self.add_playlist_listener(self.__spotify_playlist_listener)
self.__login()
self.__local_files = GStreamerWrapper.get_instance()
self.__audio_files = {}
def __spotify_playlist_listener(self, sender):
""" Called when a playlist is loaded """
complete = True
for i, pl in enumerate(sender.ctr):
complete = complete and pl.is_loaded()
        if complete and self.__playlist_cb is not None:
self.__playlist_complete = True
l = self.__playlist_cb
self.__playlist_cb = {}
for cb in l.keys():
cb(self.__spotify_get_playlists(), l[cb])
    def get_playlists(self, cb, *data):
        if self.__playlist_complete:
            # already loaded: fire the callback immediately. The original
            # body here was an empty `pass`, so a callback registered after
            # loading finished might never have fired.
            cb(self.__spotify_get_playlists(), data)
            return
        self.__playlist_cb[cb] = data
def __spotify_get_playlists(self):
spotify_node = spotifyplaylist.SpotifyPlayList(self, None, "Spotify")
row = [spotify_node]
for i, x in enumerate(self.ctr):
if x.type() == "playlist":
tmp_row = spotifyplaylist.SpotifyPlayList(self, x)
tmp_row.set_parent_list(row[0])
row[0].add_child_list(tmp_row)
elif x.type() == "folder_start":
tmp_row = spotifyplaylist.SpotifyPlayList(self, x)
tmp_row.set_parent_list(row[0])
row[0].add_child_list(tmp_row)
row.insert(0, tmp_row)
elif x.type() == "folder_end":
row.pop(0)
return [spotify_node]
    def add_playlist_listener(self, listener):
        self.__watchers.append(listener)
def add_track_listener(self, listener):
self.__track_end_listeners.append(listener)
def __login(self):
self.playlist_manager = DummyPlaylistManager()
self.container_manager = DummyContainerManager(self)
self.__logger.info("Logging in to Spotify")
def run(self):
try:
self.connect()
except SpotifyError as ex:
self.__logger.warning("SpotifyError: %s", str(ex))
def playlist_state_changed(self, p, u):
if p.is_loaded():
for x in self.__watchers:
x(self)
def get_starred(self):
return spotifyplaylist.SpotifyPlayList(self, self.starred, "Starred")
def container_manager_loaded(self):
for x in self.__watchers:
x(self)
def logged_in(self, session, error):
if error:
self.__logger.error("%s", error)
return
self.session = session
if spotipy.settings.SPOTIFY_BITRATE == 160:
self.__logger.info("Spotify Bitrate: %s", "160")
self.set_preferred_bitrate(0)
elif spotipy.settings.SPOTIFY_BITRATE == 320:
self.__logger.info("Spotify Bitrate: %s", "320")
self.set_preferred_bitrate(1)
else:
self.__logger.info("Spotify Bitrate: %s", "96")
self.set_preferred_bitrate(2)
try:
#print "Logged In..."
self.__logger.debug("Logged in to Spotify")
self.ctr = session.playlist_container()
self.container_manager.watch(self.ctr)
self.starred = session.starred()
except Exception as ex:
self.__logger.error("Spotify Login Error: %s", str(ex))
#traceback.print_exc()
def logged_out(self, session):
pass
def do_search(self, query, callback = None):
self.__search_cb = callback
self.session.search(query, self.__search_callback)
return []
def __search_callback(self, results, user):
r = Result()
try:
            if self.__search_cb is not None:
count = 1
l = []
for track in results.tracks():
af = self.create_audio_file(track)
af.set_index(count)
af.set_tracknumber(count)
count += 1
af.set_wrapper(self)
insert = True
for x in l:
if x._probably_equal(af):
x.add_similar(af)
insert = False
break
if insert:
l.append(af)
r.set_tracks(l)
#print r.get_tracks()
r.set_query(results.query())
r.set_suggestion(results.did_you_mean())
artist_list = []
for artist in results.artists():
a = SpotifyArtist(artist, self)
artist_list.append(a)
r.set_artists(artist_list)
album_list = []
for album in results.albums():
a = SpotifyAlbum(album, self)
album_list.append(a)
r.set_albums(album_list)
self.__search_cb(self, r)
        except Exception:
            raise  # the callback invocation that used to follow here was unreachable
return
def __load_track(self, track):
if self.__status == spotipy.PlayState.PLAYING:
self.stop()
self.session.load(track)
self.__md_counter = 0
self.__now_playing = track
def pause(self):
if self.__status == spotipy.PlayState.PAUSED:
self.session.play(1)
self.__status = spotipy.PlayState.PLAYING
elif self.__status == spotipy.PlayState.PLAYING:
self.session.play(0)
self.__status = spotipy.PlayState.PAUSED
return self.__status
def get_username(self):
return self.session.username()
def play(self, track):
        if isinstance(track, (str, unicode)):
track = spotify.Link.from_string(track).as_track()
l = self.__local_files.find_tracks(track.album().name(), track.artists()[0].name(), track.name())
if len(l) > 0:
self.__logger.info("Playing Local Track: %s", l[0].get_uri())
return l[0].play()
self.__load_track(track)
self.session.play(1)
self.__status = spotipy.PlayState.PLAYING
return self.create_audio_file(track)
def stop(self):
self.session.play(0)
self.__status = spotipy.PlayState.STOPPED
self.__now_playing = None
def get_position(self):
        if self.__now_playing is not None:
return self.__md_counter * self.__md_count_to_ms_factor
else:
return -1
def __seek(self, seconds):
pass
position = property(get_position)
def is_playing(self):
        return self.__now_playing is not None
def get_duration(self):
        if self.__now_playing is not None:
return self.__now_playing.duration()
return -1
duration = property(get_duration)
def get_norm_position(self):
        if self.__now_playing is not None:
ret = float(self.position) / float(self.duration)
return ret * float(100.0)
else:
return -1
def set_norm_position(self, percent):
if self.__status == spotipy.PlayState.PLAYING:
try:
factor = (float(100.0) / percent)
f_value = float(self.duration) / factor
value = int(f_value)
except ZeroDivisionError:
f_value = 0
#this will happen if seeking back to the beginning of the track.
value = 0
except:
#this will probably happen when we are seeking to the end of the track.
value = -1
if value >= 0:
try:
self.session.play(0)
self.session.seek(value)
self.session.play(1)
self.__md_counter = int(f_value / self.__md_count_to_ms_factor)
except Exception as ex:
self.__logger.warning("Error Playing: %s", str(ex))
norm_position = property(get_norm_position, set_norm_position)
def music_delivery(self, *a, **kw):
self.__md_counter = self.__md_counter + 1
return self.__audio.music_delivery(*a, **kw)
def end_of_track(self, sess):
try:
for listener in self.__track_end_listeners:
listener(self, None)
except Exception as ex:
self.__logger.warning("Error In Listener: %s", str(ex))
def set_preferred_bitrate(self, value):
if value == 96:
value = 2
elif value == 160:
value = 0
elif value == 320:
value = 1
try:
            if self.session is not None:
self.session.set_preferred_bitrate(value)
        except Exception:
pass
#def browse(self, link, callback):
# if link.type() == link.LINK_ALBUM:
# browser = self.session.browse_album(link.as_album(), callback)
# while not browser.is_loaded():
# time.sleep(0.1)
# for track in browser:
# print track
# if link.type() == link.LINK_ARTIST:
# browser = self.session.browse_artist(link.as_artist(), callback)
# while not browser.is_loaded():
# time.sleep(0.1)
# for album in browser:
# print album.name()
# callback(browser)
def watch(self, p, unwatch=False):
if not unwatch:
self.__logger.debug("Watching playlist: %s", p.name())
            self.playlist_manager.watch(p)
else:
self.__logger.debug("Unwatching playlist: %s", p.name())
self.playlist_manager.unwatch(p)
#def toplist(self, tl_type, tl_region):
# print repr(tl_type)
# print repr(tl_region)
# def callback(tb, ud):
# for i in xrange(len(tb)):
# print '%3d: %s' % (i+1, tb[i].name())
# tb = ToplistBrowser(tl_type, tl_region, callback)
def shutdown(self):
self.stop()
self.session.logout()
def create_audio_file(self, data):
key = str(spotify.Link.from_track(data, 0))
        if key not in self.__audio_files:
import spotifyaudiofile
ret = spotifyaudiofile.SpotifyAudioFile(data)
ret.set_wrapper(self)
self.__audio_files[key] = ret
return ret
else:
return self.__audio_files[key]
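# A minimal, self-contained sketch (illustrative only) of the __new__-based
# singleton that SpotifyWrapper uses above: every construction of the class
# returns the same cached instance.
def _singleton_sketch():
    class _Single(object):
        _instance = None
        def __new__(cls):
            if not cls._instance:
                cls._instance = super(_Single, cls).__new__(cls)
            return cls._instance
    return _Single() is _Single()  # -> True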
|
|
# coding: utf8
import logging
import os
import sys
from argparse import ArgumentParser, Namespace
from datetime import datetime
from pathlib import Path
from appdirs import AppDirs
from gphotos import Utils
from gphotos.authorize import Authorize
from gphotos.Checks import do_check, get_check
from gphotos.GoogleAlbumsSync import GoogleAlbumsSync
from gphotos.GooglePhotosDownload import GooglePhotosDownload
from gphotos.GooglePhotosIndex import GooglePhotosIndex
from gphotos.LocalData import LocalData
from gphotos.LocalFilesScan import LocalFilesScan
from gphotos.Logging import setup_logging
from gphotos.restclient import RestClient
from gphotos.Settings import Settings
from gphotos import __version__
if os.name == "nt":
import subprocess
orig_Popen = subprocess.Popen
class Popen_patch(subprocess.Popen):
def __init__(self, *args, **kargs):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kargs["startupinfo"] = startupinfo
super().__init__(*args, **kargs)
subprocess.Popen = Popen_patch
else:
import fcntl
APP_NAME = "gphotos-sync"
log = logging.getLogger(__name__)
class GooglePhotosSyncMain:
def __init__(self):
self.data_store: LocalData = None
self.google_photos_client: RestClient = None
self.google_photos_idx: GooglePhotosIndex = None
self.google_photos_down: GooglePhotosDownload = None
self.google_albums_sync: GoogleAlbumsSync = None
self.local_files_scan: LocalFilesScan = None
self._start_date = None
self._end_date = None
self.auth: Authorize = None
        try:
            self.version_string = "version: {}, database schema version {}".format(
                __version__, LocalData.VERSION
            )
        except TypeError:
            self.version_string = "(version not available)"
        # kept on self so main() can call self.parser.parse_args() and log
        # self.version_string later
        self.parser = parser = ArgumentParser(
            epilog=self.version_string, description="Google Photos download tool"
        )
parser.add_argument(
"root_folder", help="root of the local folders to download into"
)
album_group = parser.add_mutually_exclusive_group()
album_group.add_argument(
"--album",
action="store",
help="only synchronize the contents of a single album. "
'use quotes e.g. "album name" for album names with spaces',
)
album_group.add_argument(
"--album-regex",
action="store",
metavar='REGEX',
help="""only synchronize albums that match regular expression.
regex is case insensitive and unanchored. e.g. to select two albums:
"^(a full album name|another full name)$" """
)
parser.add_argument(
"--log-level",
help="Set log level. Options: critical, error, warning, info, debug, trace. "
"trace logs all Google API calls to a file with suffix .trace",
default="warning",
)
parser.add_argument(
"--logfile",
action="store",
help="full path to debug level logfile, default: <root>/gphotos.log. "
"If a directory is specified then a unique filename will be "
"generated.",
)
parser.add_argument(
"--compare-folder",
action="store",
help="root of the local folders to compare to the Photos Library",
)
parser.add_argument(
"--favourites-only",
action="store_true",
help="only download media marked as favourite (star)",
)
parser.add_argument(
"--flush-index",
action="store_true",
help="delete the index db, re-scan everything",
)
parser.add_argument(
"--rescan",
action="store_true",
help="rescan entire library, ignoring last scan date. Use this if you "
"have added photos to the library that "
"predate the last sync, or you have deleted some of the local "
"files",
)
parser.add_argument(
"--retry-download",
action="store_true",
help="check for the existence of files marked as already downloaded "
"and re-download any missing ones. Use "
"this if you have deleted some local files",
)
parser.add_argument(
"--skip-video", action="store_true", help="skip video types in sync"
)
parser.add_argument(
"--skip-shared-albums",
action="store_true",
help="skip albums that only appear in 'Sharing'",
)
parser.add_argument(
"--album-date-by-first-photo",
action="store_true",
help="Make the album date the same as its earliest "
"photo. The default is its last photo",
)
        parser.add_argument(
            "--start-date",
            help="Set the earliest date of files to sync, format YYYY-MM-DD",
            default=None,
        )
        parser.add_argument(
            "--end-date",
            help="Set the latest date of files to sync, format YYYY-MM-DD",
            default=None,
        )
parser.add_argument(
"--db-path",
help="Specify a pre-existing folder for the index database. "
"Defaults to the root of the local download folders",
default=None,
)
parser.add_argument(
"--albums-path",
help="Specify a folder for the albums "
"Defaults to the 'albums' in the local download folders",
default="albums",
)
parser.add_argument(
"--photos-path",
help="Specify a folder for the photo files. "
"Defaults to the 'photos' in the local download folders",
default="photos",
)
parser.add_argument(
"--use-flat-path",
action="store_true",
help="Mandate use of a flat directory structure ('YYYY-MMM') and not "
"a nested one ('YYYY/MM') . ",
)
parser.add_argument(
"--omit-album-date",
action="store_true",
help="Don't include year and month in album folder names.",
)
parser.add_argument("--new-token", action="store_true", help="Request new token")
parser.add_argument(
"--index-only",
action="store_true",
help="Only build the index of files in .gphotos.db - no downloads",
)
parser.add_argument(
"--skip-index",
action="store_true",
help="Use index from previous run and start download immediately",
)
parser.add_argument(
"--do-delete",
action="store_true",
help="""Remove local copies of files that were deleted.
Must be used with --flush-index since the deleted items must be removed
from the index""",
)
parser.add_argument(
"--skip-files",
action="store_true",
help="Dont download files, just refresh the album links (for testing)",
)
parser.add_argument(
"--skip-albums", action="store_true", help="Dont download albums (for testing)"
)
parser.add_argument(
"--use-hardlinks",
action="store_true",
help="Use hardlinks instead of symbolic links in albums and comparison"
" folders",
)
parser.add_argument(
"--no-album-index",
action="store_true",
help="only index the photos library - skip indexing of folder contents "
"(for testing)",
)
parser.add_argument(
"--case-insensitive-fs",
action="store_true",
help="add this flag if your filesystem is case insensitive",
)
parser.add_argument(
"--max-retries",
help="Set the number of retries on network timeout / failures",
default=5,
)
parser.add_argument(
"--max-threads",
help="Set the number of concurrent threads to use for parallel "
"download of media - reduce this number if network load is "
"excessive",
default=20,
)
parser.add_argument(
"--secret",
help="Path to client secret file (by default this is in the "
"application config directory)",
)
parser.add_argument(
"--archived",
action="store_true",
help="Download media items that have been marked as archived",
)
parser.add_argument(
"--progress",
action="store_true",
help="show progress of indexing and downloading in warning log",
)
        parser.add_argument(
            "--max-filename",
            help="Set the maximum filename length for target filesystem. "
            "This overrides the automatic detection.",
            default=0,
        )
        parser.add_argument(
            "--ntfs",
            action="store_true",
            help="Declare that the target filesystem is ntfs (or ntfs like). "
            "This overrides the automatic detection.",
        )
parser.add_help = True
def setup(self, args: Namespace, db_path: Path):
root_folder = Path(args.root_folder).absolute()
compare_folder = None
if args.compare_folder:
compare_folder = Path(args.compare_folder).absolute()
app_dirs = AppDirs(APP_NAME)
self.data_store = LocalData(db_path, args.flush_index)
credentials_file = db_path / ".gphotos.token"
if args.secret:
secret_file = Path(args.secret)
else:
secret_file = Path(app_dirs.user_config_dir) / "client_secret.json"
if args.new_token and credentials_file.exists():
credentials_file.unlink()
scope = [
"https://www.googleapis.com/auth/photoslibrary.readonly",
"https://www.googleapis.com/auth/photoslibrary.sharing",
]
photos_api_url = (
"https://photoslibrary.googleapis.com/$discovery" "/rest?version=v1"
)
self.auth = Authorize(
scope, credentials_file, secret_file, int(args.max_retries)
)
self.auth.authorize()
settings = Settings(
start_date=Utils.string_to_date(args.start_date),
end_date=Utils.string_to_date(args.end_date),
shared_albums=not args.skip_shared_albums,
album_index=not args.no_album_index,
use_start_date=args.album_date_by_first_photo,
album=args.album,
album_regex=args.album_regex,
favourites_only=args.favourites_only,
retry_download=args.retry_download,
case_insensitive_fs=args.case_insensitive_fs,
include_video=not args.skip_video,
rescan=args.rescan,
archived=args.archived,
photos_path=Path(args.photos_path),
albums_path=Path(args.albums_path),
use_flat_path=args.use_flat_path,
max_retries=int(args.max_retries),
max_threads=int(args.max_threads),
omit_album_date=args.omit_album_date,
use_hardlinks=args.use_hardlinks,
progress=args.progress,
ntfs_override=args.ntfs
)
self.google_photos_client = RestClient(photos_api_url, self.auth.session)
self.google_photos_idx = GooglePhotosIndex(
self.google_photos_client, root_folder, self.data_store, settings
)
self.google_photos_down = GooglePhotosDownload(
self.google_photos_client, root_folder, self.data_store, settings
)
self.google_albums_sync = GoogleAlbumsSync(
self.google_photos_client,
root_folder,
self.data_store,
args.flush_index or args.retry_download or args.rescan,
settings,
)
if args.compare_folder:
self.local_files_scan = LocalFilesScan(
root_folder, compare_folder, self.data_store
)
def do_sync(self, args: Namespace):
files_downloaded = 0
with self.data_store:
if not args.skip_index:
if not args.skip_files and not args.album and not args.album_regex:
self.google_photos_idx.index_photos_media()
if not args.index_only:
if not args.skip_files:
files_downloaded = self.google_photos_down.download_photo_media()
if (
not args.skip_albums
and not args.skip_index
and (files_downloaded > 0 or args.skip_files or args.rescan)
) or (args.album is not None or args.album_regex is not None):
self.google_albums_sync.index_album_media()
# run download again to pick up files indexed in albums only
if not args.index_only:
if not args.skip_files:
files_downloaded = (
self.google_photos_down.download_photo_media()
)
if not args.index_only:
if (
not args.skip_albums
and (files_downloaded > 0 or args.skip_files or args.rescan)
or (args.album is not None or args.album_regex is not None)
):
self.google_albums_sync.create_album_content_links()
if args.do_delete:
self.google_photos_idx.check_for_removed()
if args.compare_folder:
if not args.skip_index:
self.local_files_scan.scan_local_files()
self.google_photos_idx.get_extra_meta()
self.local_files_scan.find_missing_gphotos()
def start(self, args: Namespace):
self.do_sync(args)
@staticmethod
    def fs_checks(root_folder: Path, args: Namespace):
Utils.minimum_date(root_folder)
# store the root folder filesystem checks globally for all to inspect
do_check(root_folder, int(args.max_filename), bool(args.ntfs))
# check if symlinks are supported
# NTFS supports symlinks, but is_symlink() fails
if not args.ntfs:
if not get_check().is_symlink:
args.skip_albums = True
# check if file system is case sensitive
if not args.case_insensitive_fs:
if not get_check().is_case_sensitive:
args.case_insensitive_fs = True
return args
    def main(self, test_args: list = None):
start_time = datetime.now()
args = self.parser.parse_args(test_args)
root_folder = Path(args.root_folder).absolute()
db_path = Path(args.db_path) if args.db_path else root_folder
if not root_folder.exists():
root_folder.mkdir(parents=True, mode=0o700)
setup_logging(args.log_level, args.logfile, root_folder)
log.warning(f"gphotos-sync {__version__} {start_time}")
args = self.fs_checks(root_folder, args)
lock_file = db_path / "gphotos.lock"
fp = lock_file.open("w")
with fp:
try:
if os.name != "nt":
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
log.warning("EXITING: database is locked")
sys.exit(0)
log.info(self.version_string)
# configure and launch
# noinspection PyBroadException
try:
self.setup(args, db_path)
self.start(args)
except KeyboardInterrupt:
log.error("User cancelled download")
log.debug("Traceback", exc_info=True)
            sys.exit(1)
except BaseException:
log.error("\nProcess failed.", exc_info=True)
            sys.exit(1)
finally:
log.warning("Done.")
elapsed_time = datetime.now() - start_time
log.info("Elapsed time = %s", elapsed_time)
def main():
GooglePhotosSyncMain().main()
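# A minimal sketch (illustrative, POSIX-only) of the single-instance guard
# used in GooglePhotosSyncMain.main() above: a second process asking for the
# same non-blocking exclusive lock gets OSError instead of waiting.
def _lock_sketch(lock_path: Path):
    """Return an open, locked handle, or None if another process holds the
    lock. The lock is released when the returned handle is closed."""
    import fcntl  # main() already restricts this path to os.name != "nt"
    fp = lock_path.open("w")
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fp
    except OSError:
        fp.close()
        return None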
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for using the TensorFlow C API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import api_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.client import pywrap_tf_session as c_api
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
class ScopedTFStatus(object):
"""Wrapper around TF_Status that handles deletion."""
def __init__(self):
self.status = c_api.TF_NewStatus()
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteStatus is not None:
c_api.TF_DeleteStatus(self.status)
class ScopedTFGraph(object):
"""Wrapper around TF_Graph that handles deletion."""
def __init__(self):
self.graph = c_api.TF_NewGraph()
# Note: when we're destructing the global context (i.e when the process is
# terminating) we may have already deleted other modules. By capturing the
# DeleteGraph function here, we retain the ability to cleanly destroy the
# graph at shutdown, which satisfies leak checkers.
self.deleter = c_api.TF_DeleteGraph
def __del__(self):
self.deleter(self.graph)
class ScopedTFImportGraphDefOptions(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
def __init__(self):
self.options = c_api.TF_NewImportGraphDefOptions()
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteImportGraphDefOptions is not None:
c_api.TF_DeleteImportGraphDefOptions(self.options)
class ScopedTFImportGraphDefResults(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
def __init__(self, results):
self.results = results
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteImportGraphDefResults is not None:
c_api.TF_DeleteImportGraphDefResults(self.results)
class ScopedTFFunction(object):
"""Wrapper around TF_Function that handles deletion."""
def __init__(self, func):
self.func = func
# Note: when we're destructing the global context (i.e when the process is
# terminating) we may have already deleted other modules. By capturing the
# DeleteFunction function here, we retain the ability to cleanly destroy the
# Function at shutdown, which satisfies leak checkers.
self.deleter = c_api.TF_DeleteFunction
def __del__(self):
if self.func is not None:
self.deleter(self.func)
self.func = None
class ScopedTFBuffer(object):
"""An internal class to help manage the TF_Buffer lifetime."""
def __init__(self, buf_string):
self.buffer = c_api.TF_NewBufferFromString(compat.as_bytes(buf_string))
def __del__(self):
c_api.TF_DeleteBuffer(self.buffer)
class ApiDefMap(object):
"""Wrapper around Tf_ApiDefMap that handles querying and deletion.
The OpDef protos are also stored in this class so that they could
be queried by op name.
"""
def __init__(self):
op_def_proto = op_def_pb2.OpList()
buf = c_api.TF_GetAllOpList()
try:
op_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
self._api_def_map = c_api.TF_NewApiDefMap(buf)
finally:
c_api.TF_DeleteBuffer(buf)
self._op_per_name = {}
for op in op_def_proto.op:
self._op_per_name[op.name] = op
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteApiDefMap is not None:
c_api.TF_DeleteApiDefMap(self._api_def_map)
def put_api_def(self, text):
c_api.TF_ApiDefMapPut(self._api_def_map, text, len(text))
def get_api_def(self, op_name):
api_def_proto = api_def_pb2.ApiDef()
buf = c_api.TF_ApiDefMapGet(self._api_def_map, op_name, len(op_name))
try:
api_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
finally:
c_api.TF_DeleteBuffer(buf)
return api_def_proto
def get_op_def(self, op_name):
if op_name in self._op_per_name:
return self._op_per_name[op_name]
raise ValueError("No entry found for " + op_name + ".")
def op_names(self):
return self._op_per_name.keys()
@tf_contextlib.contextmanager
def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
with tf_buffer(some_string) as buf:
c_api.TF_SomeFunction(buf)
# buf has been deleted
Args:
data: An optional `bytes`, `str`, or `unicode` object. If not None, the
yielded buffer will contain this data.
Yields:
Created TF_Buffer
"""
if data:
buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
else:
buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
c_api.TF_DeleteBuffer(buf)
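# Illustrative stdlib version (not part of the TF API) of the
# create/yield/delete pattern tf_buffer() uses above: contextlib turns the
# try/finally cleanup into a with-statement guard.
def _scoped_demo():
  import contextlib
  @contextlib.contextmanager
  def scoped(make, delete):
    obj = make()
    try:
      yield obj
    finally:
      delete(obj)
  released = []
  with scoped(list, released.append) as buf:
    buf.append(1)
  return released  # -> [[1]]: delete ran on scope exit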
def tf_output(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret
def tf_operations(graph):
"""Generator that yields every TF_Operation in `graph`.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# pylint: disable=protected-access
pos = 0
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
while c_op is not None:
yield c_op
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
# pylint: enable=protected-access
def new_tf_operations(graph):
"""Generator that yields newly-added TF_Operations in `graph`.
Specifically, yields TF_Operations that don't have associated Operations in
`graph`. This is useful for processing nodes added by the C API.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# TODO(b/69679162): do this more efficiently
for c_op in tf_operations(graph):
try:
graph._get_operation_by_tf_operation(c_op) # pylint: disable=protected-access
except KeyError:
yield c_op
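# Illustrative sketch (not part of the TensorFlow API) of the "capture the
# deleter" idiom the Scoped* wrappers above rely on: binding the delete
# function to an instance attribute in __init__ keeps it reachable in
# __del__ during interpreter shutdown, when module globals may already have
# been cleared to None.
class _ScopedResource(object):
  """Toy RAII wrapper demonstrating the idiom."""
  def __init__(self, handle, deleter):
    self.handle = handle
    self.deleter = deleter  # captured now so __del__ never sees a cleared global
  def __del__(self):
    if self.handle is not None:
      self.deleter(self.handle)
      self.handle = None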
|
|
"""
Synthetic data loaders for testing.
"""
from bcolz import ctable
from numpy import (
arange,
array,
float64,
full,
iinfo,
uint32,
)
from pandas import DataFrame, Timestamp
from six import iteritems
from sqlite3 import connect as sqlite3_connect
from .base import PipelineLoader
from .frame import DataFrameLoader
from zipline.data.us_equity_pricing import (
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class ConstantLoader(PipelineLoader):
"""
Synthetic PipelineLoader that returns a constant value for each column.
Parameters
----------
constants : dict
Map from column to value(s) to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame of the same shape as `mask`.
mask : pandas.DataFrame
Mask indicating when assets existed.
Indices of this frame are used to align input queries.
Notes
-----
Adjustments are unsupported with ConstantLoader.
"""
def __init__(self, constants, dates, assets):
loaders = {}
for column, const in iteritems(constants):
frame = DataFrame(
const,
index=dates,
columns=assets,
dtype=column.dtype,
)
loaders[column] = DataFrameLoader(
column=column,
baseline=frame,
adjustments=None,
)
self._loaders = loaders
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load by delegating to sub-loaders.
"""
out = []
for col in columns:
try:
loader = self._loaders[col]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.extend(loader.load_adjusted_array([col], dates, assets, mask))
return out
class SyntheticDailyBarWriter(BcolzDailyBarWriter):
"""
Bcolz writer that creates synthetic data based on asset lifetime metadata.
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (10,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
    We use days since Jan 1, 2000 to guarantee that there are no collisions
    while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : DatetimeIndex
Calendar to use for constructing asset lifetimes.
"""
OHLCV = ('open', 'high', 'low', 'close', 'volume')
OHLC = ('open', 'high', 'low', 'close')
PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC')
def __init__(self, asset_info, calendar):
super(SyntheticDailyBarWriter, self).__init__()
assert (
# Using .value here to avoid having to care about UTC-aware dates.
self.PSEUDO_EPOCH.value <
calendar.min().value <=
asset_info['start_date'].min().value
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
self._asset_info = asset_info
self._calendar = calendar
def _raw_data_for_asset(self, asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See class docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
dates = self._calendar[
self._calendar.slice_indexer(
self.asset_start(asset_id), self.asset_end(asset_id)
)
]
data = full(
(len(dates), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * (100 * 1000),
dtype=uint32,
)
# Add 10,000 * column-index to OHLCV columns
data[:, :5] += arange(5) * (10 * 1000)
        # Add days since Jan 1 2000 (the pseudo-epoch) for OHLCV columns.
data[:, :5] += (dates - self.PSEUDO_EPOCH).days[:, None]
frame = DataFrame(
data,
index=dates,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
frame['day'] = nanos_to_seconds(dates.asi8)
frame['id'] = asset_id
return ctable.fromdataframe(frame)
def asset_start(self, asset):
ret = self._asset_info.loc[asset]['start_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def asset_end(self, asset):
ret = self._asset_info.loc[asset]['end_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
@classmethod
def expected_value(cls, asset_id, date, colname):
"""
        Return the expected raw value for an asset/date/column triple.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100 * 1000
from_colname = cls.OHLCV.index(colname) * (10 * 1000)
from_date = (date - cls.PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
def expected_values_2d(self, dates, assets, colname):
"""
        Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
        Values before/after an asset's lifetime are filled with 0 for volume and
NaN for price columns.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
start, end = self.asset_start(asset), self.asset_end(asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
data[i, j] = self.expected_value(asset, date, colname)
return data
# BEGIN SUPERCLASS INTERFACE
def gen_tables(self, assets):
for asset in assets:
yield asset, self._raw_data_for_asset(asset)
def to_uint32(self, array, colname):
if colname in {'open', 'high', 'low', 'close'}:
# Data is stored as 1000 * raw value.
assert array.max() < (UINT_32_MAX / 1000), "Test data overflow!"
return array * 1000
else:
assert colname in ('volume', 'day'), "Unknown column: %s" % colname
return array
# END SUPERCLASS INTERFACE
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(':memory:')
writer = SQLiteAdjustmentWriter(conn, None, None)
empty = DataFrame({
'sid': array([], dtype=uint32),
'effective_date': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
empty_dividends = DataFrame({
'sid': array([], dtype=uint32),
'amount': array([], dtype=float64),
'record_date': array([], dtype='datetime64[ns]'),
'ex_date': array([], dtype='datetime64[ns]'),
'declared_date': array([], dtype='datetime64[ns]'),
'pay_date': array([], dtype='datetime64[ns]'),
})
writer.write(splits=empty, mergers=empty, dividends=empty_dividends)
super(NullAdjustmentReader, self).__init__(conn)
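# Illustrative check (not part of zipline) of the synthetic-data formula
# documented on SyntheticDailyBarWriter: raw value = 100,000 * asset_id
# + 10,000 * column_num + days since the pseudo-epoch. Asset id 7 and the
# date below are made up.
def _expected_value_sketch():
    date = Timestamp('2015-01-02', tz='UTC')
    days = (date - SyntheticDailyBarWriter.PSEUDO_EPOCH).days  # 5480
    want = 100000 * 7 + 10000 * 3 + days  # 'close' is column 3
    assert SyntheticDailyBarWriter.expected_value(7, date, 'close') == want
    return want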
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'NfSamplingModeEnum' : _MetaInfoEnum('NfSamplingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'random':'random',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NfCacheAgingModeEnum' : _MetaInfoEnum('NfCacheAgingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'normal':'normal',
'permanent':'permanent',
'immediate':'immediate',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NetFlow.FlowExporterMaps.FlowExporterMap.Udp' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
False,
[
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('1024', '65535')], [],
''' Configure Destination UDP port
''',
'destination_port',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
False,
[
_MetaInfoClassMember('interface-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting interface
table
''',
'interface_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sampler-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting sampler table
''',
'sampler_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting vrf table
''',
'vrf_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'options',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
False,
[
_MetaInfoClassMember('version-number', ATTRIBUTE, 'int' , None, None,
[('9', '10')], [],
''' Export version number
''',
'version_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('common-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify custom timeout for the template
''',
'common_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('data-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Data template configuration options
''',
'data_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options', REFERENCE_CLASS, 'Options' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
[], [],
''' Specify options for exporting templates
''',
'options',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Option template configuration options
''',
'options_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'version',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
False,
[
_MetaInfoClassMember('version', REFERENCE_LIST, 'Version' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
[], [],
''' Configure export version options
''',
'version',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'versions',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Destination' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
False,
[
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Destination IPv4 address
''',
'ip_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPV6 address of the tunnel destination
''',
'ipv6_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'destination',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap',
False,
[
_MetaInfoClassMember('exporter-map-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Exporter map name
''',
'exporter_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('destination', REFERENCE_CLASS, 'Destination' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
[], [],
''' Configure export destination (collector)
''',
'destination',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Specify DSCP value for export packets
''',
'dscp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('packet-length', ATTRIBUTE, 'int' , None, None,
[('512', '1468')], [],
''' Configure Maximum Value for Export Packet size
''',
'packet_length',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Configure source interface for collector
''',
'source_interface',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
[], [],
''' Use UDP as transport protocol
''',
'udp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('versions', REFERENCE_CLASS, 'Versions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
[], [],
''' Specify export version parameters
''',
'versions',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps',
False,
[
_MetaInfoClassMember('flow-exporter-map', REFERENCE_LIST, 'FlowExporterMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap',
[], [],
''' Exporter map name
''',
'flow_exporter_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
False,
[
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'NfSamplingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfSamplingModeEnum',
[], [],
''' Sampling mode
''',
'mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Sampling interval in units of packets
''',
'interval',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sample-number', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets to be sampled in the
sampling interval
''',
'sample_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-mode',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
False,
[
_MetaInfoClassMember('sampling-mode', REFERENCE_LIST, 'SamplingMode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
[], [],
''' Configure sampling mode
''',
'sampling_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-modes',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap',
False,
[
_MetaInfoClassMember('sampler-map-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Sampler map name
''',
'sampler_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('sampling-modes', REFERENCE_CLASS, 'SamplingModes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
[], [],
''' Configure packet sampling mode
''',
'sampling_modes',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps',
False,
[
_MetaInfoClassMember('flow-sampler-map', REFERENCE_LIST, 'FlowSamplerMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap',
[], [],
''' Sampler map name
''',
'flow_sampler_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('bgp-attr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify if BGP Attributes AS_PATH STD_COMM
should be exported
''',
'bgp_attr',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-bundle-member', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether to export physical ifh for
bundle interface
''',
'out_bundle_member',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether it exports the physical output
interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Flow record format (Either 'ipv4-raw'
,'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('bgp-attr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify if BGP Attributes AS_PATH STD_COMM
should be exported
''',
'bgp_attr',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-bundle-member', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether to export physical ifh for
bundle interface
''',
'out_bundle_member',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether it exports the physical output
interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Flow record format (Either 'ipv4-raw'
,'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-performance-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow' : {
'meta_info' : _MetaInfoClass('NetFlow',
False,
[
_MetaInfoClassMember('flow-exporter-maps', REFERENCE_CLASS, 'FlowExporterMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps',
[], [],
''' Configure a flow exporter map
''',
'flow_exporter_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-performance-table', REFERENCE_CLASS, 'FlowMonitorMapPerformanceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable',
[], [],
''' Configure a performance traffic flow monitor map
''',
'flow_monitor_map_performance_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-table', REFERENCE_CLASS, 'FlowMonitorMapTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable',
[], [],
''' Flow monitor map configuration
''',
'flow_monitor_map_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-sampler-maps', REFERENCE_CLASS, 'FlowSamplerMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps',
[], [],
''' Flow sampler map configuration
''',
'flow_sampler_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'net-flow',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
}
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Udp']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Destination']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info']
_meta_table['NetFlow.FlowExporterMaps']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
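

# --- Usage sketch (not part of the generated file) ---------------------------
# A minimal example of driving the model described by this metadata with the
# YDK runtime, assuming a NETCONF-enabled IOS XR device. The address and
# credentials are placeholders, and the exact provider signature depends on
# the installed ydk version.
if __name__ == '__main__':
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    from ydk.models.cisco_ios_xr import Cisco_IOS_XR_traffmon_netflow_cfg as nf_cfg

    net_flow = nf_cfg.NetFlow()
    monitor_map = net_flow.FlowMonitorMapTable.FlowMonitorMap()
    monitor_map.monitor_map_name = 'fmm-ipv4'
    monitor_map.record.record_name = 'ipv4-raw'
    net_flow.flow_monitor_map_table.flow_monitor_map.append(monitor_map)

    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    CRUDService().create(provider, net_flow)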
|
|
import datetime
from bitmovin import Bitmovin, Encoding, S3Output, H264CodecConfiguration, AACCodecConfiguration, H264Profile, \
StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, MuxingStream, \
S3Input, FairPlayDRM, TSMuxing, HlsManifest, AudioMedia, VariantStream
from bitmovin.errors import BitmovinError
API_KEY = '<YOUR_API_KEY>'
S3_INPUT_ACCESSKEY = '<YOUR_S3_INPUT_ACCESSKEY>'
S3_INPUT_SECRETKEY = '<YOUR_S3_INPUT_SECRETKEY>'
S3_INPUT_BUCKETNAME = '<YOUR_S3_INPUT_BUCKETNAME>'
S3_INPUT_PATH = '<YOUR_S3_INPUT_PATH>'
S3_OUTPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_OUTPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_OUTPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
FAIRPLAY_KEY = '<YOUR_FAIRPLAY_KEY>'
FAIRPLAY_IV = '<YOUR_FAIRPLAY_IV>'
FAIRPLAY_URI = '<YOUR_FAIRPLAY_LICENSING_URL>'
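# Build a filesystem-friendly timestamp (e.g. 2017-01-01__12-00-00) so every
# run of this example writes to a fresh output prefix in the S3 bucket.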
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
def main():
bitmovin = Bitmovin(api_key=API_KEY)
s3_input = S3Input(access_key=S3_INPUT_ACCESSKEY,
secret_key=S3_INPUT_SECRETKEY,
bucket_name=S3_INPUT_BUCKETNAME,
                       name='Sample S3 Input')
s3_input = bitmovin.inputs.S3.create(s3_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='hls fairplay example encoding - {}'.format(date_component))
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_codec_configuration_480p = H264CodecConfiguration(name='example_video_codec_configuration_480p',
bitrate=1200000,
rate=None,
height=480,
profile=H264Profile.HIGH)
video_codec_configuration_480p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_480p).resource
video_codec_configuration_360p = H264CodecConfiguration(name='example_video_codec_configuration_360p',
bitrate=800000,
rate=None,
height=360,
profile=H264Profile.HIGH)
video_codec_configuration_360p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_360p).resource
video_codec_configuration_240p = H264CodecConfiguration(name='example_video_codec_configuration_240p',
bitrate=400000,
rate=None,
height=240,
profile=H264Profile.HIGH)
video_codec_configuration_240p = bitmovin.codecConfigurations.H264.create(video_codec_configuration_240p).resource
audio_codec_configuration_stereo = AACCodecConfiguration(name='example_audio_codec_configuration_stereo',
bitrate=128000,
rate=48000)
audio_codec_configuration_stereo = bitmovin.codecConfigurations.AAC.create(
audio_codec_configuration_stereo).resource
video_input_stream = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream_en_stereo = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
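    # Both stream inputs read from the same source file; SelectionMode.AUTO
    # leaves the choice of the concrete video/audio track to the encoder.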
video_stream_480p = Stream(codec_configuration_id=video_codec_configuration_480p.id,
input_streams=[video_input_stream],
name='Sample Stream 480p')
video_stream_480p = bitmovin.encodings.Stream.create(object_=video_stream_480p,
encoding_id=encoding.id).resource
video_stream_360p = Stream(codec_configuration_id=video_codec_configuration_360p.id,
input_streams=[video_input_stream],
name='Sample Stream 360p')
video_stream_360p = bitmovin.encodings.Stream.create(object_=video_stream_360p,
encoding_id=encoding.id).resource
video_stream_240p = Stream(codec_configuration_id=video_codec_configuration_240p.id,
input_streams=[video_input_stream],
name='Sample Stream 240p')
video_stream_240p = bitmovin.encodings.Stream.create(object_=video_stream_240p,
encoding_id=encoding.id).resource
audio_stream_en_stereo = Stream(codec_configuration_id=audio_codec_configuration_stereo.id,
input_streams=[audio_input_stream_en_stereo],
name='Sample Audio Stream EN Stereo')
audio_stream_en_stereo = bitmovin.encodings.Stream.create(object_=audio_stream_en_stereo,
encoding_id=encoding.id).resource
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
video_muxing_stream_480p = MuxingStream(video_stream_480p.id)
video_muxing_stream_360p = MuxingStream(video_stream_360p.id)
video_muxing_stream_240p = MuxingStream(video_stream_240p.id)
audio_muxing_stream_en_stereo = MuxingStream(audio_stream_en_stereo.id)
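    # Each rendition gets its own TS muxing with 4-second segments, wrapped
    # with FairPlay DRM; the same key, IV and licensing URI are reused for
    # every rendition.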
video_muxing_480p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/480p',
acl=[acl_entry])
video_muxing_480p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_480p],
name='Sample Muxing 480p')
video_muxing_480p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_480p,
encoding_id=encoding.id).resource
fair_play_480p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_480p_output],
name='FairPlay 480p')
fair_play_480p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_480p,
encoding_id=encoding.id,
muxing_id=video_muxing_480p.id).resource
video_muxing_360p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/360p',
acl=[acl_entry])
video_muxing_360p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_360p],
name='Sample Muxing 360p')
video_muxing_360p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_360p,
encoding_id=encoding.id).resource
fair_play_360p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_360p_output],
name='FairPlay 360p')
fair_play_360p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_360p,
encoding_id=encoding.id,
muxing_id=video_muxing_360p.id).resource
video_muxing_240p_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'video/hls/240p',
acl=[acl_entry])
video_muxing_240p = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[video_muxing_stream_240p],
name='Sample Muxing 240p')
video_muxing_240p = bitmovin.encodings.Muxing.TS.create(object_=video_muxing_240p,
encoding_id=encoding.id).resource
fair_play_240p = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[video_muxing_240p_output],
name='FairPlay 240p')
fair_play_240p = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_240p,
encoding_id=encoding.id,
muxing_id=video_muxing_240p.id).resource
audio_muxing_output_en_stereo = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH + 'audio/hls/en_2_0',
acl=[acl_entry])
audio_muxing_en_stereo = TSMuxing(segment_length=4,
segment_naming='seg_%number%.ts',
streams=[audio_muxing_stream_en_stereo],
name='Sample Audio Muxing EN Stereo')
audio_muxing_en_stereo = bitmovin.encodings.Muxing.TS.create(object_=audio_muxing_en_stereo,
encoding_id=encoding.id).resource
fair_play_audio = FairPlayDRM(key=FAIRPLAY_KEY,
iv=FAIRPLAY_IV,
uri=FAIRPLAY_URI,
outputs=[audio_muxing_output_en_stereo],
name='FairPlay Audio')
fair_play_audio = bitmovin.encodings.Muxing.TS.DRM.FairPlay.create(object_=fair_play_audio,
encoding_id=encoding.id,
muxing_id=audio_muxing_en_stereo.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
    ## Manifest ##
manifest_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
hls_manifest = HlsManifest(manifest_name='example_manifest_hls.m3u8',
outputs=[manifest_output],
name='Sample HLS FairPlay Manifest')
hls_manifest = bitmovin.manifests.HLS.create(hls_manifest).resource
audio_media = AudioMedia(name='Sample Audio Media',
group_id='audio_group',
segment_path=audio_muxing_output_en_stereo.outputPath,
encoding_id=encoding.id,
stream_id=audio_stream_en_stereo.id,
muxing_id=audio_muxing_en_stereo.id,
drm_id=fair_play_audio.id,
language='en',
uri='audiomedia.m3u8')
audio_media = bitmovin.manifests.HLS.AudioMedia.create(manifest_id=hls_manifest.id, object_=audio_media).resource
variant_stream_480p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_480p_output.outputPath,
uri='video_480p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_480p.id,
muxing_id=video_muxing_480p.id,
drm_id=fair_play_480p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_480p)
variant_stream_360p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_360p_output.outputPath,
uri='video_360p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_360p.id,
muxing_id=video_muxing_360p.id,
drm_id=fair_play_360p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_360p)
variant_stream_240p = VariantStream(audio=audio_media.groupId,
closed_captions='NONE',
segment_path=video_muxing_240p_output.outputPath,
uri='video_240p.m3u8',
encoding_id=encoding.id,
stream_id=video_stream_240p.id,
muxing_id=video_muxing_240p.id,
drm_id=fair_play_240p.id)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=variant_stream_240p)
bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)
try:
bitmovin.manifests.HLS.wait_until_finished(manifest_id=hls_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for HLS manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
main()
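
# On success the S3 bucket is expected to contain, under OUTPUT_BASE_PATH,
# FairPlay-encrypted TS segments in video/hls/{480p,360p,240p} and
# audio/hls/en_2_0, plus example_manifest_hls.m3u8 referencing the three
# variant playlists and the audio rendition.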
|
|
import math
import datetime
import json
import vcr
from dateutil.tz import tzutc
from behave import when, then
from hamcrest import assert_that, has_length, equal_to, has_item
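# vcrpy replays the HTTP traffic recorded in the named cassette files;
# record_mode='new_episodes' records any request that has no matching
# episode yet instead of raising an error.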
@when('a search for "{keywords}" keywords is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(keywords=keywords)
@when('a search for "{keywords}" keywords requesting availability is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword_with_availability(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(
keywords=keywords,
availability=True,
)
@when('a search for "{keywords}" keywords requesting availability with performances is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword_with_availability_with_performances(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(
keywords=keywords,
availability_with_performances=True,
)
@when('a search for "{keywords}" keywords requesting extra info is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword_with_extra_info(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(
keywords=keywords,
extra_info=True,
)
@when('a search for "{keywords}" keywords requesting reviews is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword_with_reviews(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(
keywords=keywords,
reviews=True,
)
@when('a search for "{keywords}" keywords requesting media is performed')
@vcr.use_cassette('fixtures/cassettes/search-keyword.yaml', record_mode='new_episodes')
def when_search_by_keyword_with_media(context, keywords):
keywords = keywords.split(', ')
assert keywords
context.events, _ = context.client.list_events(
keywords=keywords,
media=True,
)
@when('a search for events with performances "{start_days}"-"{end_days}" days from now is performed')
@vcr.use_cassette('fixtures/cassettes/search-daterange.yaml', record_mode='new_episodes')
def when_search_by_daterange(context, start_days, end_days):
now = datetime.datetime.now()
start_date = now + datetime.timedelta(days=int(start_days))
end_date = now + datetime.timedelta(days=int(end_days))
context.events, _ = context.client.list_events(
start_date=start_date,
end_date=end_date,
extra_info=True,
)
@when(u'a search for events in country with code "{country_code}" is performed')
@vcr.use_cassette('fixtures/cassettes/search-country.yaml', record_mode='new_episodes')
def when_search_for_country(context, country_code):
assert country_code
context.events, _ = context.client.list_events(country_code=country_code)
@when(u'a search for events in city with code "{city_code}" is performed')
@vcr.use_cassette('fixtures/cassettes/search-city.yaml', record_mode='new_episodes')
def when_search_for_city(context, city_code):
assert city_code
context.events, _ = context.client.list_events(city_code=city_code)
@when(u'a search for events within "{radius}"km of "{latitude}" lat and "{longitude}" long is performed')
@vcr.use_cassette('fixtures/cassettes/search-geo.yaml', record_mode='new_episodes')
def when_search_with_geo(context, radius, latitude, longitude):
assert radius and latitude and longitude
context.events, _ = context.client.list_events(radius=radius, latitude=latitude, longitude=longitude)
@when(u'a search for page 2 with a page length of 3 is performed')
@vcr.use_cassette('fixtures/cassettes/search-paginated.yaml')
def when_search_with_pages(context):
context.events, context.meta = context.client.list_events(page=2, page_length=3)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}"')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids)
@when(u'I fetch event "{event_id}"')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_event(context, event_id):
assert event_id
context.event, _ = context.client.get_event(event_id)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" requesting availability')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_availability(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(
event_ids,
availability=True,
)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" requesting availability with performances')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_availability_with_performances(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(
event_ids,
availability_with_performances=True,
)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" requesting extra info')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_extra_info(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids, extra_info=True)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" requesting reviews')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_reviews(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids, reviews=True)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" requesting media')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_media(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids, media=True)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" with add-ons')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_add_ons(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids, with_addons=True)
@when(u'we attempt to fetch events with the ID\'s "{event_ids}" with upsells')
@vcr.use_cassette('fixtures/cassettes/get-events-single.yaml', record_mode='new_episodes')
def when_get_events_with_upsells(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
context.events, _ = context.client.get_events(event_ids, with_upsells=True)
@then('a single event should be returned')
def then_a_single_event(context):
assert_that(context.events, has_length(1))
    if isinstance(context.events, dict):
        context.event = list(context.events.values())[0]
    else:
        context.event = context.events[0]
@then('a list of "{num}" events should be returned')
def then_a_list_of_num_events(context, num):
assert_that(context.events, has_length(int(num)))
@then('the events all have a performance between "{start}" and "{end}" days from now')
@vcr.use_cassette('fixtures/cassettes/event-performances-date-range.yaml', record_mode='new_episodes')
def then_events_have_performance_between_days(context, start, end):
assert len(context.events) > 0
now = datetime.datetime.now(tzutc())
start_date = now + datetime.timedelta(days=int(start))
end_date = now + datetime.timedelta(days=int(end))
for event in context.events:
if not event.has_performances:
continue
performances, meta = context.client.list_performances(
event.id,
start_date=start_date,
end_date=end_date,
page=0,
page_length=20,
)
if meta.auto_select:
continue
if any(performance.date_time >= start_date and
performance.date_time <= end_date
for performance in performances):
continue
raise Exception('Event with id %s does not have a performance inside the given date range' % event.id)
@then('the events are all within "{distance}"km of "{lat}" lat and "{lon}" long')
@vcr.use_cassette('fixtures/cassettes/search-geo.yaml', record_mode='new_episodes')
def then_events_are_all_within_distance_of_lat_long(context, distance, lat, lon):
assert len(context.events) > 0
RADIUS = 6371 # km
lat = float(lat)
lon = float(lon)
distance = float(distance)
rad_lat_origin = math.radians(lat)
for event in context.events:
# Use haversine formula to determine the distance between two coords
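        # a = sin^2(dlat / 2) + cos(lat1) * cos(lat2) * sin^2(dlon / 2)
        # c = 2 * atan2(sqrt(a), sqrt(1 - a));  distance = RADIUS * c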
rad_lat_event = math.radians(event.latitude)
delta_lat = math.radians(event.latitude - lat)
delta_lon = math.radians(event.longitude - lon)
a = math.sin(delta_lat/2) ** 2 + \
math.cos(rad_lat_origin) * math.cos(rad_lat_event) * \
math.sin(delta_lon/2) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
event_distance = RADIUS * c
if event_distance <= distance:
continue
raise Exception('Event with id %s and coordinates %f, %f is more than %d km from the given position' %
(event.id, event.latitude, event.longitude, distance))
@then(u'all events are in country with code "{country_code}"')
@vcr.use_cassette('fixtures/cassettes/search-country.yaml', record_mode='new_episodes')
def all_events_should_be_in_country(context, country_code):
assert len(context.events) > 0
for event in context.events:
assert_that(event.country_code, equal_to(country_code))
@then(u'all events are in city with code "{city_code}"')
@vcr.use_cassette('fixtures/cassettes/search-city.yaml', record_mode='new_episodes')
def all_events_should_be_in_city(context, city_code):
assert len(context.events) > 0
for event in context.events:
assert_that(event.city_code, equal_to(city_code))
@then('that event should have the ID of "{event_id}"')
def then_that_event_should_have_the_id_of_event_id(context, event_id):
assert_that(context.event.id, equal_to(event_id))
@then(u'those events should have the ID\'s "{event_ids}"')
def then_those_events_with_ids(context, event_ids):
expected_event_ids = event_ids.split(', ')
if isinstance(context.events, dict):
actual_event_ids = [event.id for event in context.events.values()]
else:
actual_event_ids = [event.id for event in context.events]
for event_id in expected_event_ids:
assert_that(actual_event_ids, has_item(event_id))
@then(u'the 7th, 8th and 9th events are returned')
@vcr.use_cassette('fixtures/cassettes/events-list-all.yaml')
def then_an_event_range(context):
all_events, _ = context.client.list_events()
assert all_events
expected_event_ids = [event.id for event in all_events[6:9]]
actual_event_ids = [event.id for event in context.events]
assert expected_event_ids
assert actual_event_ids
for event_id in expected_event_ids:
assert_that(actual_event_ids, has_item(event_id))
@then('the event has availability details')
def then_the_event_has_availability(context):
assert context.event.availability_details
context.availability_details = context.event.availability_details
@then('the availability details have performance information')
def then_the_availability_details_has_performance_info(context):
now = datetime.datetime.now()
next_month = now + datetime.timedelta(days=30)
details = context.availability_details
assert details[0].is_available(
next_month.year,
next_month.month,
)
@then(u'the event has content information')
def then_the_event_has_content_info(context):
assert context.event.content
content = context.event.content
expected = json.loads(context.text)
for key, values in expected.items():
assert_that(content, has_item(key))
value = content[key]
expected_value = values['value']
expected_value_html = values['value_html']
assert_that(value.value, equal_to(expected_value))
assert_that(value.value_html, equal_to(expected_value_html))
@then(u'the event has event information')
def then_the_event_has_event_info(context):
assert context.event.event_info
assert context.event.event_info_html
expected = json.loads(context.text)
assert_that(context.event.event_info, equal_to(expected['value']))
assert_that(context.event.event_info_html, equal_to(expected['value_html']))
@then(u'the event has event information starting with')
def then_the_event_has_event_info_starting_with(context):
assert context.event.event_info
assert context.event.event_info_html
expected = json.loads(context.text)
sample_size = len(expected['value'])
assert_that(context.event.event_info[:sample_size], equal_to(expected['value']))
sample_size = len(expected['value_html'])
assert_that(context.event.event_info_html[:sample_size], equal_to(expected['value_html']))
@then(u'the event has venue information')
def then_the_event_has_venue_info(context):
assert context.event.venue_addr
assert context.event.venue_addr_html
expected = json.loads(context.text)
assert_that(context.event.venue_addr, equal_to(expected['value']))
assert_that(context.event.venue_addr_html, equal_to(expected['value_html']))
@then(u'the event has "{reviews}" reviews')
def then_the_event_has_reviews(context, reviews):
assert context.event.reviews
assert_that(context.event.reviews, has_length(int(reviews)))
@then(u'the event has media')
def then_the_event_has_media(context):
assert context.event.media
media = context.event.media
expected = json.loads(context.text)
for key, values in expected.items():
assert_that(media, has_item(key))
item = media[key]
assert_that(item.caption, equal_to(values['caption']))
assert_that(item.caption_html, equal_to(values['caption_html']))
assert_that(item.name, equal_to(values['name']))
assert_that(item.url, equal_to(values['url']))
assert_that(item.secure, equal_to(values['secure']))
assert_that(item.width, equal_to(values['width']))
assert_that(item.height, equal_to(values['height']))
@then(u'the event has add-ons')
def then_the_event_has_addons(context):
assert context.event.addon_events
@then(u'the event has upsells')
def then_the_event_has_upsells(context):
assert context.event.upsell_events
@then(u'the add-ons contain "{event_ids}"')
def then_the_add_ons_contain_event(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
addon_event_ids = [event.id for event in context.event.addon_events]
assert set(addon_event_ids) >= set(event_ids)
@then(u'the upsells contain "{event_ids}"')
def then_the_upsells_contain_event(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
upsell_event_ids = [event.id for event in context.event.upsell_events]
assert set(upsell_event_ids) >= set(event_ids)
@then(u'the upsells do not contain "{event_ids}"')
def then_the_upsells_do_not_contain_event(context, event_ids):
event_ids = event_ids.split(', ')
assert event_ids
upsell_event_ids = [event.id for event in context.event.upsell_events]
assert set(upsell_event_ids).isdisjoint(set(event_ids))
@then(u'the event needs a performance to be selected')
def then_the_event_needs_a_performance_to_be_selected(context):
assert context.event.needs_performance
@then(u'the event does not need a performance to be selected')
def then_the_event_does_not_need_a_performance_to_be_selected(context):
    assert not context.event.needs_performance
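
# These steps are looked up by behave when the matching feature files run;
# assuming the standard project layout, running `behave` from the project
# root exercises them against the recorded cassettes.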
|
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 5 2014)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class Frm_Subject
###########################################################################
class Frm_Subject ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Problem Solver", pos = wx.DefaultPosition, size = wx.Size( 345,135 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
sz_Subject = wx.BoxSizer( wx.VERTICAL )
self.txt_subject = wx.StaticText( self, wx.ID_ANY, u"Choose Your Subject", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_subject.Wrap( -1 )
self.txt_subject.SetFont( wx.Font( 25, 70, 90, 90, True, wx.EmptyString ) )
self.txt_subject.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOWTEXT ) )
sz_Subject.Add( self.txt_subject, 0, wx.ALL, 5 )
sz_btns = wx.FlexGridSizer( 0, 2, 0, 0 )
sz_btns.SetFlexibleDirection( wx.BOTH )
sz_btns.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.btn_Thermo = wx.Button( self, wx.ID_ANY, u"Thermodynamics", wx.DefaultPosition, wx.DefaultSize, 0 )
self.btn_Thermo.SetDefault()
sz_btns.Add( self.btn_Thermo, 0, wx.ALL, 5 )
sz_Subject.Add( sz_btns, 1, wx.EXPAND, 5 )
self.SetSizer( sz_Subject )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.btn_Thermo.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToSetup )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def onBtnClick_ContinueToSetup( self, event ):
event.Skip()
###########################################################################
## Class Frm_ThermoSetup
###########################################################################
class Frm_ThermoSetup ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 304,307 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
sz_ThermoSetup = wx.BoxSizer( wx.VERTICAL )
self.tit_TS_Setup = wx.StaticText( self, wx.ID_ANY, u"Thermodynamics", wx.DefaultPosition, wx.DefaultSize, 0 )
self.tit_TS_Setup.Wrap( -1 )
sz_ThermoSetup.Add( self.tit_TS_Setup, 0, wx.ALL|wx.EXPAND, 5 )
sz_TS_Setups = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TS_Setups.SetFlexibleDirection( wx.BOTH )
sz_TS_Setups.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TS_Medium = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TS_Medium.SetFlexibleDirection( wx.BOTH )
sz_TS_Medium.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Medium = wx.StaticText( self, wx.ID_ANY, u"Medium", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Medium.Wrap( -1 )
sz_TS_Medium.Add( self.txt_TS_Medium, 0, wx.ALL, 5 )
self.btn_TS_Medium1 = wx.RadioButton( self, wx.ID_ANY, u"Ideal Gas", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_Medium1.SetValue( True )
sz_TS_Medium.Add( self.btn_TS_Medium1, 0, wx.ALL, 5 )
self.btn_TS_Medium2 = wx.RadioButton( self, wx.ID_ANY, u"Water", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_Medium2, 0, wx.ALL, 5 )
        self.btn_TS_Medium3 = wx.RadioButton( self, wx.ID_ANY, u"R-134a", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_Medium3, 0, wx.ALL, 5 )
self.txt_TS_System = wx.StaticText( self, wx.ID_ANY, u"System", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_System.Wrap( -1 )
sz_TS_Medium.Add( self.txt_TS_System, 0, wx.ALL, 5 )
self.btn_TS_System1 = wx.RadioButton( self, wx.ID_ANY, u"Closed", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_System1.SetValue( True )
sz_TS_Medium.Add( self.btn_TS_System1, 0, wx.ALL, 5 )
self.btn_TS_System2 = wx.RadioButton( self, wx.ID_ANY, u"Steady", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_System2, 0, wx.ALL, 5 )
self.btn_TS_System3 = wx.RadioButton( self, wx.ID_ANY, u"Unsteady", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Medium.Add( self.btn_TS_System3, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TS_Medium, 1, wx.EXPAND, 5 )
sz_TS_Container = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TS_Container.SetFlexibleDirection( wx.BOTH )
sz_TS_Container.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Container = wx.StaticText( self, wx.ID_ANY, u"Container", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Container.Wrap( -1 )
sz_TS_Container.Add( self.txt_TS_Container, 0, wx.ALL, 5 )
self.btn_TS_Container1 = wx.RadioButton( self, wx.ID_ANY, u"Rigid", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
self.btn_TS_Container1.SetValue( True )
sz_TS_Container.Add( self.btn_TS_Container1, 0, wx.ALL, 5 )
self.btn_TS_Container2 = wx.RadioButton( self, wx.ID_ANY, u"Piston", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container2, 0, wx.ALL, 5 )
self.btn_TS_Container3 = wx.RadioButton( self, wx.ID_ANY, u"Membrane", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container3, 0, wx.ALL, 5 )
self.btn_TS_Container4 = wx.RadioButton( self, wx.ID_ANY, u"Nozzle", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container4, 0, wx.ALL, 5 )
self.btn_TS_Container5 = wx.RadioButton( self, wx.ID_ANY, u"Turbine", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container5, 0, wx.ALL, 5 )
self.btn_TS_Container6 = wx.RadioButton( self, wx.ID_ANY, u"Heat Exchanger", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container6, 0, wx.ALL, 5 )
        self.btn_TS_Container7 = wx.RadioButton( self, wx.ID_ANY, u"Mixing Chamber", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TS_Container.Add( self.btn_TS_Container7, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TS_Container, 1, wx.EXPAND, 5 )
sz_TI_Etc = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TI_Etc.SetFlexibleDirection( wx.BOTH )
sz_TI_Etc.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TS_Etc = wx.StaticText( self, wx.ID_ANY, u"Etc", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Etc.Wrap( -1 )
sz_TI_Etc.Add( self.txt_TS_Etc, 0, wx.ALL, 5 )
        self.btn_TS_Adiabadic = wx.CheckBox( self, wx.ID_ANY, u"Adiabatic", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Adiabadic, 0, wx.ALL, 5 )
self.btn_TS_Isothermal = wx.CheckBox( self, wx.ID_ANY, u"Isothermal", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Isothermal, 0, wx.ALL, 5 )
        self.btn_TS_Reversable = wx.CheckBox( self, wx.ID_ANY, u"Reversible", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Reversable, 0, wx.ALL, 5 )
self.btn_TS_Polytropic = wx.CheckBox( self, wx.ID_ANY, u"Polytropic", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Polytropic, 0, wx.ALL, 5 )
self.btn_TS_Valve = wx.CheckBox( self, wx.ID_ANY, u"Valve", wx.DefaultPosition, wx.DefaultSize, 0 )
sz_TI_Etc.Add( self.btn_TS_Valve, 0, wx.ALL, 5 )
self.txt_TS_Units = wx.StaticText( self, wx.ID_ANY, u"Units", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TS_Units.Wrap( -1 )
sz_TI_Etc.Add( self.txt_TS_Units, 0, wx.ALL, 5 )
units_TS_ChooseChoices = []
self.units_TS_Choose = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 80,-1 ), units_TS_ChooseChoices, 0 )
self.units_TS_Choose.SetSelection( 0 )
sz_TI_Etc.Add( self.units_TS_Choose, 0, wx.ALL, 5 )
sz_TS_Setups.Add( sz_TI_Etc, 1, wx.EXPAND, 5 )
sz_ThermoSetup.Add( sz_TS_Setups, 1, wx.EXPAND, 5 )
self.btn_TS_Continue = wx.Button( self, wx.ID_ANY, u"Continue", wx.DefaultPosition, wx.DefaultSize, 0 )
self.btn_TS_Continue.SetDefault()
sz_ThermoSetup.Add( self.btn_TS_Continue, 0, wx.ALL, 5 )
self.SetSizer( sz_ThermoSetup )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.btn_TS_Medium1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_IdealGas )
self.btn_TS_Medium2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_Water )
self.btn_TS_Medium3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Medium_R132 )
self.btn_TS_System1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Closed )
self.btn_TS_System2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Steady )
self.btn_TS_System3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_System_Unsteady )
self.btn_TS_Container1.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Rigid )
self.btn_TS_Container2.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Piston )
self.btn_TS_Container3.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Membrane )
self.btn_TS_Container4.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Nozzle )
self.btn_TS_Container5.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Turbine )
self.btn_TS_Container6.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_HeatExch )
self.btn_TS_Container7.Bind( wx.EVT_RADIOBUTTON, self.onBtnClick_Container_Mixing )
self.btn_TS_Adiabadic.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Adiabadic )
self.btn_TS_Isothermal.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Isothermal )
self.btn_TS_Reversable.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Reversable )
self.btn_TS_Polytropic.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Polytropic )
self.btn_TS_Valve.Bind( wx.EVT_CHECKBOX, self.onBtnClick_Etc_Valve )
self.units_TS_Choose.Bind( wx.EVT_CHOICE, self.onUnits_TS_Choice )
self.btn_TS_Continue.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToInput )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def onBtnClick_Medium_IdealGas( self, event ):
event.Skip()
def onBtnClick_Medium_Water( self, event ):
event.Skip()
def onBtnClick_Medium_R132( self, event ):
event.Skip()
def onBtnClick_System_Closed( self, event ):
event.Skip()
def onBtnClick_System_Steady( self, event ):
event.Skip()
def onBtnClick_System_Unsteady( self, event ):
event.Skip()
def onBtnClick_Container_Rigid( self, event ):
event.Skip()
def onBtnClick_Container_Piston( self, event ):
event.Skip()
def onBtnClick_Container_Membrane( self, event ):
event.Skip()
def onBtnClick_Container_Nozzle( self, event ):
event.Skip()
def onBtnClick_Container_Turbine( self, event ):
event.Skip()
def onBtnClick_Container_HeatExch( self, event ):
event.Skip()
def onBtnClick_Container_Mixing( self, event ):
event.Skip()
def onBtnClick_Etc_Adiabadic( self, event ):
event.Skip()
def onBtnClick_Etc_Isothermal( self, event ):
event.Skip()
def onBtnClick_Etc_Reversable( self, event ):
event.Skip()
def onBtnClick_Etc_Polytropic( self, event ):
event.Skip()
def onBtnClick_Etc_Valve( self, event ):
event.Skip()
def onUnits_TS_Choice( self, event ):
event.Skip()
def onBtnClick_ContinueToInput( self, event ):
event.Skip()
###########################################################################
## Class Frm_ThermoInput
###########################################################################
class Frm_ThermoInput ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 856,300 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
siz_ThermoInput_Title = wx.BoxSizer( wx.VERTICAL )
self.tit_TI_Input = wx.StaticText( self, wx.ID_ANY, u"Inputs", wx.DefaultPosition, wx.DefaultSize, 0 )
self.tit_TI_Input.Wrap( -1 )
siz_ThermoInput_Title.Add( self.tit_TI_Input, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_ThermoInput_Inputs.SetFlexibleDirection( wx.BOTH )
sz_ThermoInput_Inputs.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TI_State1 = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_State1.SetFlexibleDirection( wx.BOTH )
sz_TI_State1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_State1 = wx.StaticText( self, wx.ID_ANY, u"State 1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_State1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_State1, 0, wx.ALL, 5 )
self.txt_TI_spacer11 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer11.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_spacer11, 0, wx.ALL, 5 )
self.txt_TI_spacer12 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer12.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_spacer12, 0, wx.ALL, 5 )
self.txt_TI_P1 = wx.StaticText( self, wx.ID_ANY, u"P1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_P1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_P1, 0, wx.ALL, 5 )
self.val_TI_P1 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State1.Add( self.val_TI_P1, 0, wx.ALL, 5 )
unit_TI_P1Choices = []
self.unit_TI_P1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_P1Choices, 0 )
self.unit_TI_P1.SetSelection( 0 )
sz_TI_State1.Add( self.unit_TI_P1, 0, wx.ALL, 5 )
self.txt_TI_V1 = wx.StaticText( self, wx.ID_ANY, u"V1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_V1.Wrap( -1 )
sz_TI_State1.Add( self.txt_TI_V1, 0, wx.ALL, 5 )
self.val_TI_V1 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State1.Add( self.val_TI_V1, 0, wx.ALL, 5 )
unit_TI_V1Choices = []
self.unit_TI_V1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_V1Choices, 0 )
self.unit_TI_V1.SetSelection( 0 )
sz_TI_State1.Add( self.unit_TI_V1, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_State1, 1, wx.EXPAND, 5 )
sz_TI_State2 = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_State2.SetFlexibleDirection( wx.BOTH )
sz_TI_State2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_State2 = wx.StaticText( self, wx.ID_ANY, u"State 2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_State2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_State2, 0, wx.ALL, 5 )
self.txt_TI_spacer21 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer21.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_spacer21, 0, wx.ALL, 5 )
self.txt_TI_spacer22 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_spacer22.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_spacer22, 0, wx.ALL, 5 )
self.txt_TI_P2 = wx.StaticText( self, wx.ID_ANY, u"P2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_P2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_P2, 0, wx.ALL, 5 )
self.val_TI_P2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State2.Add( self.val_TI_P2, 0, wx.ALL, 5 )
unit_TI_P2Choices = []
self.unit_TI_P2 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), unit_TI_P2Choices, 0 )
self.unit_TI_P2.SetSelection( 0 )
sz_TI_State2.Add( self.unit_TI_P2, 0, wx.ALL, 5 )
self.txt_TI_V2 = wx.StaticText( self, wx.ID_ANY, u"V2", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_V2.Wrap( -1 )
sz_TI_State2.Add( self.txt_TI_V2, 0, wx.ALL, 5 )
self.val_TI_V2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_State2.Add( self.val_TI_V2, 0, wx.ALL, 5 )
m_choice4Choices = []
self.m_choice4 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice4Choices, 0 )
self.m_choice4.SetSelection( 0 )
sz_TI_State2.Add( self.m_choice4, 0, wx.ALL, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_State2, 1, wx.EXPAND, 5 )
sz_TI_OtherMain = wx.FlexGridSizer( 0, 1, 0, 0 )
sz_TI_OtherMain.SetFlexibleDirection( wx.BOTH )
sz_TI_OtherMain.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
sz_TI_OtherTitle = wx.FlexGridSizer( 0, 2, 0, 0 )
sz_TI_OtherTitle.SetFlexibleDirection( wx.BOTH )
sz_TI_OtherTitle.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_Other = wx.StaticText( self, wx.ID_ANY, u"Other", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_Other.Wrap( -1 )
sz_TI_OtherTitle.Add( self.txt_TI_Other, 0, wx.ALL, 5 )
self.m_staticText24 = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText24.Wrap( -1 )
sz_TI_OtherTitle.Add( self.m_staticText24, 0, wx.ALL, 5 )
self.m_staticText25 = wx.StaticText( self, wx.ID_ANY, u"MyLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText25.Wrap( -1 )
sz_TI_OtherTitle.Add( self.m_staticText25, 0, wx.ALL, 5 )
m_choice8Choices = []
self.m_choice8 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice8Choices, 0 )
self.m_choice8.SetSelection( 0 )
sz_TI_OtherTitle.Add( self.m_choice8, 0, wx.ALL, 5 )
sz_TI_OtherMain.Add( sz_TI_OtherTitle, 1, wx.EXPAND, 5 )
sz_TI_Other = wx.FlexGridSizer( 0, 3, 0, 0 )
sz_TI_Other.SetFlexibleDirection( wx.BOTH )
sz_TI_Other.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.txt_TI_W = wx.StaticText( self, wx.ID_ANY, u"W", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_W.Wrap( -1 )
sz_TI_Other.Add( self.txt_TI_W, 0, wx.ALL, 5 )
self.val_TI_W = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_Other.Add( self.val_TI_W, 0, wx.ALL, 5 )
m_choice5Choices = []
self.m_choice5 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice5Choices, 0 )
self.m_choice5.SetSelection( 0 )
sz_TI_Other.Add( self.m_choice5, 0, wx.ALL, 5 )
self.txt_TI_Q = wx.StaticText( self, wx.ID_ANY, u"Q", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_TI_Q.Wrap( -1 )
sz_TI_Other.Add( self.txt_TI_Q, 0, wx.ALL, 5 )
self.val_TI_Q = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
sz_TI_Other.Add( self.val_TI_Q, 0, wx.ALL, 5 )
m_choice6Choices = []
self.m_choice6 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 60,-1 ), m_choice6Choices, 0 )
self.m_choice6.SetSelection( 0 )
sz_TI_Other.Add( self.m_choice6, 0, wx.ALL, 5 )
sz_TI_OtherMain.Add( sz_TI_Other, 1, wx.EXPAND, 5 )
sz_ThermoInput_Inputs.Add( sz_TI_OtherMain, 1, wx.EXPAND, 5 )
siz_ThermoInput_Title.Add( sz_ThermoInput_Inputs, 1, wx.EXPAND, 5 )
self.btn_TI_Continue = wx.Button( self, wx.ID_ANY, u"Continue", wx.DefaultPosition, wx.DefaultSize, 0 )
siz_ThermoInput_Title.Add( self.btn_TI_Continue, 0, wx.ALL, 5 )
self.SetSizer( siz_ThermoInput_Title )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.val_TI_P1.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_P1 )
self.unit_TI_P1.Bind( wx.EVT_CHOICE, self.onUnit_Chose )
self.val_TI_V1.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_V1 )
self.val_TI_P2.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_P2 )
self.val_TI_V2.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_V2 )
self.val_TI_W.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_W )
self.val_TI_Q.Bind( wx.EVT_TEXT_ENTER, self.onVal_TI_Q )
self.btn_TI_Continue.Bind( wx.EVT_BUTTON, self.onBtnClick_ContinueToResults )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def onVal_TI_P1( self, event ):
event.Skip()
def onUnit_Chose( self, event ):
event.Skip()
def onVal_TI_V1( self, event ):
event.Skip()
def onVal_TI_P2( self, event ):
event.Skip()
def onVal_TI_V2( self, event ):
event.Skip()
def onVal_TI_W( self, event ):
event.Skip()
def onVal_TI_Q( self, event ):
event.Skip()
def onBtnClick_ContinueToResults( self, event ):
event.Skip()
|
|
import sys
class rect:
def __init__(self, x = 0, y = 0, w = 0, h = 0):
self.x = x
self.y = y
self.w = w
self.h = h
def size(self):
return self.w, self.h
def pos(self):
return self.x, self.y
def xyxy(self):
return self.x, self.y, self.get_right(), self.get_bottom()
def get_left(self):
return self.x
def get_right(self):
return self.x + self.w
def get_bottom(self):
return self.y + self.h
def get_top(self):
return self.y
    def is_empty(self):
        return self.w <= 0 or self.h <= 0
def is_contains(self, r):
return r.x >= self.x and r.y >= self.y and\
r.get_bottom() <= self.get_bottom() and\
r.get_right() <= self.get_right()
def is_contained(self, r):
return self.x >= r.x and self.y >= r.y and\
self.get_bottom() <= r.get_bottom() and\
self.get_right() <= r.get_right()
def get_intersection(self, rc):
c = rect(self.x, self.y, self.w, self.h)
c.x = max(self.x, rc.x)
c.y = max(self.y, rc.y)
r = min(self.get_right(), rc.get_right())
b = min(self.get_bottom(), rc.get_bottom())
c.w = r - c.x
c.h = b - c.y
return c
def get_clipX(self, x):
r1 = rect(self.x, self.y, x - self.x, self.h)
r2 = rect(x, self.y, self.get_right() - x, self.h)
r1.w = min(self.w, max(0, r1.w))
r2.w = min(self.w, max(0, r2.w))
return r1, r2
def get_clipY(self, y):
r1 = rect(self.x, self.y, self.w, y - self.y)
r2 = rect(self.x, y, self.w, self.get_bottom() - y)
r1.h = min(self.h, max(0, r1.h))
r2.h = min(self.h, max(0, r2.h))
return r1, r2
def __repr__(self):
return "(%d, %d, %d, %d)" % (self.x, self.y, self.w, self.h)
class frame:
def __init__(self, w, h):
self.w = w
self.h = h
class MyException(Exception):
def __init__(self, free):
self.free = free
class Node:
def __init__(self, rect, atl, data):
self.rect = rect
self.atlas = atl
self.data = data
class Atlas:
n = 0
def __init__(self, padding, w, h):
self.w = w
self.h = h
self.free_rects = []
self.free_rects.append(rect(padding, padding, w - padding, h - padding))
self.nodes = []
Atlas.n += 1
@staticmethod
def optimize(clipped):
res = []
append = res.append
for r in clipped:
add = True
is_contained = r.is_contained
for t in clipped:
if t == r:
continue
if is_contained(t):
add = False
break
if add:
append(r)
return res
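    # Pruning sketch (hypothetical values): if `clipped` holds (0, 0, 4, 4)
    # and (1, 1, 2, 2), the second is fully contained in the first and is
    # dropped, leaving only maximal free rectangles.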
def save(self):
#return
from PIL import Image, ImageDraw
#SCALE = 50
#OFFSET = 5
SCALE = 1
OFFSET = 0
def conv(*xyxy):
return xyxy[0] * SCALE, xyxy[1] * SCALE, xyxy[2] * SCALE, xyxy[3] * SCALE
def conv_in(*xyxy):
return xyxy[0] * SCALE + OFFSET, xyxy[1] * SCALE + OFFSET, \
xyxy[2] * SCALE - OFFSET, xyxy[3] * SCALE - OFFSET
im = Image.new("RGBA", (self.w * SCALE, self.h * SCALE))
draw = ImageDraw.Draw(im)
draw.rectangle(conv(0, 0, self.w, self.h), fill="white")
        for node in self.nodes:
            draw.rectangle(conv(*node.rect.xyxy()), fill="red", outline="black")
imcopy = im.copy()
drawcopy = ImageDraw.Draw(imcopy)
for fr in self.free_rects:
rim = Image.new("RGBA", im.size)
rimdraw = ImageDraw.Draw(rim)
rimdraw.rectangle(conv_in(*fr.xyxy()), fill = "green", outline="black")
mask = Image.new("RGBA", im.size, "white")
maskdraw = ImageDraw.Draw(mask)
maskdraw.rectangle(conv_in(*fr.xyxy()), fill = "#c01010")
mask = mask.split()[0]
imcopy = Image.composite(imcopy, rim, mask)
imcopy.save("png/%s.png" %(self.n, ))
def add(self, w, h, data):
dest_rect = None
mn = 0xffffffffffffffff
for r in self.free_rects:
if r.w >= w and r.h >= h:
#v = min((r.w - w), (r.h - h))
#v = r.w * r.h - w * h
v = r.y * r.x * r.x
if v < mn:
#if 1:
mn = v
dest_rect = r
# break
#break
if not dest_rect:
#self.save()
return None
src_rect = rect(dest_rect.x, dest_rect.y, w, h)
clipped = []
append = clipped.append
for r in self.free_rects:
if r.get_intersection(src_rect).is_empty():
append(r)
continue
r1, _ = r.get_clipX(src_rect.get_left())
_, r2 = r.get_clipX(src_rect.get_right())
r3, _ = r.get_clipY(src_rect.get_top())
_, r4 = r.get_clipY(src_rect.get_bottom())
if not r1.is_empty():
append(r1)
if not r2.is_empty():
append(r2)
if not r3.is_empty():
append(r3)
if not r4.is_empty():
append(r4)
self.free_rects = self.optimize(clipped)
node = Node(src_rect, self, data)
self.nodes.append(node)
return node
if __name__ == "__main__":
import random
r = rect(0, 0, 10, 10)
fr = []
random.seed(0)
for x in xrange(200):
fr.append(frame(random.randint(10,60), random.randint(10,60)))
fr = [frame(1, 2), frame(2, 3), frame(2, 3), frame(3, 3), frame(8, 2), frame(4, 1), frame(4, 2), frame(1, 1), frame(3, 3),frame(3, 3),frame(3, 3),]
#fr = [frame(2, 2), frame(3, 3), frame(2, 2), ]
#fr = [frame(1, 1), frame(6, 3), frame(8, 1), frame(2,2)]
#fr = fr #30223
#fr = sorted(fr, key = lambda v: -max(v.h, v.w)) #21450
#fr = sorted(fr, key = lambda v: -v.h * v.w) #22492
#fr = sorted(fr, key = lambda v: -v.h) #21880
#fr = sorted(fr, key = lambda v: -v.w) #20573
im = Image.new("RGBA", (r.w * SCALE, r.h * SCALE))
draw = ImageDraw.Draw(im)
draw.rectangle(conv(0,0,r.w,r.h), fill="white")
exc = ""
atlas = Atlas(0, r.w, r.h)
for f in fr:
node = atlas.add(f.w, f.h)
if not node:
break
s = 0
for f in fr:
s += f.w * f.h
print "left: " + str(s)
im.save("image-%s.png" %(s, ))
|
|
from __future__ import unicode_literals
import io
import os
import subprocess
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
dfxp2srt,
ISO639Utils,
)
EXT_TO_OUT_FORMATS = {
"aac": "adts",
"m4a": "ipod",
"mka": "matroska",
"mkv": "matroska",
"mpg": "mpeg",
"ogv": "ogg",
"ts": "mpegts",
"wma": "asf",
"wmv": "asf",
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor(downloader)._versions
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
self._downloader.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without avconv/ffmpeg.' % (location))
self._versions = {}
return
elif not os.path.isdir(location):
basename = os.path.splitext(os.path.basename(location))[0]
if basename not in programs:
self._downloader.report_warning(
'Cannot identify executable %s, its basename should be one of %s. '
'Continuing without avconv/ffmpeg.' %
(location, ', '.join(programs)))
self._versions = {}
return None
location = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(location, p)) for p in programs)
self._versions = dict(
(p, get_exe_version(self._paths[p], args=['-version']))
for p in programs)
if self._versions is None:
self._versions = dict(
(p, get_exe_version(p, args=['-version'])) for p in programs)
self._paths = dict((p, p) for p in programs)
if prefer_ffmpeg:
prefs = ('ffmpeg', 'avconv')
else:
prefs = ('avconv', 'ffmpeg')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg:
prefs = ('ffprobe', 'avprobe')
else:
prefs = ('avprobe', 'ffprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def get_audio_codec(self, path):
if not self.probe_available:
raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
try:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams'),
encodeFilename(self._ffmpeg_filename_argument(path), True)]
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
audio_codec = None
for line in output.decode('ascii', 'ignore').split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path in input_paths)
opts += self._configuration_args()
files_cmd = []
for path in input_paths:
files_cmd.extend([
encodeArgument('-i'),
encodeFilename(self._ffmpeg_filename_argument(path), True)
])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
self.try_utime(out_path, oldest_mtime, oldest_mtime)
def run_ffmpeg(self, path, out_path, opts):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
return 'file:' + fn if fn != '-' else fn
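    # e.g. _ffmpeg_filename_argument('a:b.mp4') -> 'file:a:b.mp4' and
    # _ffmpeg_filename_argument('-odd.mp4') -> 'file:-odd.mp4', while '-'
    # passes through unchanged so streaming to stdout keeps working.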
class FFmpegExtractAudioPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._nopostoverwrites = nopostoverwrites
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
else:
# We convert the audio (lossy)
acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
# The opus codec doesn't support the -aq option
if int(self._preferredquality) < 10 and extension != 'opus':
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
information['filepath'] = new_path
information['ext'] = extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if (new_path == path or
(self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
return [], information
try:
self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
return [path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
if information['ext'] == self._preferedformat:
self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
return [], information
options = []
if self._preferedformat == 'avi':
options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
prefix, sep, ext = path.rpartition('.')
outpath = prefix + sep + self._preferedformat
self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, options)
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
return [path], information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def run(self, information):
if information['ext'] not in ('mp4', 'webm', 'mkv'):
self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
return [], information
subtitles = information.get('requested_subtitles')
if not subtitles:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
return [], information
filename = information['filepath']
ext = information['ext']
sub_langs = []
sub_filenames = []
webm_vtt_warn = False
for lang, sub_info in subtitles.items():
sub_ext = sub_info['ext']
if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_filenames.append(subtitles_filename(filename, lang, sub_ext))
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')
if not sub_langs:
return [], information
input_files = [filename] + sub_filenames
opts = [
'-map', '0',
'-c', 'copy',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
if information['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for (i, lang) in enumerate(sub_langs):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang)
if lang_code is not None:
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
temp_filename = prepend_extension(filename, 'temp')
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return sub_filenames, information
class FFmpegMetadataPP(FFmpegPostProcessor):
def run(self, info):
metadata = {}
def add(meta_list, info_list=None):
if not info_list:
info_list = meta_list
if not isinstance(meta_list, (list, tuple)):
meta_list = (meta_list,)
if not isinstance(info_list, (list, tuple)):
info_list = (info_list,)
for info_f in info_list:
if info.get(info_f) is not None:
for meta_f in meta_list:
metadata[meta_f] = info[info_f]
break
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'comment'), 'description')
add('purl', 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
if not metadata:
self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if info['ext'] == 'm4a':
options = ['-vn', '-acodec', 'copy']
else:
options = ['-c', 'copy']
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'youtube-dl will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
return False
return True
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio is None or stretched_ratio == 1:
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
def run(self, info):
if info.get('container') != 'm4a_dash':
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4']
self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
if self.get_audio_codec(filename) == 'aac':
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
            self._downloader.to_screen('[ffmpeg] Fixing malformed aac bitstream in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
filename = info['filepath']
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
self._downloader.to_screen(
                    '[ffmpeg] Subtitle file for %s is already in the requested '
                    'format' % new_ext)
continue
old_file = subtitles_filename(filename, lang, ext)
sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext == 'dfxp' or ext == 'ttml' or ext == 'tt':
self._downloader.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
}
return sub_filenames, info
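# A minimal usage sketch (assumptions: `dl` is a youtube-dl downloader object
# exposing `params`, `to_screen` and `report_warning`, and `info` describes an
# already-downloaded file; this helper is illustrative, not part of the module):
def _example_convert_to_mkv(dl, info):
    pp = FFmpegVideoConvertorPP(dl, preferedformat='mkv')
    files_to_delete, info = pp.run(info)
    for path in files_to_delete:
        os.remove(encodeFilename(path))  # drop the original container
    return info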
|
|
#!/usr/bin/env python
# NoaArbel edited plot_training_log.py
# This version adds the option to plot two curves in the same image (such as training and test loss vs. iters)
import inspect
import os
import random
import sys
import matplotlib.cm as cmx
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import matplotlib.legend as lgd
import matplotlib.markers as mks
def get_log_parsing_script():
dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
return dirname + '/parse_log.sh'
def get_log_file_suffix():
return '.log'
def get_chart_type_description_separator():
return ' vs. '
def is_x_axis_field(field):
x_axis_fields = ['Iters', 'Seconds']
return field in x_axis_fields
def create_field_index():
train_key = 'Train'
test_key = 'Test'
field_index = {train_key:{'Iters':0, 'Seconds':1, train_key + ' loss':2,
train_key + ' learning rate':3},
test_key:{'Iters':0, 'Seconds':1, test_key + ' accuracy':2,
test_key + ' loss':3}}
fields = set()
for data_file_type in field_index.keys():
fields = fields.union(set(field_index[data_file_type].keys()))
fields = list(fields)
fields.sort()
return field_index, fields
def get_supported_chart_types():
field_index, fields = create_field_index()
num_fields = len(fields)
supported_chart_types = []
# For one value plot
for i in xrange(num_fields):
if not is_x_axis_field(fields[i]):
for j in xrange(num_fields):
if i != j and is_x_axis_field(fields[j]):
str_temp = '%s%s%s' % (
fields[i], get_chart_type_description_separator(),
fields[j])
supported_chart_types.append(str_temp)
# For two-values plot
for i in xrange(num_fields):
if not is_x_axis_field(fields[i]):
for j in xrange(num_fields):
for k in xrange(num_fields):
if not is_x_axis_field(fields[k]):
if i != j and k!=j and k!=i and is_x_axis_field(fields[j]):
str_temp = '%s%s%s%s%s' % (
fields[i],' and ',fields[k],get_chart_type_description_separator(),
fields[j])
supported_chart_types.append(str_temp)
return supported_chart_types
def get_chart_type_description(chart_type):
supported_chart_types = get_supported_chart_types()
chart_type_description = supported_chart_types[chart_type]
return chart_type_description
def get_data_file_type(chart_type):
description = get_chart_type_description(chart_type)
data_file_type = description.split()[0]
description_temp = description.split(' and ')
    if len(description_temp) > 1:  # more than one line to plot
data_file_type2 = description_temp[1].split()[0]
else:
data_file_type2 = []
return data_file_type, data_file_type2
def get_data_file(chart_type, path_to_log):
data_file_description,data_file_description2 = get_data_file_type(chart_type)
if data_file_description2:
file1 = os.path.basename(path_to_log) + '.' + data_file_description.lower()
file12 = os.path.basename(path_to_log) + '.' + data_file_description2.lower()
else:
file1 = os.path.basename(path_to_log) + '.' + data_file_description.lower()
file12 = []
return file1,file12
def get_field_descriptions(chart_type):
description = get_chart_type_description(chart_type).split(
get_chart_type_description_separator())
description0 = description[0].split(' and ')
if len(description0) == 1:
y_axis_field_1 = description[0]
y_axis_field_2 = []
x_axis_field = description[1]
else:
if len(description0) == 2:
y_axis_field_1 = description0[0]
y_axis_field_2 = description0[1]
x_axis_field = description[1]
return x_axis_field, y_axis_field_1, y_axis_field_2
def get_field_indecies(x_axis_field, y_axis_field):
    file_type = y_axis_field.split()[0]  # Test or Train
    fields = create_field_index()[0][file_type]
    return fields[x_axis_field], fields[y_axis_field]
def load_data(data_file, field_idx0, field_idx1):
data = [[], []]
with open(data_file, 'r') as f:
for line in f:
            line = line.strip()
            if line and line[0] != '#':
fields = line.split()
data[0].append(float(fields[field_idx0].strip()))
data[1].append(float(fields[field_idx1].strip()))
return data
def random_marker():
markers = mks.MarkerStyle.markers
num = len(markers.values())
idx = random.randint(0, num - 1)
return markers.values()[idx]
def get_data_label(chart_type):
    description = get_chart_type_description(chart_type).split(' and ')
    if len(description) > 1:  # more than one line to plot
label = description[0]
label2 = description[1].split(get_chart_type_description_separator())[0]
else:
label = description[0].split()[0]
label2 = []
return label,label2
def get_legend_loc(chart_type):
x_axis, y_axis, y_axis2= get_field_descriptions(chart_type)
loc = 'lower right'
if y_axis.find('accuracy') != -1:
pass
if y_axis.find('loss') != -1 or y_axis.find('learning rate') != -1:
loc = 'upper right'
if y_axis2:
if y_axis2.find('accuracy') != -1:
pass
if y_axis2.find('loss') != -1 or y_axis2.find('learning rate') != -1:
loc = 'upper right'
return loc
def plot_chart(chart_type, path_to_png, path_to_log_list):
for path_to_log in path_to_log_list:
os.system('%s %s' % (get_log_parsing_script(), path_to_log))
data_file, data_file2 = get_data_file(chart_type, path_to_log)
x_axis_field, y_axis_field,y_axis_field_2 = get_field_descriptions(chart_type)
if not y_axis_field_2:
x, y = get_field_indecies(x_axis_field, y_axis_field)
data = load_data(data_file, x, y)
else:
x, y = get_field_indecies(x_axis_field, y_axis_field)
x2, y2 = get_field_indecies(x_axis_field, y_axis_field_2)
data = load_data(data_file, x, y)
data2 = load_data(data_file2, x2, y2)
## TODO: more systematic color cycle for lines
color = [random.random(), random.random(), random.random()]
color2 = [random.random(), random.random(), random.random()]
label,label2 = get_data_label(chart_type)
linewidth = 0.75
        ## If there are too many datapoints, do not use a marker.
        if len(data[0]) > 1000:
use_marker = False
else:
use_marker = True
if not use_marker:
plt.plot(data[0], data[1], label = label, color = color,
linewidth = linewidth)
if y_axis_field_2:
plt.plot(data2[0], data2[1], label = label2, color = color2,
linewidth = linewidth)
else:
ok = False
## Some markers throw ValueError: Unrecognized marker style
while not ok:
try:
marker = random_marker()
plt.plot(data[0], data[1], label = label, color = color,
marker = marker, linewidth = linewidth)
if y_axis_field_2:
plt.plot(data2[0], data2[1], label = label2, color = color2,
marker = marker, linewidth = linewidth)
ok = True
                except ValueError:
                    pass
legend_loc = get_legend_loc(chart_type)
    plt.legend(loc = legend_loc, ncol = 1)  # adjust ncol to fit the space
plt.title(get_chart_type_description(chart_type))
plt.xlabel(x_axis_field)
plt.ylabel(y_axis_field)
plt.savefig(path_to_png)
plt.show()
def print_help():
print """This script mainly serves as the basis of your customizations.
Customization is a must.
You can copy, paste, edit them in whatever way you want.
Be warned that the fields in the training log may change in the future.
You had better check the data files and change the mapping from field name to
field index in create_field_index before designing your own plots.
Usage:
./plot_training_log.py chart_type[0-%s] /where/to/save.png /path/to/first.log ...
Notes:
1. Supporting multiple logs.
2. Log file name must end with the lower-cased "%s".
Supported chart types:""" % (len(get_supported_chart_types()) - 1,
get_log_file_suffix())
supported_chart_types = get_supported_chart_types()
num = len(supported_chart_types)
for i in xrange(num):
print ' %d: %s' % (i, supported_chart_types[i])
    sys.exit()
def is_valid_chart_type(chart_type):
return chart_type >= 0 and chart_type < len(get_supported_chart_types())
if __name__ == '__main__':
if len(sys.argv) < 4:
print_help()
else:
chart_type = int(sys.argv[1])
if not is_valid_chart_type(chart_type):
print_help()
path_to_png = sys.argv[2]
if not path_to_png.endswith('.png'):
            print 'Path must end with .png: %s' % path_to_png
            sys.exit()
path_to_logs = sys.argv[3:]
for path_to_log in path_to_logs:
if not os.path.exists(path_to_log):
print 'Path does not exist: %s' % path_to_log
                sys.exit()
if not path_to_log.endswith(get_log_file_suffix()):
print_help()
        ## plot_chart accepts multiple path_to_logs
plot_chart(chart_type, path_to_png, path_to_logs)
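# Example invocation (hypothetical paths):
#   ./plot_training_log.py 6 train_test_loss.png caffe_train.log
# parse_log.sh is run on caffe_train.log first, then chart type 6 is rendered.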
|
|
import os
import io
import ctypes
import struct
import socket
import logging
import binascii
import threading
DOES_EXIST_RESPONSE_YES = 1
DOES_EXIST_RESPONSE_NO = 2
OPCODE_GET = 1
OPCODE_DOES_EXIST = 3
OPCODE_GET_LABEL = 12
OPCODE_LIST_LABELS = 13
OPCODE_ACK = 0xAC
HASH_ALGORITHM_SHA1 = 2
class FakeServer(threading.Thread):
def __init__(self):
self._conn = None
self._sock = socket.socket()
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind(('localhost', 0))
self._sock.listen(10)
threading.Thread.__init__(self)
self.daemon = True
threading.Thread.start(self)
def port(self):
return self._sock.getsockname()[1]
def hostname(self):
return "localhost"
def run(self):
try:
while True:
self._conn, peer = self._sock.accept()
self._serve()
except:
logging.exception("Fake Server")
def readLog(self):
return ""
def _handshake(self):
self._conn.recv(4096)
self._conn.send(chr(OPCODE_ACK))
class FakeServerWithNegotiationLogic(FakeServer):
def __init__(self, clientOfServerToImitate):
super(FakeServerWithNegotiationLogic, self).__init__()
self._client = clientOfServerToImitate
self._handlerMethods = {OPCODE_LIST_LABELS: self._listLabels,
OPCODE_GET_LABEL: self._getLabel,
OPCODE_DOES_EXIST: self._doesExist}
def _serve(self):
self._handshake()
try:
while True:
message = self._conn.recv(1)
if not message:
break
opcode = ord(message[0])
if opcode not in self._handlerMethods:
break
handler = self._handlerMethods[opcode]
try:
handler()
except StopIteration:
break
finally:
self._conn.close()
def _sendStruct(self, _struct):
buf = io.BytesIO()
buf.write(_struct)
        self._conn.send(buf.getvalue())
def _receiveStruct(self, _struct):
raw = self._conn.recv(1024)
buf = io.BytesIO(raw)
buf.readinto(_struct)
residue = buf.read()
return residue
def _sendChunk(self, offset, payload):
chunk = ChunkHeader(offset, len(payload))
self._conn.send(chunk)
if payload:
self._conn.send(payload)
def _sendEOF(self):
self._sendChunk(0, "")
def _receiveLabel(self):
labelHeader = LabelHeader()
label = self._receiveStruct(labelHeader)
return label
def _receiveHash(self):
_hashStruct = Hash()
self._receiveStruct(_hashStruct)
_rawHash = "".join([chr(byte) for byte in _hashStruct.hash])
_hexHash = binascii.hexlify(_rawHash)
return _hexHash
def _listLabels(self):
regex = self._receiveLabel()
for label in self._client.listLabels(regex=regex):
self._sendChunk(offset=0, payload=label)
self._sendEOF()
def _getLabel(self):
label = self._receiveLabel()
labelFilePath = os.path.join(self._client._server.path, "labels", label)
with open(labelFilePath) as labelFile:
content = labelFile.read()
_hash = binascii.unhexlify(content)
hashStruct = Hash()
io.BytesIO(chr(HASH_ALGORITHM_SHA1) + _hash).readinto(hashStruct)
self._conn.send(hashStruct)
    def _doesExist(self):
        _hash = self._receiveHash()
        self._readFileByHash(_hash)
        self._conn.send(chr(DOES_EXIST_RESPONSE_YES))
def _readFileByHash(self, _hash):
hashFilePath = os.path.join(self._client._server.path, _hash[:2], _hash[2:4], _hash[4:])
with open(hashFilePath) as hashFile:
content = hashFile.read()
return content
class FakeServerHangsUp(FakeServer):
def __init__(self):
super(FakeServerHangsUp, self).__init__()
def _serve(self):
self._conn.recv(4096)
self._conn.close()
class FakeServerConnectTimeout(FakeServer):
def __init__(self):
super(FakeServerConnectTimeout, self).__init__()
def port(self):
return 15652
def hostname(self):
return "35.124.99.234"
class FakeServerNotSending(FakeServer):
def __init__(self):
super(FakeServerNotSending, self).__init__()
def _serve(self):
self._handshake()
message = self._conn.recv(4096)
opcode = ord(message[0])
self._blockForever()
def _blockForever(self):
raw_input()
class FakeServerCloseAfterListLabelsOp(FakeServerWithNegotiationLogic):
def __init__(self, clientOfServerToImitate):
super(FakeServerCloseAfterListLabelsOp, self).__init__(clientOfServerToImitate)
def _listLabels(self):
raise StopIteration
class FakeServerCloseAfterGetOp(FakeServerWithNegotiationLogic):
def __init__(self, clientOfServerToImitate):
super(FakeServerCloseAfterGetOp, self).__init__(clientOfServerToImitate)
self._handlerMethods[OPCODE_GET] = self._getFile
self._getFileCounter = 0
def _getFile(self):
self._getFileCounter += 1
_hash = self._receiveHash()
content = self._readFileByHash(_hash)
if self._getFileCounter == 1:
self._sendChunk(offset=0, payload=content)
self._sendEOF()
else:
halfContent = content[:len(content) / 2]
assert len(halfContent) < len(content)
assert len(halfContent) > 0
self._sendChunk(offset=0, payload=halfContent)
raise StopIteration
class FakeServerCloseAfterExistsOp(FakeServerWithNegotiationLogic):
def __init__(self, clientOfServerToImitate):
super(FakeServerCloseAfterExistsOp, self).__init__(clientOfServerToImitate)
def _doesExist(self):
self._conn.send("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
self._conn.send("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
raise StopIteration
class ChunkHeader(ctypes.Structure):
_pack_ = 1
_fields_ = [('offset', ctypes.c_size_t),
('bytes', ctypes.c_ushort)]
class LabelHeader(ctypes.Structure):
_pack_ = 1
_fields_ = [('length', ctypes.c_ushort)]
class Hash(ctypes.Structure):
_pack_ = 1
_fields_ = [('hashAlgorithm', ctypes.c_ubyte),
('hash', ctypes.c_ubyte * 20)]
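# A minimal round-trip sketch for the wire structs above (illustrative only;
# relies on the Py2 buffer protocol that ctypes structures expose):
def _example_chunk_header_roundtrip():
    header = ChunkHeader(offset=16, bytes=4)
    raw = buffer(header)[:]  # serialize the packed struct to a byte string
    parsed = ChunkHeader()
    io.BytesIO(raw).readinto(parsed)  # same pattern as _receiveStruct above
    return parsed.offset, parsed.bytes  # (16, 4)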
|
|
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import ctypes
import sys
import weakref
from . import heap
from . import get_context
from .context import assert_spawning
from .five import int_types
from .reduction import ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
PY3 = sys.version_info[0] == 3
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int_types):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
ctx = kwds.pop('ctx', None)
if kwds:
raise ValueError(
'unrecognized keyword argument(s): %s' % list(kwds.keys()))
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
ctx = ctx or get_context()
lock = ctx.RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock, ctx=ctx)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
ctx = kwds.pop('ctx', None)
if kwds:
raise ValueError(
'unrecognized keyword argument(s): %s' % list(kwds.keys()))
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
ctx = ctx or get_context()
lock = ctx.RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock, ctx=ctx)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None, ctx=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
ctx = ctx or get_context()
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock, ctx)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock, ctx)
return SynchronizedArray(obj, lock, ctx)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock, ctx)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
if PY3:
buf = wrapper.create_memoryview()
obj = type_.from_buffer(buf)
else:
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec(template % ((name, ) * 7), d)
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None, ctx=None):
self._obj = obj
if lock:
self._lock = lock
else:
ctx = ctx or get_context(force=True)
self._lock = ctx.RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
with self:
return self._obj[i]
def __setitem__(self, i, value):
with self:
self._obj[i] = value
def __getslice__(self, start, stop):
with self:
return self._obj[start:stop]
def __setslice__(self, start, stop, values):
with self:
self._obj[start:stop] = values
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
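# Minimal usage sketch (assumes a usable multiprocessing context; access is
# serialized through the wrapper's shared RLock):
def _example_shared_counter():
    counter = Value('i', 0)  # synchronized c_int backed by shared memory
    with counter.get_lock():
        counter.value += 1
    return counter.value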
|
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
File: MongoSplitter.py
Author: NYU ITP Team
Description: Will calculate splits for a given collection/database
and store/return them in MongoSplit objects
'''
from pymongo import uri_parser
from split import MongoInputSplit
from mongodisco.mongo_util import get_collection, get_connection, get_database
import logging
import bson
def calculate_splits(config):
"""reads config to find out what type of split to perform"""
#if the user does not specify an inputURI we will need to construct it from
#the db/collection name TODO
uri = config.setdefault("input_uri", "mongodb://localhost/test.in")
uri_info = uri_parser.parse_uri(uri)
db = get_database(uri)
stats = db.command("collstats", uri_info['collection'])
    is_sharded = stats.get('sharded', False)
    use_shards = config.get("use_shards", False)
    use_chunks = config.get("use_chunks", False)
    slave_ok = config.get("slave_ok", False)
if config.get("create_input_splits"):
logging.info("Creation of Input Splits is enabled.")
if is_sharded and (use_shards or use_chunks):
if use_shards and use_chunks:
logging.warn("Combining 'use chunks' and 'read from shards \
directly' can have unexpected & erratic behavior in a live \
system due to chunk migrations. ")
logging.info("Sharding mode calculation entering.")
return calculate_sharded_splits(config, use_shards, use_chunks, uri)
# perfectly ok for sharded setups to run with a normally calculated split.
#May even be more efficient for some cases
else:
logging.info("Using Unsharded Split mode \
(Calculating multiple splits though)")
return calculate_unsharded_splits(config, uri)
else:
logging.info("Creation of Input Splits is disabled;\
Non-Split mode calculation entering.")
return calculate_single_split(config)
def calculate_unsharded_splits(config, uri):
"""@todo: Docstring for calculate_unsharded_splits
:returns: @todo
Note: collection_name seems unnecessary --CW
"""
splits = [] # will return this
logging.info("Calculating unsharded splits")
coll = get_collection(uri)
    q = config.get("query", {})
# create the command to do the splits
# command to split should look like this VV
# SON([('splitVector', u'test.test_data'), ('maxChunkSize', 2),
# ('force', True), ('keyPattern', {'x': 1})])
split_key = config.get('split_key')
split_size = config.get('split_size')
full_name = coll.full_name
logging.info("Calculating unsharded splits on collection %s with Split Key %s" %
(full_name, split_key))
logging.info("Max split size :: %sMB" % split_size)
cmd = bson.son.SON()
cmd["splitVector"] = full_name
cmd["maxChunkSize"] = split_size
cmd["keyPattern"] = split_key
cmd["force"] = False
logging.debug("Issuing Command: %s" % cmd)
data = coll.database.command(cmd)
logging.debug("%r" % data)
# results should look like this
# {u'ok': 1.0, u'splitKeys': [{u'_id': ObjectId('4f49775348d9846c5e582b00')},
# {u'_id': ObjectId('4f49775548d9846c5e58553b')}]}
if data.get("err"):
raise Exception(data.get("err"))
elif data.get("ok") != 1.0:
raise Exception("Unable to calculate splits")
split_data = data.get('splitKeys')
if not split_data:
logging.warning("WARNING: No Input Splits were calculated by the split code. \
Proceeding with a *single* split. Data may be too small, try lowering \
'mongo.input.split_size' if this is undesirable.")
else:
logging.info("Calculated %s splits" % len(split_data))
last_key = None
for bound in split_data:
splits.append(_split(config, q, last_key, bound))
last_key = bound
splits.append(_split(config, q, last_key, None))
return [s.format_uri_with_query() for s in splits]
def _split(config=None, q={}, min=None, max=None):
""" constructs a split object to be used later
:returns: an actual MongoSplit object
"""
query = bson.son.SON()
query["$query"] = q
if min:
query["$min"] = min
if max:
query["$max"] = max
logging.info("Assembled Query: ", query)
return MongoInputSplit(
config.get("input_uri"),
config.get("input_key"),
query,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False))
def calculate_single_split(config):
splits = []
logging.info("calculating single split")
query = bson.son.SON()
splits.append(MongoInputSplit(
config.get("input_uri"),
config.get("input_key"),
query,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False)))
logging.debug("Calculated %d split objects" % len(splits))
logging.debug("Dump of calculated splits ... ")
for s in splits:
logging.debug(" Split: %s" % s.__str__())
return [s.format_uri_with_query() for s in splits]
def calculate_sharded_splits(config, use_shards, use_chunks, uri):
"""Calculates splits fetching them directly from a sharded setup
:returns: A list of sharded splits
"""
splits = []
if use_chunks:
splits = fetch_splits_via_chunks(config, uri, use_shards)
elif use_shards:
logging.warn("Fetching Input Splits directly from shards is potentially \
dangerous for data consistency should migrations occur during the retrieval.")
splits = fetch_splits_from_shards(config, uri)
else:
logging.error("Neither useChunks nor useShards enabled; failed to pick a valid state.")
    if splits is None:
logging.error("Failed to create/calculate Input Splits from Shard Chunks; final splits content is 'None'.")
logging.debug("Calculated splits and returning them - splits: %r" % splits)
return splits
def fetch_splits_from_shards(config, uri):
"""Internal method to fetch splits from shareded db
:returns: The splits
"""
logging.warn("WARNING getting splits that connect directly to the backend mongods is risky and might not produce correct results")
connection = get_connection(uri)
configDB = connection["config"]
shardsColl = configDB["shards"]
shardSet = []
splits = []
cur = shardsColl.find()
for row in cur:
host = row.get('host')
slashIndex = host.find("/")
if slashIndex > 0:
host = host[slashIndex + 1:]
shardSet.append(host)
splits = []
for host in shardSet:
new_uri = get_new_URI(uri,host)
config['input_uri'] = new_uri
splits += calculate_unsharded_splits(config,new_uri)
#I think this is better than commented way
return splits
'''
splits.append(MongoInputSplit(new_uri,
config.get("input_key"),
config.get("query"),
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True)))
return [s.format_uri_with_query() for s in splits]
'''
def fetch_splits_via_chunks(config, uri, use_shards):
"""Retrieves split objects based on chunks in mongo
:returns: The splits
"""
originalQuery = config.get("query")
if use_shards:
logging.warn("WARNING getting splits that connect directly to the \
backend mongods is risky and might not produce correct results")
logging.debug("fetch_splits_via_chunks: originalQuery: %s" % originalQuery)
connection = get_connection(uri)
configDB = connection["config"]
shardMap = {}
if use_shards:
shardsColl = configDB["shards"]
cur = shardsColl.find()
for row in cur:
host = row.get('host')
slashIndex = host.find("/")
if slashIndex > 0:
host = host[slashIndex + 1:]
shardMap[row.get('_id')] = host
logging.debug("MongoInputFormat.getSplitsUsingChunks(): shard map is: %s" % shardMap)
chunksCollection = configDB["chunks"]
logging.info(configDB.collection_names())
query = bson.son.SON()
uri_info = uri_parser.parse_uri(uri)
query["ns"] = uri_info['database'] + '.' + uri_info['collection']
cur = chunksCollection.find(query)
logging.info("query is ", query)
logging.info(cur.count())
logging.info(chunksCollection.find().count())
numChunks = 0
splits = []
for row in cur:
numChunks += 1
minObj = row.get('min')
shardKeyQuery = bson.son.SON()
min = bson.son.SON()
max = bson.son.SON()
for key in minObj:
tMin = minObj[key]
tMax = (row.get('max'))[key]
#@to-do do type comparison first?
min[key] = tMin
max[key] = tMax
        if originalQuery is None:
originalQuery = bson.son.SON()
shardKeyQuery["$query"] = originalQuery
shardKeyQuery["$min"] = min
shardKeyQuery["$max"] = max
inputURI = config.get("input_uri")
if use_shards:
shardName = row.get('shard')
host = shardMap[shardName]
inputURI = get_new_URI(inputURI, host)
splits.append(MongoInputSplit(
inputURI,
config.get("input_key"),
shardKeyQuery,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False)))
# return splits in uri format for disco
return [s.format_uri_with_query() for s in splits]
def get_new_URI(original_URI, new_URI):
"""
:returns: a new Mongo_URI
"""
MONGO_URI_PREFIX = "mongodb://"
orig_URI_string = original_URI[len(MONGO_URI_PREFIX):]
server_end = -1
server_start = 0
"""to find the last index of / in the original URI string """
idx = orig_URI_string.rfind("/")
if idx < 0:
server_end = len(orig_URI_string)
else:
server_end = idx
idx = orig_URI_string.find("@")
server_start = idx + 1
sb = orig_URI_string[0:server_start] + new_URI + orig_URI_string[server_end:]
ans = MONGO_URI_PREFIX + sb
logging.debug("get_new_URI(): original " + original_URI + " new uri: " + ans)
return ans
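# Example (hypothetical config; requires a reachable mongod):
#   splits = calculate_splits({
#       "input_uri": "mongodb://localhost/test.in",
#       "create_input_splits": True,
#       "split_key": {"_id": 1},
#       "split_size": 8,
#   })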
|
|
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from wherehows.common import Constant
from com.ziclix.python.sql import zxJDBC
import DbUtil
import sys
import json
import urllib
import urllib2
from org.slf4j import LoggerFactory
class ElasticSearchIndex():
def __init__(self, args):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.elasticsearch_index_url = args[Constant.WH_ELASTICSEARCH_URL_KEY]
self.elasticsearch_port = args[Constant.WH_ELASTICSEARCH_PORT_KEY]
self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
args[Constant.WH_DB_USERNAME_KEY],
args[Constant.WH_DB_PASSWORD_KEY],
args[Constant.WH_DB_DRIVER_KEY])
self.wh_cursor = self.wh_con.cursor()
def bulk_insert(self, params, url):
try:
req = urllib2.Request(url=url)
req.add_header('Content-type', 'application/json')
req.get_method = lambda: "PUT"
req.add_data('\n'.join(params) + '\n')
response = urllib2.urlopen(req)
data = json.load(response)
            if data.get('errors'):
self.logger.info(str(data))
except urllib2.HTTPError as e:
self.logger.error(str(e.code))
self.logger.error(e.read())
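    # Each bulk payload alternates an action line and a document line, e.g.
    #   { "index": { "_id": 42, "parent": 7 }}
    #   { "comments": ["..."], "dataset_id": 7, ... }
    # bulk_insert() joins the lines and appends the trailing newline the
    # Elasticsearch bulk API requires.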
def update_dataset_field(self, last_time=None):
if last_time:
sql = """
SELECT * FROM dict_field_detail WHERE modified >= DATE_SUB(%s, INTERVAL 1 HOUR)
""" % last_time
else:
sql = """
SELECT * FROM dict_field_detail
"""
comment_query = """
SELECT d.field_id, d.dataset_id, f.comment FROM dict_dataset_field_comment d
LEFT JOIN field_comments f ON d.comment_id = f.id WHERE d.field_id = %d
"""
url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/wherehows/field/_bulk'
params = []
self.wh_cursor.execute(sql)
rows = DbUtil.copy_dict_cursor(self.wh_cursor)
row_count = 1
for row in rows:
self.wh_cursor.execute(comment_query % long(row['field_id']))
comments = []
comment_rows = DbUtil.copy_dict_cursor(self.wh_cursor)
for comment_row in comment_rows:
comments.append(comment_row['comment'])
params.append('{ "index": { "_id": ' +
str(row['field_id']) + ', "parent": ' + str(row['dataset_id']) + ' }}')
if len(comments) > 0:
params.append(
"""{ "comments": %s, "dataset_id": %d, "sort_id": %d, "field_name": "%s", "parent_path": "%s"}"""
% (json.dumps(comments) if comments else '', row['dataset_id'] if row['dataset_id'] else 0,
row['sort_id'] if row['sort_id'] else 0,
row['field_name'] if row['field_name'] else '', row['parent_path'] if row['parent_path'] else ''))
else:
params.append(
"""{ "comments": "", "dataset_id": %d, "sort_id": %d, "field_name": "%s", "parent_path": "%s"}"""
% (row['dataset_id'] if row['dataset_id'] else 0, row['sort_id'] if row['sort_id'] else 0,
row['field_name'] if row['field_name'] else '', row['parent_path'] if row['parent_path'] else ''))
if row_count % 1000 == 0:
self.bulk_insert(params, url)
params = []
row_count += 1
if len(params) > 0:
self.bulk_insert(params, url)
def update_comment(self, last_time=None):
if last_time:
sql = """
SELECT * FROM comments WHERE modified >= DATE_SUB(%s, INTERVAL 1 HOUR)
""" % last_time
else:
sql = """
SELECT * FROM comments
"""
url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/wherehows/comment/_bulk'
params = []
self.wh_cursor.execute(sql)
rows = DbUtil.copy_dict_cursor(self.wh_cursor)
row_count = 1
for row in rows:
params.append('{ "index": { "_id": ' + str(row['id']) + ', "parent": ' + str(row['dataset_id']) + ' }}')
params.append(
"""{ "text": %s, "user_id": %d, "dataset_id": %d, "comment_type": "%s"}"""
                % (json.dumps(row['text']) if row['text'] else json.dumps(''), row['user_id'] if row['user_id'] else 0,
row['dataset_id'] if row['dataset_id'] else 0, row['comment_type'] if row['comment_type'] else ''))
if row_count % 1000 == 0:
self.bulk_insert(params, url)
params = []
row_count += 1
if len(params) > 0:
self.bulk_insert(params, url)
def update_dataset(self, last_unixtime=None):
if last_unixtime:
sql = """
SELECT * FROM dict_dataset WHERE from_unixtime(modified_time) >= DATE_SUB(from_unixtime(%f), INTERVAL 1 HOUR)
""" % last_unixtime
else:
sql = """
SELECT * FROM dict_dataset
"""
url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/wherehows/dataset/_bulk'
params = []
self.wh_cursor.execute(sql)
rows = DbUtil.copy_dict_cursor(self.wh_cursor)
row_count = 1
for row in rows:
params.append('{ "index": { "_id": ' + str(row['id']) + ' }}')
params.append(
"""{ "name": "%s", "source": "%s", "urn": "%s", "location_prefix": "%s", "parent_name": "%s","schema_type": "%s", "properties": %s, "schema": %s , "fields": %s}"""
% (row['name'] if row['name'] else '', row['source'] if row['source'] else '',
row['urn'] if row['urn'] else '', row['location_prefix'] if row['location_prefix'] else '',
row['parent_name'] if row['parent_name'] else '', row['schema_type'] if row['schema_type'] else '',
                   json.dumps(row['properties']) if row['properties'] else json.dumps(''),
                   json.dumps(row['schema']) if row['schema'] else json.dumps(''), json.dumps(row['fields']) if row['fields'] else json.dumps('')))
if row_count % 1000 == 0:
self.bulk_insert(params, url)
params = []
row_count += 1
if len(params) > 0:
self.bulk_insert(params, url)
def update_metric(self):
sql = """
SELECT * FROM dict_business_metric
"""
url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/wherehows/metric/_bulk'
params = []
self.wh_cursor.execute(sql)
rows = DbUtil.copy_dict_cursor(self.wh_cursor)
row_count = 1
for row in rows:
params.append('{ "index": { "_id": ' + str(row['metric_id']) + ' }}')
params.append(
"""{"metric_id": %d, "metric_name": %s, "metric_description": %s, "dashboard_name": %s, "metric_group": %s, "metric_category": %s, "metric_sub_category": %s, "metric_level": %s, "metric_source_type": %s, "metric_source": %s, "metric_source_dataset_id": %d, "metric_ref_id_type": %s, "metric_ref_id": %s, "metric_type": %s, "metric_additive_type": %s, "metric_grain": %s, "metric_display_factor": %f, "metric_display_factor_sym": %s, "metric_good_direction": %s, "metric_formula": %s, "dimensions": %s, "owners": %s, "tags": %s, "urn": %s, "metric_url": %s, "wiki_url": %s, "scm_url": %s}"""
% (row['metric_id'], json.dumps(row['metric_name']) if row['metric_name'] else json.dumps(''),
json.dumps(row['metric_description']) if row['metric_description'] else json.dumps(''),
json.dumps(row['dashboard_name']) if row['dashboard_name'] else json.dumps(''),
json.dumps(row['metric_group']) if row['metric_group'] else json.dumps(''),
json.dumps(row['metric_category']) if row['metric_category'] else json.dumps(''),
json.dumps(row['metric_sub_category']) if row['metric_sub_category'] else json.dumps(''),
json.dumps(row['metric_level']) if row['metric_level'] else json.dumps(''),
json.dumps(row['metric_source_type']) if row['metric_source_type'] else json.dumps(''),
json.dumps(row['metric_source']) if row['metric_source'] else json.dumps(''),
row['metric_source_dataset_id'] if row['metric_source_dataset_id'] else 0,
json.dumps(row['metric_ref_id_type']) if row['metric_ref_id_type'] else json.dumps(''),
json.dumps(row['metric_ref_id']) if row['metric_ref_id'] else json.dumps(''),
json.dumps(row['metric_type']) if row['metric_type'] else json.dumps(''),
json.dumps(row['metric_additive_type']) if row['metric_additive_type'] else json.dumps(''),
json.dumps(row['metric_grain']) if row['metric_grain'] else json.dumps(''),
row['metric_display_factor'] if row['metric_display_factor'] else 0.0,
json.dumps(row['metric_display_factor_sym']) if row['metric_display_factor_sym'] else json.dumps(''),
json.dumps(row['metric_good_direction']) if row['metric_good_direction'] else json.dumps(''),
json.dumps(row['metric_formula']) if row['metric_formula'] else json.dumps(''),
json.dumps(row['dimensions']) if row['dimensions'] else json.dumps(''),
json.dumps(row['owners']) if row['owners'] else json.dumps(''),
json.dumps(row['tags']) if row['tags'] else json.dumps(''),
json.dumps(row['urn']) if row['urn'] else json.dumps(''),
json.dumps(row['metric_url']) if row['metric_url'] else json.dumps(''),
json.dumps(row['wiki_url']) if row['wiki_url'] else json.dumps(''),
json.dumps(row['scm_url']) if row['scm_url'] else json.dumps('')))
if row_count % 1000 == 0:
self.bulk_insert(params, url)
params = []
row_count += 1
if len(params) > 0:
self.bulk_insert(params, url)
def update_flow_jobs(self, last_unixtime=None):
if last_unixtime:
flow_sql = """
SELECT a.app_code, f.* FROM flow f JOIN cfg_application a on f.app_id = a.app_id
WHERE from_unixtime(modified_time) >= DATE_SUB(from_unixtime(%f), INTERVAL 1 HOUR)
""" % last_unixtime
else:
flow_sql = """
SELECT a.app_code, f.* FROM flow f JOIN cfg_application a on f.app_id = a.app_id
"""
job_sql = """
SELECT * FROM flow_job WHERE app_id = %d and flow_id = %d
"""
url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/wherehows/flow_jobs/_bulk'
params = []
self.wh_cursor.execute(flow_sql)
rows = DbUtil.copy_dict_cursor(self.wh_cursor)
row_count = 1
for row in rows:
self.wh_cursor.execute(job_sql %(long(row['app_id']), long(row['flow_id'])))
jobs = []
job_rows = DbUtil.copy_dict_cursor(self.wh_cursor)
if job_rows:
for job_row in job_rows:
jobs.append({"app_id": job_row['app_id'], "flow_id": job_row['flow_id'], "job_id": job_row['job_id'],
"job_name": job_row['job_name'] if job_row['job_name'] else '',
"job_path": job_row['job_path'] if job_row['job_path'] else '',
"job_type_id": job_row['job_type_id'],
"job_type": job_row['job_type'] if job_row['job_type'] else '',
"pre_jobs": job_row['pre_jobs'] if job_row['pre_jobs'] else '',
"post_jobs": job_row['post_jobs'] if job_row['post_jobs'] else '',
"is_current": job_row['is_current'] if job_row['is_current'] else '',
"is_first": job_row['is_first'] if job_row['is_first'] else '',
"is_last": job_row['is_last'] if job_row['is_last'] else ''})
params.append('{ "index": { "_id": ' + str(long(row['flow_id'])*10000 + long(row['app_id'])) + ' }}')
if len(jobs) > 0:
params.append(
"""{"app_id": %d, "flow_id": %d, "app_code": "%s", "flow_name": "%s", "flow_group": "%s", "flow_path": "%s", "flow_level": %d, "is_active": "%s", "is_scheduled": "%s", "pre_flows": "%s", "jobs": %s}"""
% (row['app_id'], row['flow_id'], row['app_code'] if row['app_code'] else '',
row['flow_name'] if row['flow_name'] else '', row['flow_group'] if row['flow_group'] else '',
row['flow_path'] if row['flow_path'] else '', row['flow_level'],
row['is_active'] if row['is_active'] else '', row['is_scheduled'] if row['is_scheduled'] else '',
row['pre_flows'] if row['pre_flows'] else '', json.dumps(jobs)))
else:
params.append(
"""{"app_id": %d, "flow_id": %d, "app_code": "%s", "flow_name": "%s", "flow_group": "%s", "flow_path": "%s", "flow_level": %d, "is_active": "%s", "is_scheduled": "%s", "pre_flows": "%s", "jobs": ""}"""
% (row['app_id'], row['flow_id'], row['app_code'] if row['app_code'] else '',
row['flow_name'] if row['flow_name'] else '', row['flow_group'] if row['flow_group'] else '',
row['flow_path'] if row['flow_path'] else '', row['flow_level'],
row['is_active'] if row['is_active'] else '', row['is_scheduled'] if row['is_scheduled'] else '',
row['pre_flows'] if row['pre_flows'] else ''))
if row_count % 1000 == 0:
self.bulk_insert(params, url)
self.logger.info(str(row_count))
params = []
row_count += 1
if len(params) > 0:
self.logger.info(str(len(params)))
self.bulk_insert(params, url)
def run(self):
try:
self.update_dataset()
self.update_comment()
self.update_dataset_field()
self.update_flow_jobs()
self.update_metric()
finally:
self.wh_cursor.close()
self.wh_con.close()
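# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original script) of the payload that the
# update_* methods above accumulate in `params`: each document contributes an
# action line followed by a source line, the newline-delimited JSON format
# that Elasticsearch's _bulk endpoint expects. `bulk_insert` (defined earlier
# in this class) presumably joins `params` with newlines before POSTing.
# Building both lines with json.dumps, as below, avoids the malformed-JSON
# risk of the hand-assembled format strings used above.
def _demo_bulk_lines(row):
    action = json.dumps({"index": {"_id": row['id'], "parent": row['dataset_id']}})
    source = json.dumps({"text": row['text'] or '',
                         "user_id": row['user_id'] or 0,
                         "dataset_id": row['dataset_id'] or 0,
                         "comment_type": row['comment_type'] or ''})
    return action + '\n' + source + '\n'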
if __name__ == "__main__":
props = sys.argv[1]
esi = ElasticSearchIndex(props)
esi.run()
|
|
"""Terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import argparse
import datetime
import inspect
import platform
import sys
import warnings
from collections import Counter
from functools import partial
from pathlib import Path
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TextIO
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
import attr
import pluggy
import py
import _pytest._version
from _pytest import nodes
from _pytest import timing
from _pytest._code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._io.wcwidth import wcswidth
from _pytest.compat import final
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
REPORT_COLLECTING_RESOLUTION = 0.5
KNOWN_TYPES = (
"failed",
"passed",
"skipped",
"deselected",
"xfailed",
"xpassed",
"warnings",
"error",
)
_REPORTCHARS_DEFAULT = "fE"
class MoreQuietAction(argparse.Action):
"""A modified copy of the argparse count action which counts down and updates
the legacy quiet attribute at the same time.
Used to unify verbosity handling.
"""
def __init__(
self,
option_strings: Sequence[str],
dest: str,
default: object = None,
required: bool = False,
help: Optional[str] = None,
) -> None:
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[object], None],
option_string: Optional[str] = None,
) -> None:
new_count = getattr(namespace, self.dest, 0) - 1
setattr(namespace, self.dest, new_count)
# todo Deprecate config.quiet
namespace.quiet = getattr(namespace, "quiet", 0) + 1
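# A minimal sketch (not used by pytest itself) of the effect of MoreQuietAction
# on the parsed namespace: each -q decrements the shared "verbose" destination
# while incrementing the legacy "quiet" attribute.
def _demo_more_quiet_action() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", action=MoreQuietAction, default=0, dest="verbose")
    args = parser.parse_args(["-q", "-q"])
    assert args.verbose == -2 and args.quiet == 2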
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="increase verbosity.",
)
group._addoption(
"--no-header",
action="store_true",
default=False,
dest="no_header",
help="disable header",
)
group._addoption(
"--no-summary",
action="store_true",
default=False,
dest="no_summary",
help="disable summary",
)
group._addoption(
"-q",
"--quiet",
action=MoreQuietAction,
default=0,
dest="verbose",
help="decrease verbosity.",
)
group._addoption(
"--verbosity",
dest="verbose",
type=int,
default=0,
help="set verbosity. Default is 0.",
)
group._addoption(
"-r",
action="store",
dest="reportchars",
default=_REPORTCHARS_DEFAULT,
metavar="chars",
help="show extra test summary info as specified by chars: (f)ailed, "
"(E)rror, (s)kipped, (x)failed, (X)passed, "
"(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
"(w)arnings are enabled by default (see --disable-warnings), "
"'N' can be used to reset the list. (default: 'fE').",
)
group._addoption(
"--disable-warnings",
"--disable-pytest-warnings",
default=False,
dest="disable_warnings",
action="store_true",
help="disable warnings summary",
)
group._addoption(
"-l",
"--showlocals",
action="store_true",
dest="showlocals",
default=False,
help="show locals in tracebacks (disabled by default).",
)
group._addoption(
"--tb",
metavar="style",
action="store",
dest="tbstyle",
default="auto",
choices=["auto", "long", "short", "no", "line", "native"],
help="traceback print mode (auto/long/short/line/native/no).",
)
group._addoption(
"--show-capture",
action="store",
dest="showcapture",
choices=["no", "stdout", "stderr", "log", "all"],
default="all",
help="Controls how captured stdout/stderr/log is shown on failed tests. "
"Default is 'all'.",
)
group._addoption(
"--fulltrace",
"--full-trace",
action="store_true",
default=False,
help="don't cut any tracebacks (default is to cut).",
)
group._addoption(
"--color",
metavar="color",
action="store",
dest="color",
default="auto",
choices=["yes", "no", "auto"],
help="color terminal output (yes/no/auto).",
)
group._addoption(
"--code-highlight",
default="yes",
choices=["yes", "no"],
help="Whether code should be highlighted (only if --color is also enabled)",
)
parser.addini(
"console_output_style",
help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
default="progress",
)
def pytest_configure(config: Config) -> None:
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, "terminalreporter")
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config: Config) -> str:
reportchars: str = config.option.reportchars
old_aliases = {"F", "S"}
reportopts = ""
for char in reportchars:
if char in old_aliases:
char = char.lower()
if char == "a":
reportopts = "sxXEf"
elif char == "A":
reportopts = "PpsxXEf"
elif char == "N":
reportopts = ""
elif char not in reportopts:
reportopts += char
if not config.option.disable_warnings and "w" not in reportopts:
reportopts = "w" + reportopts
elif config.option.disable_warnings and "w" in reportopts:
reportopts = reportopts.replace("w", "")
return reportopts
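# A small sketch (with a hypothetical stand-in config object, not part of
# pytest) of how the -r report characters fold together: 'a' expands to
# everything except passed, the legacy alias 'F' is lowercased into the
# already-present 'f', and 'w' is prepended because warnings are enabled by
# default.
def _demo_getreportopt() -> None:
    class _Option:
        reportchars = "aF"
        disable_warnings = False

    class _Config:
        option = _Option()

    assert getreportopt(_Config()) == "wsxXEf"  # type: ignore[arg-type]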
@hookimpl(trylast=True) # after _pytest.runner
def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
letter = "F"
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
outcome: str = report.outcome
if report.when in ("collect", "setup", "teardown") and outcome == "failed":
outcome = "error"
letter = "E"
return outcome, letter, outcome.upper()
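# Sketch with a hypothetical stand-in report object: a failure during collect,
# setup, or teardown is promoted to an "error" with the 'E' status letter
# rather than being reported as a plain test failure.
def _demo_report_teststatus() -> None:
    class _FakeReport:
        passed = False
        skipped = False
        outcome = "failed"
        when = "setup"

    assert pytest_report_teststatus(_FakeReport()) == ("error", "E", "ERROR")  # type: ignore[arg-type]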
@attr.s
class WarningReport:
"""Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
:ivar str message:
User friendly message about the warning.
:ivar str|None nodeid:
nodeid that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
File system location of the source of the warning (see ``get_location``).
"""
message = attr.ib(type=str)
nodeid = attr.ib(type=Optional[str], default=None)
fslocation = attr.ib(
type=Optional[Union[Tuple[str, int], py.path.local]], default=None
)
count_towards_summary = True
def get_location(self, config: Config) -> Optional[str]:
"""Return the more user-friendly information about the location of a warning, or None."""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = bestrelpath(
config.invocation_params.dir, absolutepath(filename)
)
return f"{relpath}:{linenum}"
else:
return str(self.fslocation)
return None
@final
class TerminalReporter:
def __init__(self, config: Config, file: Optional[TextIO] = None) -> None:
import _pytest.config
self.config = config
self._numcollected = 0
self._session: Optional[Session] = None
self._showfspath: Optional[bool] = None
self.stats: Dict[str, List[Any]] = {}
self._main_color: Optional[str] = None
self._known_types: Optional[List[str]] = None
self.startdir = config.invocation_dir
self.startpath = config.invocation_params.dir
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
self._screen_width = self._tw.fullwidth
self.currentfspath: Union[None, Path, str, int] = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported: Set[str] = set()
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write: Optional[float] = None
self._already_displayed_warnings: Optional[int] = None
self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
"""Return whether we should display progress information based on the current config."""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture", "no") == "no":
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow", False):
return False
cfg: str = self.config.getini("console_output_style")
if cfg == "progress":
return "progress"
elif cfg == "count":
return "count"
else:
return False
@property
def verbosity(self) -> int:
verbosity: int = self.config.option.verbose
return verbosity
@property
def showheader(self) -> bool:
return self.verbosity >= 0
@property
def no_header(self) -> bool:
return bool(self.config.option.no_header)
@property
def no_summary(self) -> bool:
return bool(self.config.option.no_summary)
@property
def showfspath(self) -> bool:
if self._showfspath is None:
return self.verbosity >= 0
return self._showfspath
@showfspath.setter
def showfspath(self, value: Optional[bool]) -> None:
self._showfspath = value
@property
def showlongtestinfo(self) -> bool:
return self.verbosity > 0
def hasopt(self, char: str) -> bool:
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None:
fspath = self.config.rootpath / nodeid.split("::")[0]
if self.currentfspath is None or fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
relfspath = bestrelpath(self.startpath, fspath)
self._tw.line()
self._tw.write(relfspath + " ")
self._tw.write(res, flush=True, **markup)
def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self) -> None:
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
self._tw.write(content, flush=flush, **markup)
def flush(self) -> None:
self._tw.flush()
def write_line(self, line: Union[str, bytes], **markup: bool) -> None:
if not isinstance(line, str):
line = str(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
"""Rewinds the terminal cursor to the beginning and writes the given line.
:param erase:
If True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(
self,
sep: str,
title: Optional[str] = None,
fullwidth: Optional[int] = None,
**markup: bool,
) -> None:
self.ensure_newline()
self._tw.sep(sep, title, fullwidth, **markup)
def section(self, title: str, sep: str = "=", **kw: bool) -> None:
self._tw.sep(sep, title, **kw)
def line(self, msg: str, **kw: bool) -> None:
self._tw.line(msg, **kw)
def _add_stats(self, category: str, items: Sequence[Any]) -> None:
set_main_color = category not in self.stats
self.stats.setdefault(category, []).extend(items)
if set_main_color:
self._set_main_color()
def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return True
def pytest_warning_recorded(
self, warning_message: warnings.WarningMessage, nodeid: str,
) -> None:
from _pytest.warnings import warning_record_to_str
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
self._add_stats("warnings", [warning_report])
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
msg = f"PLUGIN registered: {plugin}"
# XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line.
self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
self._add_stats("deselected", items)
def pytest_runtest_logstart(
self, nodeid: str, location: Tuple[str, Optional[int], str]
) -> None:
# Ensure that the path is printed before the
# 1st test of a module starts running.
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
self.flush()
elif self.showfspath:
self.write_fspath_result(nodeid, "")
self.flush()
def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tests_ran = True
rep = report
res: Tuple[
str, str, Union[str, Tuple[str, Mapping[str, bool]]]
] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
category, letter, word = res
if not isinstance(word, tuple):
markup = None
else:
word, markup = word
self._add_stats(category, [rep])
if not letter and not word:
# Probably passed setup/teardown.
return
running_xdist = hasattr(rep, "node")
if markup is None:
was_xfail = hasattr(report, "wasxfail")
if rep.passed and not was_xfail:
markup = {"green": True}
elif rep.passed and was_xfail:
markup = {"yellow": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if rep.skipped or hasattr(report, "wasxfail"):
available_width = (
(self._tw.fullwidth - self._tw.width_of_current_line)
- len(" [100%]")
- 1
)
reason = _get_raw_skip_reason(rep)
reason_ = _format_trimmed(" ({})", reason, available_width)
if reason and reason_ is not None:
self._tw.write(reason_)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write("[%s]" % rep.node.gateway.id)
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
self.flush()
@property
def _is_last_item(self) -> bool:
assert self._session is not None
return len(self._progress_nodeids_reported) == self._session.testscollected
def pytest_runtest_logfinish(self, nodeid: str) -> None:
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
if self._show_progress_info == "count":
num_tests = self._session.testscollected
progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
else:
progress_length = len(" [100%]")
self._progress_nodeids_reported.add(nodeid)
if self._is_last_item:
self._write_progress_information_filling_space()
else:
main_color, _ = self._get_main_color()
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", **{main_color: True})
def _get_progress_information_message(self) -> str:
assert self._session
collected = self._session.testscollected
if self._show_progress_info == "count":
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = f" [{counter_format}/{{}}]"
return format_string.format(len(progress), collected)
return f" [ {collected} / {collected} ]"
else:
if collected:
return " [{:3d}%]".format(
len(self._progress_nodeids_reported) * 100 // collected
)
return " [100%]"
def _write_progress_information_filling_space(self) -> None:
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), flush=True, **{color: True})
@property
def _width_of_current_line(self) -> int:
"""Return the width of the current line."""
return self._tw.width_of_current_line
def pytest_collection(self) -> None:
if self.isatty:
if self.config.option.verbose >= 0:
self.write("collecting ... ", flush=True, bold=True)
self._collect_report_last_write = timing.time()
elif self.config.option.verbose >= 1:
self.write("collecting ... ", flush=True, bold=True)
def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self._add_stats("error", [report])
elif report.skipped:
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, Item)]
self._numcollected += len(items)
if self.isatty:
self.report_collect()
def report_collect(self, final: bool = False) -> None:
if self.config.option.verbose < 0:
return
if not final:
# Only write "collecting" report every 0.5s.
t = timing.time()
if (
self._collect_report_last_write is not None
and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
):
return
self._collect_report_last_write = t
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
selected = self._numcollected - errors - skipped - deselected
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d error%s" % (errors, "s" if errors != 1 else "")
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self._numcollected > selected > 0:
line += " / %d selected" % selected
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@hookimpl(trylast=True)
def pytest_sessionstart(self, session: "Session") -> None:
self._session = session
self._sessionstarttime = timing.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
if not self.no_header:
msg = f"platform {sys.platform} -- Python {verinfo}"
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info:
verinfo = ".".join(map(str, pypy_version_info[:3]))
msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3])
msg += ", pytest-{}, py-{}, pluggy-{}".format(
_pytest._version.version, py.__version__, pluggy.__version__
)
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(
self, lines: Sequence[Union[str, Sequence[str]]]
) -> None:
for line_or_lines in reversed(lines):
if isinstance(line_or_lines, str):
self.write_line(line_or_lines)
else:
for line in line_or_lines:
self.write_line(line)
def pytest_report_header(self, config: Config) -> List[str]:
line = "rootdir: %s" % config.rootpath
if config.inipath:
line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)
testpaths: List[str] = config.getini("testpaths")
if config.invocation_params.dir == config.rootpath and config.args == testpaths:
line += ", testpaths: {}".format(", ".join(testpaths))
result = [line]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return result
def pytest_collection_finish(self, session: "Session") -> None:
self.report_collect(True)
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items
)
self._write_report_lines_from_hooks(lines)
if self.config.getoption("collectonly"):
if session.items:
if self.config.option.verbose > -1:
self._tw.line("")
self._printcollecteditems(session.items)
failed = self.stats.get("failed")
if failed:
self._tw.sep("!", "collection failures")
for rep in failed:
rep.toterminal(self._tw)
def _printcollecteditems(self, items: Sequence[Item]) -> None:
# To print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway.
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
self._tw.line(item.nodeid)
return
stack: List[Node] = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
if col.name == "()": # Skip Instances.
continue
indent = (len(stack) - 1) * " "
self._tw.line(f"{indent}{col}")
if self.config.option.verbose >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None
if doc:
for line in doc.splitlines():
self._tw.line("{}{}".format(indent + " ", line))
@hookimpl(hookwrapper=True)
def pytest_sessionfinish(
self, session: "Session", exitstatus: Union[int, ExitCode]
):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
ExitCode.OK,
ExitCode.TESTS_FAILED,
ExitCode.INTERRUPTED,
ExitCode.USAGE_ERROR,
ExitCode.NO_TESTS_COLLECTED,
)
if exitstatus in summary_exit_codes and not self.no_summary:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus, config=self.config
)
if session.shouldfail:
self.write_sep("!", str(session.shouldfail), red=True)
if exitstatus == ExitCode.INTERRUPTED:
self._report_keyboardinterrupt()
self._keyboardinterrupt_memo = None
elif session.shouldstop:
self.write_sep("!", str(session.shouldstop), red=True)
self.summary_stats()
@hookimpl(hookwrapper=True)
def pytest_terminal_summary(self) -> Generator[None, None, None]:
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
yield
self.short_test_summary()
# Display any extra warnings from teardown here (if any).
self.summary_warnings()
def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self) -> None:
if self._keyboardinterrupt_memo is not None:
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self) -> None:
excrepr = self._keyboardinterrupt_memo
assert excrepr is not None
assert excrepr.reprcrash is not None
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --full-trace)",
yellow=True,
)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path.
if fspath:
res = mkrel(nodeid)
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + bestrelpath(self.startpath, fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
head_line = rep.head_line
if head_line:
return head_line
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# Summaries for sessionfinish.
#
def getreports(self, name: str):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
def summary_warnings(self) -> None:
if self.hasopt("w"):
all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings")
if not all_warnings:
return
final = self._already_displayed_warnings is not None
if final:
warning_reports = all_warnings[self._already_displayed_warnings :]
else:
warning_reports = all_warnings
self._already_displayed_warnings = len(warning_reports)
if not warning_reports:
return
reports_grouped_by_message: Dict[str, List[WarningReport]] = {}
for wr in warning_reports:
reports_grouped_by_message.setdefault(wr.message, []).append(wr)
def collapsed_location_report(reports: List[WarningReport]) -> str:
locations = []
for w in reports:
location = w.get_location(self.config)
if location:
locations.append(location)
if len(locations) < 10:
return "\n".join(map(str, locations))
counts_by_filename = Counter(
str(loc).split("::", 1)[0] for loc in locations
)
return "\n".join(
"{}: {} warning{}".format(k, v, "s" if v > 1 else "")
for k, v in counts_by_filename.items()
)
title = "warnings summary (final)" if final else "warnings summary"
self.write_sep("=", title, yellow=True, bold=False)
for message, message_reports in reports_grouped_by_message.items():
maybe_location = collapsed_location_report(message_reports)
if maybe_location:
self._tw.line(maybe_location)
lines = message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line("-- Docs: https://docs.pytest.org/en/stable/warnings.html")
def summary_passes(self) -> None:
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports: List[TestReport] = self.getreports("passed")
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, green=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def _get_teardown_reports(self, nodeid: str) -> List[TestReport]:
reports = self.getreports("")
return [
report
for report in reports
if report.when == "teardown" and report.nodeid == nodeid
]
def _handle_teardown_sections(self, nodeid: str) -> None:
for report in self._get_teardown_reports(nodeid):
self.print_teardown_sections(report)
def print_teardown_sections(self, rep: TestReport) -> None:
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self) -> None:
if self.config.option.tbstyle != "no":
reports: List[BaseReport] = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
if self.config.option.tbstyle == "line":
for rep in reports:
line = self._getcrashline(rep)
self.write_line(line)
else:
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def summary_errors(self) -> None:
if self.config.option.tbstyle != "no":
reports: List[BaseReport] = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
msg = f"ERROR at {rep.when} of {msg}"
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
def _outrep_summary(self, rep: BaseReport) -> None:
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self) -> None:
if self.verbosity < -1:
return
session_duration = timing.time() - self._sessionstarttime
(parts, main_color) = self.build_summary_stats_line()
line_parts = []
display_sep = self.verbosity >= 0
if display_sep:
fullwidth = self._tw.fullwidth
for text, markup in parts:
with_markup = self._tw.markup(text, **markup)
if display_sep:
fullwidth += len(with_markup) - len(text)
line_parts.append(with_markup)
msg = ", ".join(line_parts)
main_markup = {main_color: True}
duration = " in {}".format(format_session_duration(session_duration))
duration_with_markup = self._tw.markup(duration, **main_markup)
if display_sep:
fullwidth += len(duration_with_markup) - len(duration)
msg += duration_with_markup
if display_sep:
markup_for_end_sep = self._tw.markup("", **main_markup)
if markup_for_end_sep.endswith("\x1b[0m"):
markup_for_end_sep = markup_for_end_sep[:-4]
fullwidth += len(markup_for_end_sep)
msg += markup_for_end_sep
if display_sep:
self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
else:
self.write_line(msg, **main_markup)
def short_test_summary(self) -> None:
if not self.reportchars:
return
def show_simple(stat, lines: List[str]) -> None:
failed = self.stats.get(stat, [])
if not failed:
return
termwidth = self._tw.fullwidth
config = self.config
for rep in failed:
line = _get_line_with_reprcrash_message(config, rep, termwidth)
lines.append(line)
def show_xfailed(lines: List[str]) -> None:
xfailed = self.stats.get("xfailed", [])
for rep in xfailed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
lines.append(f"{verbose_word} {pos}")
reason = rep.wasxfail
if reason:
lines.append(" " + str(reason))
def show_xpassed(lines: List[str]) -> None:
xpassed = self.stats.get("xpassed", [])
for rep in xpassed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
reason = rep.wasxfail
lines.append(f"{verbose_word} {pos} {reason}")
def show_skipped(lines: List[str]) -> None:
skipped: List[CollectReport] = self.stats.get("skipped", [])
fskips = _folded_skips(self.startpath, skipped) if skipped else []
if not fskips:
return
verbose_word = skipped[0]._get_verbose_word(self.config)
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
if lineno is not None:
lines.append(
"%s [%d] %s:%d: %s"
% (verbose_word, num, fspath, lineno, reason)
)
else:
lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))
REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
"x": show_xfailed,
"X": show_xpassed,
"f": partial(show_simple, "failed"),
"s": show_skipped,
"p": partial(show_simple, "passed"),
"E": partial(show_simple, "error"),
}
lines: List[str] = []
for char in self.reportchars:
action = REPORTCHAR_ACTIONS.get(char)
if action: # skipping e.g. "P" (passed with output) here.
action(lines)
if lines:
self.write_sep("=", "short test summary info")
for line in lines:
self.write_line(line)
def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types
def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color
def _set_main_color(self) -> None:
unknown_types: List[str] = []
for found_type in self.stats.keys():
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
unknown_types.append(found_type)
self._known_types = list(KNOWN_TYPES) + unknown_types
self._main_color = self._determine_main_color(bool(unknown_types))
def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
"""
Build the parts used in the last summary stats line.
The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===".
This function builds a list of the "parts" that make up for the text in that line, in
the example above it would be:
[
("12 passed", {"green": True}),
("2 errors", {"red": True}
]
That last dict for each line is a "markup dictionary", used by TerminalWriter to
color output.
The final color of the line is also determined by this function, and is the second
element of the returned tuple.
"""
if self.config.getoption("collectonly"):
return self._build_collect_only_summary_stats_line()
else:
return self._build_normal_summary_stats_line()
def _get_reports_to_display(self, key: str) -> List[Any]:
"""Get test/collection reports for the given status key, such as `passed` or `error`."""
reports = self.stats.get(key, [])
return [x for x in reports if getattr(x, "count_towards_summary", True)]
def _build_normal_summary_stats_line(
self,
) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
main_color, known_types = self._get_main_color()
parts = []
for key in known_types:
reports = self._get_reports_to_display(key)
if reports:
count = len(reports)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % pluralize(count, key), markup))
if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]
return parts, main_color
def _build_collect_only_summary_stats_line(
self,
) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
deselected = len(self._get_reports_to_display("deselected"))
errors = len(self._get_reports_to_display("error"))
if self._numcollected == 0:
parts = [("no tests collected", {"yellow": True})]
main_color = "yellow"
elif deselected == 0:
main_color = "green"
collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
parts = [(collected_output, {main_color: True})]
else:
all_tests_were_deselected = self._numcollected == deselected
if all_tests_were_deselected:
main_color = "yellow"
collected_output = f"no tests collected ({deselected} deselected)"
else:
main_color = "green"
selected = self._numcollected - deselected
collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
parts = [(collected_output, {main_color: True})]
if errors:
main_color = _color_for_type["error"]
parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
return parts, main_color
def _get_pos(config: Config, rep: BaseReport):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
return nodeid
def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
"""Format msg into format, ellipsizing it if doesn't fit in available_width.
Returns None if even the ellipsis can't fit.
"""
# Only use the first line.
i = msg.find("\n")
if i != -1:
msg = msg[:i]
ellipsis = "..."
format_width = wcswidth(format.format(""))
if format_width + len(ellipsis) > available_width:
return None
if format_width + wcswidth(msg) > available_width:
available_width -= len(ellipsis)
msg = msg[:available_width]
while format_width + wcswidth(msg) > available_width:
msg = msg[:-1]
msg += ellipsis
return format.format(msg)
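# Worked examples (assuming plain ASCII input, so wcswidth equals len): the
# trimmed message plus ellipsis always fits in the available width, and None
# signals that not even the ellipsis fits.
def _demo_format_trimmed() -> None:
    assert _format_trimmed(" - {}", "hi", 10) == " - hi"
    assert _format_trimmed(" - {}", "hello world", 10) == " - hell..."
    assert _format_trimmed(" - {}", "hello world", 5) is None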
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, termwidth: int
) -> str:
"""Get summary line for a report, trying to add reprcrash message."""
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)
line = f"{verbose_word} {pos}"
line_width = wcswidth(line)
try:
# Type ignored intentionally -- possible AttributeError expected.
msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
except AttributeError:
pass
else:
available_width = termwidth - line_width
msg = _format_trimmed(" - {}", msg, available_width)
if msg is not None:
line += msg
return line
def _folded_skips(
startpath: Path, skipped: Sequence[CollectReport],
) -> List[Tuple[int, str, Optional[int], str]]:
d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
for event in skipped:
assert event.longrepr is not None
assert isinstance(event.longrepr, tuple), (event, event.longrepr)
assert len(event.longrepr) == 3, (event, event.longrepr)
fspath, lineno, reason = event.longrepr
# For consistency, report all fspaths in relative form.
fspath = bestrelpath(startpath, Path(fspath))
keywords = getattr(event, "keywords", {})
# Folding reports with a global pytestmark variable.
# This is a workaround, because for now we cannot identify the scope of a skip marker.
# TODO: Revisit once the scope of marks is fixed.
if (
event.when == "setup"
and "skip" in keywords
and "pytestmark" not in keywords
):
key: Tuple[str, Optional[int], str] = (fspath, None, reason)
else:
key = (fspath, lineno, reason)
d.setdefault(key, []).append(event)
values: List[Tuple[int, str, Optional[int], str]] = []
for key, events in d.items():
values.append((len(events), *key))
return values
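# Sketch with hypothetical stand-in reports (not part of pytest): two skips
# that share the same (file, line, reason) triple are folded into a single
# entry carrying a count.
def _demo_folded_skips() -> None:
    class _FakeSkip:
        when = "call"
        keywords: Dict[str, object] = {}

        def __init__(self, longrepr):
            self.longrepr = longrepr

    start = Path.cwd()
    rep = _FakeSkip((str(start / "test_a.py"), 3, "Skipped: slow"))
    folded = _folded_skips(start, [rep, rep])  # type: ignore[arg-type]
    assert folded == [(2, "test_a.py", 3, "Skipped: slow")]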
_color_for_type = {
"failed": "red",
"error": "red",
"warnings": "yellow",
"passed": "green",
}
_color_for_type_default = "yellow"
def pluralize(count: int, noun: str) -> Tuple[int, str]:
# No need to pluralize words such as `failed` or `passed`.
if noun not in ["error", "warnings", "test"]:
return count, noun
# The `warnings` key is plural. To avoid API breakage, we keep it that way but
# set it to singular here so we can determine plurality in the same way as we do
# for `error`.
noun = noun.replace("warnings", "warning")
return count, noun + "s" if count != 1 else noun
def _plugin_nameversions(plugininfo) -> List[str]:
values: List[str] = []
for plugin, dist in plugininfo:
# Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
# We decided to print python package names; they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
return f"{seconds:.2f}s"
else:
dt = datetime.timedelta(seconds=int(seconds))
return f"{seconds:.2f}s ({dt})"
def _get_raw_skip_reason(report: TestReport) -> str:
"""Get the reason string of a skip/xfail/xpass test report.
The string is just the part given by the user.
"""
if hasattr(report, "wasxfail"):
reason = cast(str, report.wasxfail)
if reason.startswith("reason: "):
reason = reason[len("reason: ") :]
return reason
else:
assert report.skipped
assert isinstance(report.longrepr, tuple)
_, _, reason = report.longrepr
if reason.startswith("Skipped: "):
reason = reason[len("Skipped: ") :]
elif reason == "Skipped":
reason = ""
return reason
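# Sketch with hypothetical stand-in reports: the user-supplied part of the
# reason is extracted from both xfail and skip reports.
def _demo_get_raw_skip_reason() -> None:
    class _Xfail:
        wasxfail = "reason: flaky on CI"

    class _Skip:
        skipped = True
        longrepr = ("test_a.py", 3, "Skipped: slow")

    assert _get_raw_skip_reason(_Xfail()) == "flaky on CI"  # type: ignore[arg-type]
    assert _get_raw_skip_reason(_Skip()) == "slow"  # type: ignore[arg-type]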
|
|
# Modified from https://github.com/lisa-lab/DeepLearningTutorials/blob/master/code/convolutional_mlp.py
# (c) 2010--2015, Deep Learning Tutorials Development Team
# Added functionality for saving the parameters, reloading, and testing the model on custom images.
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- Unlike the original LeNet5, the second convolutional layer here is fully
connected to all feature maps of the first (LeNet5 used a sparse connection table)
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
from __future__ import print_function
import os
import sys
import timeit
import numpy
import cPickle as pickle
from data_utils import save_model, load_params, epoch_from_filename
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv2d
from mlp_modified import HiddenLayer, LogisticRegression, load_data
import fli
import logging
logfilename = '../logs/mlp_convolutional_modified.log'
activation_convmlp = T.nnet.relu  # T.tanh
n_epochs_convmlp = 1000
saveepochs_convmlp = numpy.arange(0, n_epochs_convmlp + 1, 10)
add_blurs = False
blur = 2
testrun = False
loadparams = False
rotation_angles = [10, 5, -5, -10]
# If loadparams is True, the parameters are loaded from this file;
# n_epochs_convmlp must be greater than the starting epoch number,
# which is extracted from the paramsfilename.
paramsfilename = '../data/models/best_model_convolutional_mlp_110_pars__zero_angles_10_5_-5_-10_.pkl'
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), W=None, b=None):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
if W is None:
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
else:
self.W=W
self.b=b
# convolve input feature maps with filters
conv_out = conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
input_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = activation_convmlp(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
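# A minimal sketch (not part of the original tutorial) of the shape and
# initialization arithmetic used above, with layer 0's default values: a
# "valid" convolution with a k x k filter shrinks each spatial dimension by
# k - 1, and (2, 2) max pooling then halves it.
def _demo_layer_arithmetic():
    def conv_pool_size(size, filter_size=5, pool=2):
        return (size - filter_size + 1) // pool

    assert conv_pool_size(28) == 12  # layer 0: 28x28 -> 24x24 -> 12x12
    assert conv_pool_size(12) == 4   # layer 1: 12x12 -> 8x8 -> 4x4

    # Glorot-style bound for layer 0 (20 5x5 filters over 1 input map):
    fan_in = 1 * 5 * 5                # 25
    fan_out = 20 * 5 * 5 // (2 * 2)   # 125
    w_bound = numpy.sqrt(6. / (fan_in + fan_out))
    assert abs(w_bound - 0.2) < 1e-9  # sqrt(6 / 150) == 0.2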
def evaluate_lenet5(learning_rate=0.1, n_epochs=n_epochs_convmlp, dataset='mnist.pkl.gz', nkerns=[20, 50],
batch_size=500, thislogfilename = logfilename,
loadparams=loadparams, paramsfilename=paramsfilename,
randomInit=False, testrun=testrun, add_blurs=add_blurs, blur=blur, rot_angles = rotation_angles, annotation =''):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training/testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
loadedparams = [None] * 8
if loadparams:
print("Loading params from " + paramsfilename + "...")
loadedparams = load_params(paramsfilename)
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset, add_the_blurs=add_blurs, blur = blur, angles = rot_angles)
if len(rot_angles) > 0:
annotation += '_angles_'
for ang in rot_angles:
annotation += str(ang) + '_'
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
n_valid_batches //= batch_size
n_test_batches //= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2),
W = loadedparams[6],
b=loadedparams[7]
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2),
W = loadedparams[4],
b = loadedparams[5]
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=activation_convmlp,
W=loadedparams[2],
b=loadedparams[3]
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output,
n_in=500, n_out=10,
W=loadedparams[0],
b=loadedparams[1])
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
# CCC Commenting out patience for simplicity and transparency's sake
# patience = 10000 # look as this many examples regardless
# patience_increase = 2 # wait this much longer when a new best is
# # found
# improvement_threshold = 0.995 # a relative improvement of this much is
# # considered significant
validation_frequency = n_train_batches  # min(n_train_batches, patience // 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
if loadparams:
epoch = epoch_from_filename(paramsfilename)
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print('training @ iter = ', iter)
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
# CCC if this_validation_loss < best_validation_loss * \
# improvement_threshold:
# patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in range(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
# CCC if patience <= iter:
# done_looping = True
# break
if epoch in saveepochs_convmlp:
# test it on the test set
epoch_test_losses = [test_model(i) for i
in range(n_test_batches)]
epoch_test_score = numpy.mean(epoch_test_losses)
            print(('epoch %i, test error %f %%') %
                  (epoch, epoch_test_score * 100.))
            save_model(params, epoch, best_validation_loss, epoch_test_score,
                       '../data/models/best_model_convolutional_mlp_',
                       randomInit, add_blurs, testrun, thislogfilename,
                       endrun=(n_epochs == epoch), annotation=annotation)
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
def predict_on_mnist(modelfilename, activation=activation_convmlp, test_data='test', saveToFile=False, diagnose=False):
gg = open(modelfilename, 'rb')
params = pickle.load(gg)
gg.close()
nkerns = [20, 50]
batch_size = 1
poolsize = (2, 2)
dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
if (test_data == 'test'):
test_set_x, test_set_y = datasets[2]
test_data_str = '_test'
elif (test_data == 'validation'):
test_set_x, test_set_y = datasets[1]
test_data_str = '_validation'
    elif (test_data == 'train'):
        test_set_x, test_set_y = datasets[0]
        test_data_str = '_train'
    else:
        raise ValueError("test_data must be 'test', 'validation' or 'train'")
index = T.lscalar()
layer0_input = test_set_x[index].reshape((batch_size, 1, 28, 28))
conv_out_0 = conv2d(
input=layer0_input,
filters=params[6],
input_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5)
)
# downsample each feature map individually, using maxpooling
pooled_out_0 = downsample.max_pool_2d(
input=conv_out_0,
ds=poolsize,
ignore_border=True
)
output_0 = activation(pooled_out_0 + params[7].dimshuffle('x', 0, 'x', 'x'))
conv_out_1 = conv2d(
input=output_0,
filters=params[4],
input_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
)
# downsample each feature map individually, using maxpooling
pooled_out_1 = downsample.max_pool_2d(
input=conv_out_1,
ds=poolsize,
ignore_border=True
)
output_1 = activation(pooled_out_1 + params[5].dimshuffle('x', 0, 'x', 'x'))
output_2 = activation(T.dot(output_1.flatten(2), params[2]) + params[3])
final_output = T.dot(output_2, params[0]) + params[1]
p_y_given_x = T.nnet.softmax(final_output)
y_pred = T.argmax(p_y_given_x, axis=1)
ind_arr = numpy.arange(10, dtype=numpy.uint8)
testfunc = theano.function([index], [y_pred[0], test_set_y[index]])
infofunc = theano.function([index], p_y_given_x)
    n_examples = test_set_x.shape[0].eval()  # renamed from 'range' to avoid shadowing the built-in
    wrongpredictions = []
    for j in xrange(n_examples):
prediction = testfunc(j)
correct = (prediction[0] == prediction[1])
        if not correct:
            print('The prediction ' + str(prediction[0]) + ' for index ' + str(j) + ' is wrong. The correct value is '
                  + str(prediction[1]) + '.')
if diagnose:
err_arr = sorted(zip(ind_arr,infofunc(j)[0]), key=lambda x: x[1], reverse=True)
print(err_arr)
print('---')
wrongpredictions.append([j, prediction[0], prediction[1], test_set_x[j]])
print('There are ' + str(len(wrongpredictions)) + ' errors.')
if saveToFile:
gg = open('../data/lenet_test_errors' + test_data_str + '.pkl', 'wb')
pickle.dump(wrongpredictions, gg, protocol=pickle.HIGHEST_PROTOCOL)
gg.close()
return wrongpredictions
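# Minimal usage sketch for predict_on_mnist (the model filename below is
# hypothetical; any pickle produced by save_model above should work). Wrapped
# in a function so importing this module stays side-effect free.
def _example_predict_on_mnist():
    errors = predict_on_mnist(
        '../data/models/best_model_convolutional_mlp_100.pkl',
        test_data='validation', diagnose=True)
    print('misclassified: ' + str(len(errors)))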
def predict_custom_image(params, testImgFilename='own_0.png', activation=activation_convmlp, testImgFilenameDir='../data/custom/'):
test_img_value = filter(str.isdigit, testImgFilename)
test_img = fli.processImg(testImgFilenameDir, testImgFilename)
nkerns = [20, 50]
batch_size = 1
poolsize = (2, 2)
layer0_input = test_img.reshape((batch_size, 1, 28, 28)).astype(numpy.float32)
conv_out_0 = conv2d(
input=layer0_input,
filters=params[6],
input_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5)
)
# downsample each feature map individually, using maxpooling
pooled_out_0 = downsample.max_pool_2d(
input=conv_out_0,
ds=poolsize,
ignore_border=True
)
output_0 = activation(pooled_out_0 + params[7].dimshuffle('x', 0, 'x', 'x'))
conv_out_1 = conv2d(
input=output_0,
filters=params[4],
input_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
)
# downsample each feature map individually, using maxpooling
pooled_out_1 = downsample.max_pool_2d(
input=conv_out_1,
ds=poolsize,
ignore_border=True
)
output_1 = activation(pooled_out_1 + params[5].dimshuffle('x', 0, 'x', 'x'))
output_2 = activation(T.dot(output_1.flatten(2), params[2]) + params[3])
final_output = T.dot(output_2, params[0]) + params[1]
p_y_given_x = T.nnet.softmax(final_output)
y_pred = T.argmax(p_y_given_x, axis=1)
testfunc = theano.function([], [y_pred[0]])
    prediction = testfunc()[0]
    correct = (int(test_img_value) == prediction)
    print('The prediction ' + str(prediction) + ' for ' + testImgFilename + ' is ' + str(correct) + '.')
return correct
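# Companion sketch for predict_custom_image (assumptions: a saved parameter
# pickle and a 28x28 grayscale image own_3.png under ../data/custom/; both
# paths are placeholders).
def _example_predict_custom_image():
    gg = open('../data/models/best_model_convolutional_mlp_100.pkl', 'rb')
    params = pickle.load(gg)
    gg.close()
    predict_custom_image(params, testImgFilename='own_3.png')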
if __name__ == '__main__':
logging.basicConfig(filename=logfilename, level=logging.INFO)
evaluate_lenet5()
|
|
""" Cisco_IOS_XR_tunnel_vpdn_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-vpdn package configuration.
This module contains definitions
for the following management objects\:
vpdn\: VPDN configuration
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class DfBitEnum(Enum):
"""
DfBitEnum
Df bit
.. data:: clear = 0
Clear df bit
.. data:: reflect = 1
Reflect df bit from inner ip header
.. data:: set = 2
Set df bit
"""
clear = 0
reflect = 1
set = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['DfBitEnum']
class OptionEnum(Enum):
"""
OptionEnum
Option
.. data:: local = 1
Log VPDN events locally
.. data:: user = 2
Log VPDN user events
.. data:: dead_cache = 8
Log VPDN dead cache
.. data:: tunnel_drop = 16
Log VPDN tunnel drops
"""
local = 1
user = 2
dead_cache = 8
tunnel_drop = 16
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['OptionEnum']
class Vpdn(object):
"""
VPDN configuration
.. attribute:: caller_id
Options to apply on calling station ID
**type**\: :py:class:`CallerId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.CallerId>`
.. attribute:: enable
Enable VPDN configuration. Deletion of this object also causes deletion of all associated objects under VPDN
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: history
VPDN history logging
**type**\: :py:class:`History <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.History>`
.. attribute:: l2tp
L2TPv2 protocol commands
**type**\: :py:class:`L2Tp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.L2Tp>`
.. attribute:: local
VPDN Local radius process configuration
**type**\: :py:class:`Local <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Local>`
.. attribute:: loggings
Table of Logging
**type**\: :py:class:`Loggings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Loggings>`
.. attribute:: redundancy
Enable VPDN redundancy
**type**\: :py:class:`Redundancy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Redundancy>`
.. attribute:: session_limit
Maximum simultaneous VPDN sessions
**type**\: int
**range:** 1..131072
.. attribute:: soft_shut
New session no longer allowed
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: templates
Table of Template
**type**\: :py:class:`Templates <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates>`
.. attribute:: vpd_ngroups
Table of VPDNgroup
**type**\: :py:class:`VpdNgroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.VpdNgroups>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.caller_id = Vpdn.CallerId()
self.caller_id.parent = self
self.enable = None
self.history = Vpdn.History()
self.history.parent = self
self.l2tp = Vpdn.L2Tp()
self.l2tp.parent = self
self.local = Vpdn.Local()
self.local.parent = self
self.loggings = Vpdn.Loggings()
self.loggings.parent = self
self.redundancy = Vpdn.Redundancy()
self.redundancy.parent = self
self.session_limit = None
self.soft_shut = None
self.templates = Vpdn.Templates()
self.templates.parent = self
self.vpd_ngroups = Vpdn.VpdNgroups()
self.vpd_ngroups.parent = self
class History(object):
"""
VPDN history logging
.. attribute:: failure
User failure
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.failure = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:history'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.failure is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.History']['meta_info']
class Redundancy(object):
"""
Enable VPDN redundancy
.. attribute:: enable
        Enable VPDN redundancy. Deletion of this object also causes deletion of all associated objects under Redundancy
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: process_failures
Process crash configuration
**type**\: :py:class:`ProcessFailures <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Redundancy.ProcessFailures>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.enable = None
self.process_failures = Vpdn.Redundancy.ProcessFailures()
self.process_failures.parent = self
class ProcessFailures(object):
"""
Process crash configuration
.. attribute:: switchover
Force a switchover if the process crashes
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.switchover = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:redundancy/Cisco-IOS-XR-tunnel-vpdn-cfg:process-failures'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.switchover is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Redundancy.ProcessFailures']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:redundancy'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.enable is not None:
return True
if self.process_failures is not None and self.process_failures._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Redundancy']['meta_info']
class Local(object):
"""
VPDN Local radius process configuration
.. attribute:: cache_disabled
Set constant integer
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: path
local path of the saved profile
**type**\: str
**length:** 1..64
.. attribute:: port
port value
**type**\: int
**range:** 1..65535
.. attribute:: secret_text
secret password
**type**\: str
**length:** 1..32
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.cache_disabled = None
self.path = None
self.port = None
self.secret_text = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:local'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.cache_disabled is not None:
return True
if self.path is not None:
return True
if self.port is not None:
return True
if self.secret_text is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Local']['meta_info']
class Templates(object):
"""
Table of Template
.. attribute:: template
VPDN template configuration
**type**\: list of :py:class:`Template <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.template = YList()
self.template.parent = self
self.template.name = 'template'
class Template(object):
"""
VPDN template configuration
.. attribute:: template_name <key>
VPDN template name
**type**\: str
**length:** 1..63
.. attribute:: caller_id
Options to apply on calling station id
**type**\: :py:class:`CallerId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.CallerId>`
.. attribute:: description
Up to 100 characters describing this VPDN template
**type**\: str
**length:** 1..100
.. attribute:: dsl_line_forwarding
Forward DSL Line Info attributes
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: ip
Set IP TOS value
**type**\: :py:class:`Ip <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.Ip>`
.. attribute:: ipv4
IPv4 settings for tunnel
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.Ipv4>`
.. attribute:: l2tp_class
L2TP class command
**type**\: str
**length:** 1..79
.. attribute:: tunnel
L2TP tunnel commands
**type**\: :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.Tunnel>`
.. attribute:: vpn
VPN ID/VRF name
**type**\: :py:class:`Vpn <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.Vpn>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.template_name = None
self.caller_id = Vpdn.Templates.Template.CallerId()
self.caller_id.parent = self
self.description = None
self.dsl_line_forwarding = None
self.ip = Vpdn.Templates.Template.Ip()
self.ip.parent = self
self.ipv4 = Vpdn.Templates.Template.Ipv4()
self.ipv4.parent = self
self.l2tp_class = None
self.tunnel = Vpdn.Templates.Template.Tunnel()
self.tunnel.parent = self
self.vpn = Vpdn.Templates.Template.Vpn()
self.vpn.parent = self
class CallerId(object):
"""
Options to apply on calling station id
.. attribute:: mask
Mask characters by method
**type**\: str
**length:** 1..63
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mask = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:caller-id'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.mask is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.CallerId']['meta_info']
class Vpn(object):
"""
VPN ID/VRF name
.. attribute:: id
VPN ID
**type**\: :py:class:`Id <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Templates.Template.Vpn.Id>`
.. attribute:: vrf
VRF name
**type**\: str
**length:** 1..32
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.id = Vpdn.Templates.Template.Vpn.Id()
self.id.parent = self
self.vrf = None
class Id(object):
"""
VPN ID
.. attribute:: index
VPN ID, (OUI\:VPN\-Index) format(hex), 4 bytes VPN\_Index Part
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
.. attribute:: oui
VPN ID, (OUI\:VPN\-Index) format(hex), 3 bytes OUI Part
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.index = None
self.oui = None
@property
def _common_path(self):
if self.parent is None:
                            raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:id'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.index is not None:
return True
if self.oui is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.Vpn.Id']['meta_info']
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:vpn'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.id is not None and self.id._has_data():
return True
if self.vrf is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.Vpn']['meta_info']
class Tunnel(object):
"""
L2TP tunnel commands
.. attribute:: busy_timeout
Busy time out value in seconds
**type**\: int
**range:** 60..65535
**units**\: second
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.busy_timeout = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:tunnel'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.busy_timeout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.Tunnel']['meta_info']
class Ip(object):
"""
Set IP TOS value
.. attribute:: tos
Set constant integer
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tos = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:ip'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.tos is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.Ip']['meta_info']
class Ipv4(object):
"""
IPv4 settings for tunnel
.. attribute:: df_bit
IPv4 don't fragment bit set/clear/reflect
**type**\: :py:class:`DfBitEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.DfBitEnum>`
.. attribute:: source
Enter an IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.df_bit = None
self.source = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.df_bit is not None:
return True
if self.source is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template.Ipv4']['meta_info']
@property
def _common_path(self):
if self.template_name is None:
raise YPYModelError('Key property template_name is None')
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:templates/Cisco-IOS-XR-tunnel-vpdn-cfg:template[Cisco-IOS-XR-tunnel-vpdn-cfg:template-name = ' + str(self.template_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.template_name is not None:
return True
if self.caller_id is not None and self.caller_id._has_data():
return True
if self.description is not None:
return True
if self.dsl_line_forwarding is not None:
return True
if self.ip is not None and self.ip._has_data():
return True
if self.ipv4 is not None and self.ipv4._has_data():
return True
if self.l2tp_class is not None:
return True
if self.tunnel is not None and self.tunnel._has_data():
return True
if self.vpn is not None and self.vpn._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates.Template']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:templates'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.template is not None:
for child_ref in self.template:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Templates']['meta_info']
class CallerId(object):
"""
Options to apply on calling station ID
.. attribute:: mask
Mask characters by method
**type**\: str
**length:** 1..63
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mask = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:caller-id'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.mask is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.CallerId']['meta_info']
class VpdNgroups(object):
"""
Table of VPDNgroup
.. attribute:: vpd_ngroup
vpdn\-group configuration
**type**\: list of :py:class:`VpdNgroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.VpdNgroups.VpdNgroup>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vpd_ngroup = YList()
self.vpd_ngroup.parent = self
self.vpd_ngroup.name = 'vpd_ngroup'
class VpdNgroup(object):
"""
vpdn\-group configuration
.. attribute:: vpd_ngroupname <key>
vpdn\-group name
**type**\: str
**length:** 1..63
.. attribute:: attribute
match substring
**type**\: str
**length:** 1..63
.. attribute:: desc
            Up to 100 characters describing this VPDN group
**type**\: str
**length:** 1..100
.. attribute:: dsl_line_forwarding
Forward DSL Line Info attributes
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: ip
set ip tos value
**type**\: :py:class:`Ip <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.VpdNgroups.VpdNgroup.Ip>`
.. attribute:: l2tp_class
l2tp class name
**type**\: str
**length:** 1..79
.. attribute:: sr_ctemplate
Source vpdn\-template
**type**\: str
**length:** 1..63
.. attribute:: tunnel_busy_timeout
Busy list timeout length
**type**\: int
**range:** 1..65535
.. attribute:: vpn_id
Vpn id
**type**\: :py:class:`VpnId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.VpdNgroups.VpdNgroup.VpnId>`
.. attribute:: vrf_name
Vrf name
**type**\: str
**length:** 1..32
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vpd_ngroupname = None
self.attribute = None
self.desc = None
self.dsl_line_forwarding = None
self.ip = Vpdn.VpdNgroups.VpdNgroup.Ip()
self.ip.parent = self
self.l2tp_class = None
self.sr_ctemplate = None
self.tunnel_busy_timeout = None
self.vpn_id = Vpdn.VpdNgroups.VpdNgroup.VpnId()
self.vpn_id.parent = self
self.vrf_name = None
class VpnId(object):
"""
Vpn id
.. attribute:: vpn_id_index
VPN ID, (OUI\:VPN\-Index) format(hex), 4 bytes VPN\_Index Part
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
.. attribute:: vpn_id_oui
VPN ID, (OUI\:VPN\-Index) format(hex), 3 bytes OUI Part
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vpn_id_index = None
self.vpn_id_oui = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:vpn-id'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vpn_id_index is not None:
return True
if self.vpn_id_oui is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.VpdNgroups.VpdNgroup.VpnId']['meta_info']
class Ip(object):
"""
set ip tos value
.. attribute:: tos
ip tos value
**type**\: int
**range:** 0..255
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tos = None
@property
def _common_path(self):
if self.parent is None:
                        raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-vpdn-cfg:ip'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.tos is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.VpdNgroups.VpdNgroup.Ip']['meta_info']
@property
def _common_path(self):
if self.vpd_ngroupname is None:
raise YPYModelError('Key property vpd_ngroupname is None')
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:vpd-ngroups/Cisco-IOS-XR-tunnel-vpdn-cfg:vpd-ngroup[Cisco-IOS-XR-tunnel-vpdn-cfg:vpd-ngroupname = ' + str(self.vpd_ngroupname) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vpd_ngroupname is not None:
return True
if self.attribute is not None:
return True
if self.desc is not None:
return True
if self.dsl_line_forwarding is not None:
return True
if self.ip is not None and self.ip._has_data():
return True
if self.l2tp_class is not None:
return True
if self.sr_ctemplate is not None:
return True
if self.tunnel_busy_timeout is not None:
return True
if self.vpn_id is not None and self.vpn_id._has_data():
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.VpdNgroups.VpdNgroup']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:vpd-ngroups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.vpd_ngroup is not None:
for child_ref in self.vpd_ngroup:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.VpdNgroups']['meta_info']
class Loggings(object):
"""
Table of Logging
.. attribute:: logging
Configure logging for VPDN
**type**\: list of :py:class:`Logging <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.Loggings.Logging>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.logging = YList()
self.logging.parent = self
self.logging.name = 'logging'
class Logging(object):
"""
Configure logging for VPDN
.. attribute:: option <key>
VPDN logging options
**type**\: :py:class:`OptionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.OptionEnum>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.option = None
@property
def _common_path(self):
if self.option is None:
raise YPYModelError('Key property option is None')
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:loggings/Cisco-IOS-XR-tunnel-vpdn-cfg:logging[Cisco-IOS-XR-tunnel-vpdn-cfg:option = ' + str(self.option) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.option is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Loggings.Logging']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:loggings'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.logging is not None:
for child_ref in self.logging:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.Loggings']['meta_info']
class L2Tp(object):
"""
L2TPv2 protocol commands
.. attribute:: reassembly
L2TP IP packet reassembly enable
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: session_id
Session ID commands
**type**\: :py:class:`SessionId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.L2Tp.SessionId>`
.. attribute:: tcp_mss_adjust
TCP MSS adjust value. The acceptable values might be further limited depending on platform
**type**\: int
**range:** 1280..1460
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.reassembly = None
self.session_id = Vpdn.L2Tp.SessionId()
self.session_id.parent = self
self.tcp_mss_adjust = None
class SessionId(object):
"""
Session ID commands
.. attribute:: space
Session ID space commands
**type**\: :py:class:`Space <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_vpdn_cfg.Vpdn.L2Tp.SessionId.Space>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.space = Vpdn.L2Tp.SessionId.Space()
self.space.parent = self
class Space(object):
"""
Session ID space commands
.. attribute:: hierarchy
Session ID space hierarchical command
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'tunnel-vpdn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.hierarchy = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:l2tp/Cisco-IOS-XR-tunnel-vpdn-cfg:session-id/Cisco-IOS-XR-tunnel-vpdn-cfg:space'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.hierarchy is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.L2Tp.SessionId.Space']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:l2tp/Cisco-IOS-XR-tunnel-vpdn-cfg:session-id'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.space is not None and self.space._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.L2Tp.SessionId']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn/Cisco-IOS-XR-tunnel-vpdn-cfg:l2tp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.reassembly is not None:
return True
if self.session_id is not None and self.session_id._has_data():
return True
if self.tcp_mss_adjust is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn.L2Tp']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-vpdn-cfg:vpdn'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.caller_id is not None and self.caller_id._has_data():
return True
if self.enable is not None:
return True
if self.history is not None and self.history._has_data():
return True
if self.l2tp is not None and self.l2tp._has_data():
return True
if self.local is not None and self.local._has_data():
return True
if self.loggings is not None and self.loggings._has_data():
return True
if self.redundancy is not None and self.redundancy._has_data():
return True
if self.session_limit is not None:
return True
if self.soft_shut is not None:
return True
if self.templates is not None and self.templates._has_data():
return True
if self.vpd_ngroups is not None and self.vpd_ngroups._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_vpdn_cfg as meta
return meta._meta_table['Vpdn']['meta_info']
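# Minimal usage sketch (not part of the generated bindings): build a Vpdn
# configuration with a session limit and one logging option and push it with
# CRUDService. The device address and credentials below are placeholders, and
# the provider/service calls assume the classic ydk-py 0.5-style API.
def _example_configure_vpdn():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address='10.0.0.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    vpdn = Vpdn()
    vpdn.enable = Empty()
    vpdn.session_limit = 1024
    logging_entry = Vpdn.Loggings.Logging()
    logging_entry.option = OptionEnum.user
    vpdn.loggings.logging.append(logging_entry)
    CRUDService().create(provider, vpdn)
    provider.close()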
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import string
import time
import datetime
import MySQLdb
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("alert_os")
path='./include'
sys.path.insert(0,path)
import functions as func
import sendmail
import sendsms_fx
import sendsms_api
send_mail_max_count = func.get_option('send_mail_max_count')
send_mail_sleep_time = func.get_option('send_mail_sleep_time')
mail_to_list_common = func.get_option('send_mail_to_list')
send_sms_max_count = func.get_option('send_sms_max_count')
send_sms_sleep_time = func.get_option('send_sms_sleep_time')
sms_to_list_common = func.get_option('send_sms_to_list')
g_alert = str(func.get_option('alert'))
#################################################################################################
def gen_alert_os_status(os_ip):
if g_alert != "1":
return -1
sql = """SELECT a.ip,
a.hostname,
a.connect,
a.process,
a.load_1,
a.cpu_idle_time,
a.mem_usage_rate,
a.create_time,
b.tags,
b.alarm_os_process,
b.alarm_os_load,
b.alarm_os_cpu,
b.alarm_os_memory,
b.threshold_warning_os_process,
b.threshold_critical_os_process,
b.threshold_warning_os_load,
b.threshold_critical_os_load,
b.threshold_warning_os_cpu,
b.threshold_critical_os_cpu,
b.threshold_warning_os_memory,
b.threshold_critical_os_memory,
b.send_mail,
b.send_mail_to_list,
b.send_sms,
b.send_sms_to_list,
b.send_wx
FROM os_status a, db_cfg_os b
WHERE a.ip = b.host
and a.ip = '%s' """ %(os_ip)
result=func.mysql_query(sql)
    if result != 0:
for line in result:
host=line[0]
hostname=line[1]
connect=line[2]
process=line[3]
load_1=line[4]
cpu_idle=line[5]
memory_usage=line[6]
create_time=line[7]
tags=line[8]
alarm_os_process=line[9]
alarm_os_load=line[10]
alarm_os_cpu=line[11]
alarm_os_memory=line[12]
threshold_warning_os_process=line[13]
threshold_critical_os_process=line[14]
threshold_warning_os_load=line[15]
threshold_critical_os_load=line[16]
threshold_warning_os_cpu=line[17]
threshold_critical_os_cpu=line[18]
threshold_warning_os_memory=line[19]
threshold_critical_os_memory=line[20]
send_mail=line[21]
send_mail_to_list=line[22]
send_sms=line[23]
send_sms_to_list=line[24]
send_wx=line[25]
            server_id=0
            db_type="os"
            port=''
if send_mail_to_list is None or send_mail_to_list.strip()=='':
send_mail_to_list = mail_to_list_common
if send_sms_to_list is None or send_sms_to_list.strip()=='':
send_sms_to_list = sms_to_list_common
            if connect != 1:
send_mail = func.update_send_mail_status(host,db_type,'connect_server',send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(host,db_type,'connect_server',send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(host,db_type,'connect_server',send_wx)
func.add_alert(server_id,tags,host,port,create_time,db_type,'connect_server','down','critical','connect server fail',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('connect','3',server_id, host, db_type,create_time,'connect_server','down','critical')
func.update_db_status('process','-1',server_id, host, db_type,'','','','')
func.update_db_status('load_1','-1',server_id, host, db_type,'','','','')
func.update_db_status('cpu','-1',server_id, host, db_type,'','','','')
func.update_db_status('memory','-1',server_id, host, db_type,'','','','')
func.update_db_status('network','-1',server_id, host, db_type,'','','','')
func.update_db_status('disk','-1',server_id, host, db_type,'','','','')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect_server','up','connect server success',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('connect',1,server_id, host, db_type,create_time,'connect_server','up','ok')
if int(alarm_os_process)==1:
if int(process) >= int(threshold_critical_os_process):
#send_mail = func.update_send_mail_status(host,db_type,'process',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'process',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'process',process,'critical','too more process running',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('process',3,server_id, host, db_type,create_time,'process',process,'critical')
elif int(process) >= int(threshold_warning_os_process):
#send_mail = func.update_send_mail_status(host,db_type,'process',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'process',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'process',process,'warning','too more process running',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('process',2,server_id, host, db_type,create_time,'process',process,'warning')
else:
func.update_db_status('process',1,server_id, host, db_type,create_time,'process',process,'ok')
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'process',process,'process running ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
if int(alarm_os_load)==1:
if int(load_1) ==-1:
func.update_db_status('load_1','-1',server_id, host, db_type,'','','','')
elif int(load_1) >= int(threshold_critical_os_load):
#send_mail = func.update_send_mail_status(host,db_type,'load',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'load',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'load',load_1,'critical','too high load',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('load_1',3,server_id, host, db_type,create_time,'load',load_1,'critical')
elif int(load_1) >= int(threshold_warning_os_load):
#send_mail = func.update_send_mail_status(server_id,db_type,'load',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'load',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'load',load_1,'warning','too high load',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('load_1',2,server_id, host, db_type,create_time,'load',load_1,'warning')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'load',load_1,'load ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('load_1',1,server_id, host, db_type,create_time,'load',load_1,'ok')
if int(alarm_os_cpu)==1:
threshold_critical_os_cpu = int(100-threshold_critical_os_cpu)
threshold_warning_os_cpu = int(100-threshold_warning_os_cpu)
if int(cpu_idle) <= int(threshold_critical_os_cpu):
send_mail = func.update_send_mail_status(host,db_type,'cpu_idle',send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(host,db_type,'cpu_idle',send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(host,db_type,'cpu_idle',send_wx)
                    func.add_alert(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','critical','cpu idle too low',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('cpu',3,server_id, host, db_type,create_time,'cpu_idle',str(cpu_idle)+'%','critical')
elif int(cpu_idle) <= int(threshold_warning_os_cpu):
send_mail = func.update_send_mail_status(host,db_type,'cpu_idle',send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(host,db_type,'cpu_idle',send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(host,db_type,'cpu_idle',send_wx)
                    func.add_alert(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','warning','cpu idle too low',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('cpu',2,server_id, host, db_type,create_time,'cpu_idle',str(cpu_idle)+'%','warning')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','cpu idle ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('cpu',1,server_id, host, db_type,create_time,'cpu_idle',str(cpu_idle)+'%','ok')
if int(alarm_os_memory)==1:
if memory_usage:
memory_usage_int = int(memory_usage.split('%')[0])
else:
memory_usage_int = 0
if int(memory_usage_int) >= int(threshold_critical_os_memory):
#send_mail = func.update_send_mail_status(host,db_type,'memory',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'memory',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'critical','too more memory usage',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('memory',3,server_id, host, db_type,create_time,'memory',memory_usage,'critical')
elif int(memory_usage_int) >= int(threshold_warning_os_memory):
#send_mail = func.update_send_mail_status(host,db_type,'memory',send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'memory',send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'warning','too more memory usage',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('memory',2,server_id, host, db_type,create_time,'memory',memory_usage,'warning')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'memory usage ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('memory',1,server_id, host, db_type,create_time,'memory',memory_usage,'ok')
else:
pass
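# Worked example of the CPU threshold handling above (illustrative numbers):
# thresholds are configured as usage percentages, so threshold_critical_os_cpu
# = 90 becomes an idle threshold of 100 - 90 = 10, and a critical alert fires
# once cpu_idle <= 10, i.e. once CPU usage reaches 90% or more.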
def gen_alert_os_disk(os_ip):
if g_alert != "1":
return -1
sql="""SELECT a.ip,
a.mounted,
a.used_rate,
a.create_time,
b.tags,
b.alarm_os_disk,
b.threshold_warning_os_disk,
b.threshold_critical_os_disk,
b.send_mail,
b.send_mail_to_list,
b.send_sms,
b.send_sms_to_list,
b.send_wx
FROM os_disk a, db_cfg_os b
WHERE a.ip = b.host
AND a.ip = '%s' """ %(os_ip)
result=func.mysql_query(sql)
    if result != 0:
for line in result:
host=line[0]
mounted=line[1]
used_rate=line[2]
create_time=line[3]
tags=line[4]
alarm_os_disk=line[5]
threshold_warning_os_disk=line[6]
threshold_critical_os_disk=line[7]
send_mail=line[8]
send_mail_to_list=line[9]
send_sms=line[10]
send_sms_to_list=line[11]
send_wx=line[12]
            server_id=0
            db_type="os"
            port=''
used_rate_arr=used_rate.split("%")
used_rate_int=int(used_rate_arr[0])
if send_mail_to_list is None or send_mail_to_list.strip()=='':
send_mail_to_list = mail_to_list_common
if send_sms_to_list is None or send_sms_to_list.strip()=='':
send_sms_to_list = sms_to_list_common
if int(alarm_os_disk)==1:
#logger.info('disk_usage(%s)' %(mounted))
if int(used_rate_int) >= int(threshold_critical_os_disk):
send_mail = func.update_send_mail_status(host,db_type,'disk_usage(%s)' %(mounted),send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(host,db_type,'disk_usage(%s)' %(mounted),send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(host,db_type,'disk_usage(%s)' %(mounted),send_wx)
func.add_alert(server_id,tags,host,port,create_time,db_type,'disk_usage(%s)' %(mounted),used_rate,'critical','disk %s usage reach %s' %(mounted,used_rate),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('disk',3,server_id, host, db_type,create_time,'disk_usage(%s)' %(mounted),used_rate,'critical')
elif int(used_rate_int) >= int(threshold_warning_os_disk):
send_mail = func.update_send_mail_status(host,db_type,'disk_usage(%s)' %(mounted),send_mail,send_mail_max_count)
send_sms = func.update_send_sms_status(host,db_type,'disk_usage(%s)' %(mounted),send_sms,send_sms_max_count)
send_wx = func.update_send_wx_status(host,db_type,'disk_usage(%s)' %(mounted),send_wx)
func.add_alert(server_id,tags,host,port,create_time,db_type,'disk_usage(%s)' %(mounted),used_rate,'warning','disk %s usage reach %s' %(mounted,used_rate),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('disk',2,server_id, host, db_type,create_time,'disk_usage(%s)' %(mounted),used_rate,'warning')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'disk_usage(%s)' %(mounted),used_rate,'disk %s usage ok' %(mounted),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('disk',1,server_id, host, db_type,create_time,'disk_usage','max(%s:%s)' %(mounted,used_rate),'ok')
else:
pass
def gen_alert_os_network(os_ip):
if g_alert != "1":
return -1
sql="""SELECT a.ip,
a.if_descr,
a.in_bytes,
a.out_bytes,
sum(in_bytes + out_bytes) sum_bytes,
a.create_time,
b.tags,
b.alarm_os_network,
b.threshold_warning_os_network,
b.threshold_critical_os_network,
b.send_mail,
b.send_mail_to_list,
b.send_sms,
b.send_sms_to_list
FROM os_net a, db_cfg_os b
WHERE a.ip = b.host
AND a.ip = '%s'
GROUP BY ip, if_descr
ORDER BY sum(in_bytes + out_bytes) ASC """ %(os_ip)
result=func.mysql_query(sql)
    if result != 0:
for line in result:
host=line[0]
if_descr=line[1]
in_bytes=line[2]
out_bytes=line[3]
sum_bytes=line[4]
create_time=line[5]
tags=line[6]
alarm_os_network=line[7]
threshold_warning_os_network=(line[8])*1024*1024
threshold_critical_os_network=(line[9])*1024*1024
send_mail=line[10]
send_mail_to_list=line[11]
send_sms=line[12]
send_sms_to_list=line[13]
            server_id=0
            db_type="os"
            port=''
            send_wx=None  # not in this query's select list; default here so the ok-branch below cannot raise NameError
if send_mail_to_list is None or send_mail_to_list.strip()=='':
send_mail_to_list = mail_to_list_common
if send_sms_to_list is None or send_sms_to_list.strip()=='':
send_sms_to_list = sms_to_list_common
if int(alarm_os_network)==1:
if int(sum_bytes) >= int(threshold_critical_os_network):
#send_mail = func.update_send_mail_status(host,db_type,'network(%s)' %(if_descr),send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'network(%s)' %(if_descr),send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'network(%s)' %(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'critical','network %s bytes reach %s' %(if_descr,sum_bytes),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('network',3,server_id, host, db_type,create_time,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'critical')
elif int(sum_bytes) >= int(threshold_warning_os_network):
#send_mail = func.update_send_mail_status(host,db_type,'network(%s)' %(if_descr),send_mail,send_mail_max_count)
#send_sms = func.update_send_sms_status(host,db_type,'network(%s)' %(if_descr),send_sms,send_sms_max_count)
#func.add_alert(server_id,tags,host,port,create_time,db_type,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'warning','network %s bytes reach %s' %(if_descr,sum_bytes),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('network',2,server_id, host, db_type,create_time,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'warning')
else:
func.check_if_ok(server_id,tags,host,port,create_time,db_type,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'network %s bytes ok' %(if_descr),send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_wx)
func.update_db_status('network',1,server_id, host, db_type,create_time,'network','max(%s-in:%s,out:%s)' %(if_descr,in_bytes,out_bytes),'ok')
else:
pass
##############################################################################
# function main
##############################################################################
def main():
logger.info("alert os test begin.")
ip = "192.168.210.210"
#gen_alert_os_status(ip)
#gen_alert_os_disk(ip)
#gen_alert_os_network(ip)
logger.info("alert os test finished.")
if __name__ == '__main__':
main()
|
|
"""Provide functionality for TTS."""
import asyncio
import functools as ft
import hashlib
import io
import logging
import mimetypes
import os
import re
from typing import Dict, Optional
from aiohttp import web
import mutagen
from mutagen.id3 import ID3, TextFrame as ID3Text
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as DOMAIN_MP,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PLATFORM,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
HTTP_OK,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_prepare_setup_platform
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_CACHE = "cache"
ATTR_LANGUAGE = "language"
ATTR_MESSAGE = "message"
ATTR_OPTIONS = "options"
ATTR_PLATFORM = "platform"
BASE_URL_KEY = "tts_base_url"
CONF_BASE_URL = "base_url"
CONF_CACHE = "cache"
CONF_CACHE_DIR = "cache_dir"
CONF_LANG = "language"
CONF_SERVICE_NAME = "service_name"
CONF_TIME_MEMORY = "time_memory"
DEFAULT_CACHE = True
DEFAULT_CACHE_DIR = "tts"
DEFAULT_TIME_MEMORY = 300
DOMAIN = "tts"
MEM_CACHE_FILENAME = "filename"
MEM_CACHE_VOICE = "voice"
SERVICE_CLEAR_CACHE = "clear_cache"
SERVICE_SAY = "say"
_RE_VOICE_FILE = re.compile(r"([a-f0-9]{40})_([^_]+)_([^_]+)_([a-z_]+)\.[a-z0-9]{3,4}")
KEY_PATTERN = "{0}_{1}_{2}_{3}"
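# Example of the key/filename layout (illustrative values): for a message
# whose SHA1 digest is "0a4d55a8d778e5022fab701977c5d840bbc486d0", language
# "en-us", no options ("-") and engine "demo", the memory-cache key is
# "0a4d55a8d778e5022fab701977c5d840bbc486d0_en-us_-_demo", and a matching
# cache filename would be that key plus ".mp3", which _RE_VOICE_FILE accepts.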
def _deprecated_platform(value):
"""Validate if platform is deprecated."""
if value == "google":
raise vol.Invalid(
"google tts service has been renamed to google_translate,"
" please update your configuration."
)
return value
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): vol.All(cv.string, _deprecated_platform),
vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean,
vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string,
vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY): vol.All(
vol.Coerce(int), vol.Range(min=60, max=57600)
),
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_SERVICE_NAME): cv.string,
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SCHEMA_SERVICE_SAY = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.string,
vol.Optional(ATTR_CACHE): cv.boolean,
vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_LANGUAGE): cv.string,
vol.Optional(ATTR_OPTIONS): dict,
}
)
SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
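# Example payload accepted by SCHEMA_SERVICE_SAY (illustrative values only):
#   entity_id: media_player.living_room
#   message: "Hello from Home Assistant"
#   language: en
#   cache: true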
async def async_setup(hass, config):
"""Set up TTS."""
tts = SpeechManager(hass)
try:
conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}
use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)
base_url = conf.get(CONF_BASE_URL) or get_url(hass)
hass.data[BASE_URL_KEY] = base_url
await tts.async_init_cache(use_cache, cache_dir, time_memory, base_url)
except (HomeAssistantError, KeyError):
_LOGGER.exception("Error on cache init")
return False
hass.http.register_view(TextToSpeechView(tts))
hass.http.register_view(TextToSpeechUrlView(tts))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a TTS platform."""
if p_config is None:
p_config = {}
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return
try:
if hasattr(platform, "async_get_engine"):
provider = await platform.async_get_engine(
hass, p_config, discovery_info
)
else:
provider = await hass.async_add_executor_job(
platform.get_engine, hass, p_config, discovery_info
)
if provider is None:
_LOGGER.error("Error setting up platform %s", p_type)
return
tts.async_register_engine(p_type, provider, p_config)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform: %s", p_type)
return
async def async_say_handle(service):
"""Service handle for say."""
entity_ids = service.data[ATTR_ENTITY_ID]
message = service.data.get(ATTR_MESSAGE)
cache = service.data.get(ATTR_CACHE)
language = service.data.get(ATTR_LANGUAGE)
options = service.data.get(ATTR_OPTIONS)
try:
url = await tts.async_get_url(
p_type, message, cache=cache, language=language, options=options
)
except HomeAssistantError as err:
_LOGGER.error("Error on init TTS: %s", err)
return
data = {
ATTR_MEDIA_CONTENT_ID: url,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_ENTITY_ID: entity_ids,
}
await hass.services.async_call(
DOMAIN_MP,
SERVICE_PLAY_MEDIA,
data,
blocking=True,
context=service.context,
)
service_name = p_config.get(CONF_SERVICE_NAME, f"{p_type}_{SERVICE_SAY}")
hass.services.async_register(
DOMAIN, service_name, async_say_handle, schema=SCHEMA_SERVICE_SAY
)
setup_tasks = [
async_setup_platform(p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
async def async_clear_cache_handle(service):
"""Handle clear cache service call."""
await tts.async_clear_cache()
hass.services.async_register(
DOMAIN,
SERVICE_CLEAR_CACHE,
async_clear_cache_handle,
schema=SCHEMA_SERVICE_CLEAR_CACHE,
)
return True
def _hash_options(options: Dict) -> str:
"""Hashes an options dictionary."""
opts_hash = hashlib.blake2s(digest_size=5)
for key, value in sorted(options.items()):
opts_hash.update(str(key).encode())
opts_hash.update(str(value).encode())
return opts_hash.hexdigest()
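# Note on _hash_options (illustrative, not from the test suite): items are
# sorted before hashing, so {"voice": "x", "speed": 1} and
# {"speed": 1, "voice": "x"} produce the same digest, and blake2s with
# digest_size=5 yields 5 bytes, i.e. a 10-character hex string.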
class SpeechManager:
"""Representation of a speech store."""
def __init__(self, hass):
"""Initialize a speech store."""
self.hass = hass
self.providers = {}
self.use_cache = DEFAULT_CACHE
self.cache_dir = DEFAULT_CACHE_DIR
self.time_memory = DEFAULT_TIME_MEMORY
self.base_url = None
self.file_cache = {}
self.mem_cache = {}
async def async_init_cache(self, use_cache, cache_dir, time_memory, base_url):
"""Init config folder and load file cache."""
self.use_cache = use_cache
self.time_memory = time_memory
self.base_url = base_url
try:
self.cache_dir = await self.hass.async_add_executor_job(
_init_tts_cache_dir, self.hass, cache_dir
)
except OSError as err:
raise HomeAssistantError(f"Can't init cache dir {err}") from err
try:
cache_files = await self.hass.async_add_executor_job(
_get_cache_files, self.cache_dir
)
except OSError as err:
raise HomeAssistantError(f"Can't read cache dir {err}") from err
if cache_files:
self.file_cache.update(cache_files)
async def async_clear_cache(self):
"""Read file cache and delete files."""
self.mem_cache = {}
def remove_files():
"""Remove files from filesystem."""
for filename in self.file_cache.values():
try:
os.remove(os.path.join(self.cache_dir, filename))
except OSError as err:
_LOGGER.warning("Can't remove cache file '%s': %s", filename, err)
await self.hass.async_add_executor_job(remove_files)
self.file_cache = {}
@callback
def async_register_engine(self, engine, provider, config):
"""Register a TTS provider."""
provider.hass = self.hass
if provider.name is None:
provider.name = engine
self.providers[engine] = provider
async def async_get_url(
self, engine, message, cache=None, language=None, options=None
):
"""Get URL for play message.
This method is a coroutine.
"""
provider = self.providers[engine]
msg_hash = hashlib.sha1(bytes(message, "utf-8")).hexdigest()
use_cache = cache if cache is not None else self.use_cache
# Languages
language = language or provider.default_language
if language is None or language not in provider.supported_languages:
            raise HomeAssistantError(f"Language '{language}' is not supported")
# Options
if provider.default_options and options:
merged_options = provider.default_options.copy()
merged_options.update(options)
options = merged_options
options = options or provider.default_options
if options is not None:
invalid_opts = [
opt_name
for opt_name in options.keys()
if opt_name not in (provider.supported_options or [])
]
if invalid_opts:
raise HomeAssistantError(f"Invalid options found: {invalid_opts}")
options_key = _hash_options(options)
else:
options_key = "-"
key = KEY_PATTERN.format(
msg_hash, language.replace("_", "-"), options_key, engine
).lower()
# Is speech already in memory
if key in self.mem_cache:
filename = self.mem_cache[key][MEM_CACHE_FILENAME]
        # Is the file already stored in the file cache
elif use_cache and key in self.file_cache:
filename = self.file_cache[key]
self.hass.async_create_task(self.async_file_to_mem(key))
# Load speech from provider into memory
else:
filename = await self.async_get_tts_audio(
engine, key, message, use_cache, language, options
)
return f"{self.base_url}/api/tts_proxy/{filename}"
    async def async_get_tts_audio(self, engine, key, message, cache, language, options):
        """Receive TTS audio from the provider and store it in the cache for the view.
This method is a coroutine.
"""
provider = self.providers[engine]
extension, data = await provider.async_get_tts_audio(message, language, options)
if data is None or extension is None:
raise HomeAssistantError(f"No TTS from {engine} for '{message}'")
        # Create file info
filename = f"{key}.{extension}".lower()
# Validate filename
if not _RE_VOICE_FILE.match(filename):
raise HomeAssistantError(
f"TTS filename '{filename}' from {engine} is invalid!"
)
# Save to memory
data = self.write_tags(filename, data, provider, message, language, options)
self._async_store_to_memcache(key, filename, data)
if cache:
self.hass.async_create_task(self.async_save_tts_audio(key, filename, data))
return filename
async def async_save_tts_audio(self, key, filename, data):
"""Store voice data to file and file_cache.
This method is a coroutine.
"""
voice_file = os.path.join(self.cache_dir, filename)
def save_speech():
"""Store speech to filesystem."""
with open(voice_file, "wb") as speech:
speech.write(data)
try:
await self.hass.async_add_executor_job(save_speech)
self.file_cache[key] = filename
except OSError as err:
_LOGGER.error("Can't write %s: %s", filename, err)
async def async_file_to_mem(self, key):
"""Load voice from file cache into memory.
This method is a coroutine.
"""
filename = self.file_cache.get(key)
if not filename:
raise HomeAssistantError(f"Key {key} not in file cache!")
voice_file = os.path.join(self.cache_dir, filename)
def load_speech():
"""Load a speech from filesystem."""
with open(voice_file, "rb") as speech:
return speech.read()
try:
data = await self.hass.async_add_executor_job(load_speech)
except OSError as err:
del self.file_cache[key]
raise HomeAssistantError(f"Can't read {voice_file}") from err
self._async_store_to_memcache(key, filename, data)
@callback
def _async_store_to_memcache(self, key, filename, data):
"""Store data to memcache and set timer to remove it."""
self.mem_cache[key] = {MEM_CACHE_FILENAME: filename, MEM_CACHE_VOICE: data}
@callback
def async_remove_from_mem():
"""Cleanup memcache."""
self.mem_cache.pop(key, None)
self.hass.loop.call_later(self.time_memory, async_remove_from_mem)
async def async_read_tts(self, filename):
"""Read a voice file and return binary.
This method is a coroutine.
"""
record = _RE_VOICE_FILE.match(filename.lower())
if not record:
raise HomeAssistantError("Wrong tts file format!")
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3), record.group(4)
)
if key not in self.mem_cache:
if key not in self.file_cache:
raise HomeAssistantError(f"{key} not in cache!")
await self.async_file_to_mem(key)
content, _ = mimetypes.guess_type(filename)
return content, self.mem_cache[key][MEM_CACHE_VOICE]
@staticmethod
def write_tags(filename, data, provider, message, language, options):
"""Write ID3 tags to file.
Async friendly.
"""
data_bytes = io.BytesIO(data)
data_bytes.name = filename
data_bytes.seek(0)
album = provider.name
artist = language
if options is not None and options.get("voice") is not None:
artist = options.get("voice")
try:
tts_file = mutagen.File(data_bytes)
if tts_file is not None:
if not tts_file.tags:
tts_file.add_tags()
if isinstance(tts_file.tags, ID3):
tts_file["artist"] = ID3Text(encoding=3, text=artist)
tts_file["album"] = ID3Text(encoding=3, text=album)
tts_file["title"] = ID3Text(encoding=3, text=message)
else:
tts_file["artist"] = artist
tts_file["album"] = album
tts_file["title"] = message
tts_file.save(data_bytes)
except mutagen.MutagenError as err:
_LOGGER.error("ID3 tag error: %s", err)
return data_bytes.getvalue()
class Provider:
"""Represent a single TTS provider."""
hass: Optional[HomeAssistantType] = None
name: Optional[str] = None
@property
def default_language(self):
"""Return the default language."""
return None
@property
def supported_languages(self):
"""Return a list of supported languages."""
return None
@property
    def supported_options(self):
        """Return a list of supported options like voice or emotion."""
return None
@property
    def default_options(self):
        """Return a dict of default options."""
return None
def get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider."""
raise NotImplementedError()
async def async_get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider.
Return a tuple of file extension and data as bytes.
"""
return await self.hass.async_add_executor_job(
ft.partial(self.get_tts_audio, message, language, options=options)
)
def _init_tts_cache_dir(hass, cache_dir):
"""Init cache folder."""
if not os.path.isabs(cache_dir):
cache_dir = hass.config.path(cache_dir)
if not os.path.isdir(cache_dir):
_LOGGER.info("Create cache dir %s", cache_dir)
os.mkdir(cache_dir)
return cache_dir
def _get_cache_files(cache_dir):
"""Return a dict of given engine files."""
cache = {}
folder_data = os.listdir(cache_dir)
for file_data in folder_data:
record = _RE_VOICE_FILE.match(file_data)
if record:
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3), record.group(4)
)
cache[key.lower()] = file_data.lower()
return cache
class TextToSpeechUrlView(HomeAssistantView):
"""TTS view to get a url to a generated speech file."""
requires_auth = True
url = "/api/tts_get_url"
name = "api:tts:geturl"
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def post(self, request: web.Request) -> web.Response:
"""Generate speech and provide url."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON specified", HTTP_BAD_REQUEST)
        if not data.get(ATTR_PLATFORM) or not data.get(ATTR_MESSAGE):
return self.json_message(
"Must specify platform and message", HTTP_BAD_REQUEST
)
p_type = data[ATTR_PLATFORM]
message = data[ATTR_MESSAGE]
cache = data.get(ATTR_CACHE)
language = data.get(ATTR_LANGUAGE)
options = data.get(ATTR_OPTIONS)
try:
url = await self.tts.async_get_url(
p_type, message, cache=cache, language=language, options=options
)
resp = self.json({"url": url}, HTTP_OK)
except HomeAssistantError as err:
            _LOGGER.error("Error on init TTS: %s", err)
            resp = self.json({"error": str(err)}, HTTP_BAD_REQUEST)
return resp
class TextToSpeechView(HomeAssistantView):
"""TTS view to serve a speech audio."""
requires_auth = False
url = "/api/tts_proxy/{filename}"
name = "api:tts:speech"
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def get(self, request: web.Request, filename: str) -> web.Response:
"""Start a get request."""
try:
content, data = await self.tts.async_read_tts(filename)
except HomeAssistantError as err:
            _LOGGER.error("Error on load TTS: %s", err)
return web.Response(status=HTTP_NOT_FOUND)
return web.Response(body=data, content_type=content)
def get_base_url(hass):
"""Get base URL."""
return hass.data[BASE_URL_KEY]
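# A minimal sketch of a custom engine (DemoProvider and its canned payload
# are illustrative, not part of this module). A real platform would expose
# get_engine()/async_get_engine() returning an instance like this, which
# async_setup_platform() then hands to SpeechManager.async_register_engine().
class DemoProvider(Provider):
    """Toy provider that returns a fixed byte payload for any message."""

    @property
    def default_language(self):
        return "en"

    @property
    def supported_languages(self):
        return ["en"]

    def get_tts_audio(self, message, language, options=None):
        # A real provider would call an external TTS service here and
        # return a (file extension, audio bytes) tuple.
        return "mp3", b"\x00" * 16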
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
from nova.network import model as network_model
from nova.notifications import base as notification_base
from nova.notifications.objects import base as notification
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests.unit.objects import test_objects
class TestNotificationBase(test.NoDBTestCase):
@base.NovaObjectRegistry.register_if(False)
class TestObject(base.NovaObject):
VERSION = '1.0'
fields = {
'field_1': fields.StringField(),
'field_2': fields.IntegerField(),
'not_important_field': fields.IntegerField(),
}
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
def populate_schema(self, source_field):
super(TestNotificationBase.TestNotificationPayload,
self).populate_schema(source_field=source_field)
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayloadEmptySchema(
notification.NotificationPayloadBase):
VERSION = '1.0'
fields = {
'extra_field': fields.StringField(), # filled by ctor
}
@notification.notification_sample('test-update-1.json')
@notification.notification_sample('test-update-2.json')
@base.NovaObjectRegistry.register_if(False)
class TestNotification(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayload')
}
@base.NovaObjectRegistry.register_if(False)
class TestNotificationEmptySchema(notification.NotificationBase):
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
}
fake_service = {
'created_at': timeutils.utcnow().replace(microsecond=0),
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'host': 'fake-host',
'binary': 'nova-fake',
'topic': 'fake-service-topic',
'report_count': 1,
'forced_down': False,
'disabled': False,
'disabled_reason': None,
'last_seen_up': None,
'version': 1}
expected_payload = {
'nova_object.name': 'TestNotificationPayload',
'nova_object.data': {
'extra_field': 'test string',
'field_1': 'test1',
'field_2': 42},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'}
def setUp(self):
super(TestNotificationBase, self).setUp()
with mock.patch('nova.db.service_update') as mock_db_service_update:
self.service_obj = objects.Service(context=mock.sentinel.context,
id=self.fake_service['id'])
self.service_obj.obj_reset_changes(['version'])
mock_db_service_update.return_value = self.fake_service
self.service_obj.save()
self.my_obj = self.TestObject(field_1='test1',
field_2=42,
not_important_field=13)
self.payload = self.TestNotificationPayload(
extra_field='test string')
self.payload.populate_schema(source_field=self.my_obj)
self.notification = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE,
phase=fields.NotificationPhase.START),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
def _verify_notification(self, mock_notifier, mock_context,
expected_event_type,
expected_payload):
mock_notifier.prepare.assert_called_once_with(
publisher_id='nova-fake:fake-host')
mock_notify = mock_notifier.prepare.return_value.info
self.assertTrue(mock_notify.called)
self.assertEqual(mock_notify.call_args[0][0], mock_context)
self.assertEqual(mock_notify.call_args[1]['event_type'],
expected_event_type)
actual_payload = mock_notify.call_args[1]['payload']
self.assertJsonEqual(expected_payload, actual_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_notification(self, mock_notifier, mock_legacy):
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
self.notification.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update.start',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher(host='fake-host',
binary='nova-fake'),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
@mock.patch('nova.rpc.LEGACY_NOTIFIER')
@mock.patch('nova.rpc.NOTIFIER')
def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy):
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=self.payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=self.expected_payload)
self.assertFalse(mock_legacy.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
non_populated_payload = self.TestNotificationPayload(
extra_field='test string')
noti = self.TestNotification(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
self.assertRaises(AssertionError, noti.emit, mock_context)
self.assertFalse(mock_notifier.called)
@mock.patch('nova.rpc.NOTIFIER')
def test_empty_schema(self, mock_notifier):
non_populated_payload = self.TestNotificationPayloadEmptySchema(
extra_field='test string')
noti = self.TestNotificationEmptySchema(
event_type=notification.EventType(
object='test_object',
action=fields.NotificationAction.UPDATE),
publisher=notification.NotificationPublisher.from_service_obj(
self.service_obj),
priority=fields.NotificationPriority.INFO,
payload=non_populated_payload)
mock_context = mock.Mock()
mock_context.to_dict.return_value = {}
noti.emit(mock_context)
self._verify_notification(
mock_notifier,
mock_context,
expected_event_type='test_object.update',
expected_payload=
{'nova_object.name': 'TestNotificationPayloadEmptySchema',
'nova_object.data': {'extra_field': u'test string'},
'nova_object.version': '1.0',
'nova_object.namespace': 'nova'})
def test_sample_decorator(self):
self.assertEqual(2, len(self.TestNotification.samples))
self.assertIn('test-update-1.json', self.TestNotification.samples)
self.assertIn('test-update-2.json', self.TestNotification.samples)
notification_object_data = {
'AuditPeriodPayload': '1.0-28345f72ca9d805eeb61b2c2385805dd',
'BandwidthPayload': '1.0-49278639296f9939ff2c8947b2078a82',
'EventType': '1.3-6ef678bfe9a4ebfd669c96d2d2c124a5',
'ExceptionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ExceptionPayload': '1.0-4516ae282a55fe2fd5c754967ee6248b',
'FlavorPayload': '1.0-8ad962ab0bafc7270f474c7dda0b7c20',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.0-d94994d6043bb87fde603976ce811cba',
'InstancePayload': '1.0-4473793aa2a0a4083d328847f3ab638a',
'InstanceStateUpdatePayload': '1.0-a934d04e1b314318e42e8062647edd11',
'InstanceUpdateNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceUpdatePayload': '1.0-2e21e6950fbb04e701e54e8563a21dbc',
'IpPayload': '1.0-26b40117c41ed95a61ae104f0fcb5fdc',
'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545',
'ServiceStatusNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'ServiceStatusPayload': '1.0-a5e7b4fd6cc5581be45b31ff1f3a3f7f',
}
class TestNotificationObjectVersions(test.NoDBTestCase):
def setUp(self):
        super(TestNotificationObjectVersions, self).setUp()
base.NovaObjectRegistry.register_notification_objects()
def test_versions(self):
checker = fixture.ObjectVersionChecker(
test_objects.get_nova_objects())
notification_object_data.update(test_objects.object_data)
expected, actual = checker.test_hashes(notification_object_data)
self.assertEqual(expected, actual,
'Some notification objects have changed; please make '
'sure the versions have been bumped, and then update '
'their hashes here.')
def test_notification_payload_version_depends_on_the_schema(self):
@base.NovaObjectRegistry.register_if(False)
class TestNotificationPayload(notification.NotificationPayloadBase):
VERSION = '1.0'
SCHEMA = {
'field_1': ('source_field', 'field_1'),
'field_2': ('source_field', 'field_2'),
}
fields = {
'extra_field': fields.StringField(), # filled by ctor
'field_1': fields.StringField(), # filled by the schema
'field_2': fields.IntegerField(), # filled by the schema
}
checker = fixture.ObjectVersionChecker(
{'TestNotificationPayload': (TestNotificationPayload,)})
old_hash = checker.get_hashes(extra_data_func=get_extra_data)
TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
'field_3')
new_hash = checker.get_hashes(extra_data_func=get_extra_data)
self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
extra_data = tuple()
# Get the SCHEMA items to add to the fingerprint
# if we are looking at a notification
if issubclass(obj_class, notification.NotificationPayloadBase):
schema_data = collections.OrderedDict(
sorted(obj_class.SCHEMA.items()))
extra_data += (schema_data,)
return extra_data
class TestInstanceNotification(test.NoDBTestCase):
@mock.patch('nova.notifications.objects.instance.'
'InstanceUpdateNotification._emit')
def test_send_version_instance_update_uses_flavor(self, mock_emit):
# Make sure that the notification payload chooses the values in
# instance.flavor.$value instead of instance.$value
test_keys = ['memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb']
flavor_values = {k: 123 for k in test_keys}
instance_values = {k: 456 for k in test_keys}
flavor = objects.Flavor(**flavor_values)
info_cache = objects.InstanceInfoCache(
network_info=network_model.NetworkInfo())
instance = objects.Instance(
flavor=flavor,
info_cache=info_cache,
**instance_values)
payload = {
'bandwidth': {},
'audit_period_ending': timeutils.utcnow(),
'audit_period_beginning': timeutils.utcnow(),
}
notification_base._send_versioned_instance_update(
mock.MagicMock(),
instance,
payload,
'host',
'compute')
payload = mock_emit.call_args_list[0][1]['payload']['nova_object.data']
flavor_payload = payload['flavor']['nova_object.data']
data = {k: flavor_payload[k] for k in test_keys}
self.assertEqual(flavor_values, data)
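# A guarded sketch (illustrative only; it assumes EventType exposes
# to_notification_event_type_field() as in nova.notifications.objects.base)
# showing how the event type strings asserted above are composed:
# "<object>.<action>" plus an optional ".<phase>" suffix.
if __name__ == '__main__':
    event_type = notification.EventType(
        object='test_object',
        action=fields.NotificationAction.UPDATE,
        phase=fields.NotificationPhase.START)
    assert (event_type.to_notification_event_type_field() ==
            'test_object.update.start')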
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import json
import os
from dateutil import tz as tzutil
from .common import BaseTest, instance
from c7n.exceptions import PolicyValidationError
from c7n.filters.offhours import OffHour, OnHour, ScheduleParser, Time
from c7n.testing import mock_datetime_now
class OffHoursFilterTest(BaseTest):
"""[off|on] hours testing"""
def test_offhours_records(self):
session_factory = self.replay_flight_data("test_offhours_records")
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=8, day=14, hour=19, minute=00)
with mock_datetime_now(t, datetime):
p = self.load_policy(
{
"name": "offhours-records",
"resource": "ec2",
"filters": [
{"State.Name": "running"},
{
"type": "offhour",
"offhour": 19,
"tag": "custodian_downtime",
"default_tz": "est",
"weekends": False,
},
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(resources, [])
with open(
os.path.join(
p.options["output_dir"], "offhours-records", "parse_errors.json"
)
) as fh:
data = json.load(fh)
self.assertEqual(len(data), 1)
self.assertEqual(data[0][0], "i-0ee3a9bc2eeed269f")
self.assertEqual(data[0][1], "off=[m-f,8];on=[n-f,5];pz=est")
with open(
os.path.join(p.options["output_dir"], "offhours-records", "opted_out.json")
) as fh:
data = json.load(fh)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["InstanceId"], "i-0a619b58a7e704a9f")
def test_validate(self):
url_test = "s3://test-dest/holidays.csv"
self.assertRaises(
PolicyValidationError, OffHour({"default_tz": "zmta"}).validate
)
self.assertRaises(PolicyValidationError, OffHour({"offhour": 25}).validate)
self.assertRaises(
PolicyValidationError,
OffHour(
{
"skip-days": ["2017-01-01"],
"skip-days-from": {"expr": 0, "format": "csv", "url": url_test},
}
).validate,
)
i = OffHour({})
self.assertEqual(i.validate(), i)
def test_process(self):
f = OffHour({"opt-out": True})
instances = [
instance(Tags=[]),
instance(Tags=[{"Key": "maid_offhours", "Value": ""}]),
instance(Tags=[{"Key": "maid_offhours", "Value": "on"}]),
instance(Tags=[{"Key": "maid_offhours", "Value": "off"}]),
instance(
Tags=[
{
"Key": "maid_offhours",
"Value": "off=(m-f,5);zebrablue,on=(t-w,5)",
}
]
),
]
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=19,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
with mock_datetime_now(t, datetime):
self.assertEqual(
f.process(instances), [instances[0], instances[1], instances[2]]
)
def test_opt_out_behavior(self):
        # Some users want to match a resource subset via policy filters,
        # with opt-out behavior as the default
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=19,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
f = OffHour({"opt-out": True})
with mock_datetime_now(t, datetime):
i = instance(Tags=[])
self.assertEqual(f(i), True)
i = instance(Tags=[{"Key": "maid_offhours", "Value": ""}])
self.assertEqual(f(i), True)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "on"}])
self.assertEqual(f(i), True)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "off"}])
self.assertEqual(f(i), False)
self.assertEqual(f.opted_out, [i])
def test_opt_in_behavior(self):
        # Given the addition of opt-out behavior, verify that when it's
        # not configured we don't touch an instance that has no
        # downtime tag
i = instance(Tags=[])
i2 = instance(Tags=[{"Key": "maid_offhours", "Value": ""}])
i3 = instance(Tags=[{"Key": "maid_offhours", "Value": "on"}])
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=19,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
f = OffHour({})
with mock_datetime_now(t, datetime):
self.assertEqual(f(i), False)
self.assertEqual(f(i2), True)
self.assertEqual(f(i3), True)
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=7,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
f = OnHour({})
with mock_datetime_now(t, datetime):
self.assertEqual(f(i), False)
self.assertEqual(f(i2), True)
self.assertEqual(f(i3), True)
def xtest_time_match_stops_after_skew(self):
hour = 7
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=hour,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OnHour({"skew": 1})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(0, 4):
dt.target = t.replace(hour=hour + n)
results.append(f(i))
self.assertEqual(results, [True, True, False, False])
def test_resource_schedule_error(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
f = OffHour({})
f.process_resource_schedule = lambda: False
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(f(i), False)
def test_time_filter_usage_errors(self):
self.assertRaises(NotImplementedError, Time, {})
def test_everyday_onhour(self):
        # with the weekends option disabled we also match times on the weekend
start_day = 14 # sunday
t = datetime.datetime(year=2016, day=start_day, month=8, hour=7, minute=20)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OnHour({"weekends": False})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True] * 7)
def test_everyday_offhour(self):
        # with the weekends option disabled we also match times on the weekend
start_day = 14 # sunday
t = datetime.datetime(year=2016, day=start_day, month=8, hour=19, minute=20)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OffHour({"weekends": False})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True] * 7)
def test_weekends_only_onhour_support(self):
# start day is a sunday, weekend only means we only start
# on monday morning.
start_day = 14
t = datetime.datetime(year=2016, day=start_day, month=8, hour=7, minute=20)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OnHour({"weekends-only": True})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [False, True, False, False, False, False, False])
    def test_weekends_only_offhour_support(self):
        # start day is a sunday, weekend only means we only stop
        # on friday evening.
        start_day = 14
        t = datetime.datetime(year=2016, day=start_day, month=8, hour=19, minute=20)
        i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
        f = OffHour({"weekends-only": True})
        results = []
        with mock_datetime_now(t, datetime) as dt:
            for n in range(7):
                dt.target = t.replace(day=start_day + n)
                results.append(f(i))
        self.assertEqual(results, [False, False, False, False, False, True, False])
def test_onhour_weekend_support(self):
start_day = 14
t = datetime.datetime(year=2016, day=start_day, month=2, hour=19, minute=20)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OffHour({"weekends-only": True})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [False, False, False, False, False, True, False])
def test_offhour_weekend_support(self):
start_day = 26
t = datetime.datetime(year=2016, day=start_day, month=2, hour=19, minute=20)
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OffHour({})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(0, 4):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True, False, False, True])
def test_current_time_test(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
f = OffHour({})
p = f.get_tag_value(i)
self.assertEqual(p, "tz=est")
tz = f.get_tz("est")
self.assertTrue(
'America/New_York' in str(tz) or
'US/Eastern' in str(tz))
self.assertEqual(datetime.datetime.now(tz), t)
self.assertEqual(t.hour, 19)
def test_offhours_real_world_values(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
with mock_datetime_now(t, datetime):
results = [
OffHour({})(i)
for i in [
instance(Tags=[{"Key": "maid_offhours", "Value": ""}]),
instance(Tags=[{"Key": "maid_offhours", "Value": "on"}]),
instance(
Tags=[{"Key": "maid_offhours", "Value": '"Offhours tz=ET"'}]
),
instance(
Tags=[{"Key": "maid_offhours", "Value": "Offhours tz=PT"}]
),
]
]
# unclear what this is really checking
self.assertEqual(results, [True, True, True, True])
def test_offhours_get_value(self):
off = OffHour({"default_tz": "ct"})
i = instance(Tags=[{"Key": "maid_offhours", "Value": "Offhours tz=PT"}])
self.assertEqual(off.get_tag_value(i), "offhours tz=pt")
self.assertFalse(off.parser.has_resource_schedule(off.get_tag_value(i), "off"))
self.assertTrue(off.parser.keys_are_valid(off.get_tag_value(i)))
self.assertEqual(off.parser.raw_data(off.get_tag_value(i)), {"tz": "pt"})
def test_offhours(self):
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=19,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(OffHour({})(i), True)
def test_onhour(self):
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=7,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(OnHour({})(i), True)
self.assertEqual(OnHour({"onhour": 8})(i), False)
def test_cant_parse_tz(self):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=evt"}])
self.assertEqual(OffHour({})(i), False)
def test_custom_offhours(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=19, minute=00)
results = []
with mock_datetime_now(t, datetime):
for i in [
instance(
Tags=[
{
"Key": "maid_offhours",
"Value": "off=(m-f,19);on=(m-f,7);tz=et",
}
]
),
instance(
Tags=[
{
"Key": "maid_offhours",
"Value": "off=(m-f,20);on=(m-f,7);tz=et",
}
]
),
]:
results.append(OffHour({})(i))
self.assertEqual(results, [True, False])
def test_custom_onhours(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
results = []
with mock_datetime_now(t, datetime):
for i in [
instance(
Tags=[
{
"Key": "maid_offhours",
"Value": "off=(m-f,19);on=(m-f,7);tz=et",
}
]
),
instance(
Tags=[
{
"Key": "maid_offhours",
"Value": "off=(m-f,20);on=(m-f,9);tz=et",
}
]
),
]:
results.append(OnHour({})(i))
self.assertEqual(results, [True, False])
def test_arizona_tz(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,19);on=(m-f,7);tz=at"}
]
)
self.assertEqual(OnHour({})(i), True)
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,20);on=(m-f,6);tz=ast"}
]
)
self.assertEqual(OnHour({})(i), False)
def test_custom_bad_tz(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,19);on=(m-f,7);tz=et"}
]
)
self.assertEqual(OnHour({})(i), True)
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,20);on=(m-f,7);tz=abc"}
]
)
self.assertEqual(OnHour({})(i), False)
def test_custom_bad_hours(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=19, minute=00)
# default error handling is to exclude the resource
with mock_datetime_now(t, datetime):
            # This isn't considered a bad value, it's basically omitted.
i = instance(Tags=[{"Key": "maid_offhours", "Value": "off=();tz=et"}])
self.assertEqual(OffHour({})(i), False)
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,90);on=(m-f,7);tz=et"}
]
)
# malformed value
self.assertEqual(OffHour({})(i), False)
t = t.replace(year=2016, month=5, day=26, hour=13, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "off=();tz=et"}])
            # falls back to the default schedule, which doesn't match at this hour
self.assertEqual(OffHour({})(i), False)
i = instance(
Tags=[
{"Key": "maid_offhours", "Value": "off=(m-f,90);on=(m-f,7);tz=et"}
]
)
self.assertEqual(OffHour({})(i), False)
def test_tz_only(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(OnHour({})(i), True)
def test_tz_long_form_resolve(self):
pacific = tzutil.gettz("America/Los_Angeles")
nzt = tzutil.gettz("Pacific/Auckland")
gmt = tzutil.gettz("Etc/GMT")
easter_island = tzutil.gettz("Chile/EasterIsland")
self.assertEqual(
OnHour({}).get_tz('america/los_angeles'),
pacific)
self.assertEqual(
OnHour({}).get_tz('pst'),
pacific)
self.assertEqual(
OnHour({}).get_tz('pacific/auckland'),
nzt)
self.assertEqual(
OnHour({}).get_tz('gmt'),
gmt)
self.assertEqual(
OnHour({}).get_tz('chile/easterisland'),
easter_island)
def test_empty_tag(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": ""}])
self.assertEqual(OnHour({})(i), True)
def test_on_tag(self):
t = datetime.datetime.now(tzutil.gettz("America/New_York"))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "on"}])
self.assertEqual(OnHour({})(i), True)
class ScheduleParserTest(BaseTest):
# table style test
# list of (tag value, parse result)
table = [
################
# Standard cases
(
"off=(m-f,10);on=(m-f,7);tz=et",
{
"off": [{"days": [0, 1, 2, 3, 4], "hour": 10}],
"on": [{"days": [0, 1, 2, 3, 4], "hour": 7}],
"tz": "et",
},
),
(
"off=[(m-f,9)];on=(m-s,10);tz=pt",
{
"off": [{"days": [0, 1, 2, 3, 4], "hour": 9}],
"on": [{"days": [0, 1, 2, 3, 4, 5], "hour": 10}],
"tz": "pt",
},
),
(
"off=[(m-f,23)];on=(m-s,10);tz=pt",
{
"off": [{"days": [0, 1, 2, 3, 4], "hour": 23}],
"on": [{"days": [0, 1, 2, 3, 4, 5], "hour": 10}],
"tz": "pt",
},
),
(
"off=(m-f,19);on=(m-f,7);tz=pst",
{
"off": [{"days": [0, 1, 2, 3, 4], "hour": 19}],
"on": [{"days": [0, 1, 2, 3, 4], "hour": 7}],
"tz": "pst",
},
),
# wrap around days (saturday, sunday, monday)
(
"on=[(s-m,10)];off=(s-m,19)",
{
"on": [{"days": [5, 6, 0], "hour": 10}],
"off": [{"days": [5, 6, 0], "hour": 19}],
"tz": "et",
},
),
# multiple single days specified
(
"on=[(m,9),(t,10),(w,7)];off=(m-u,19)",
{
"on": [
{"days": [0], "hour": 9},
{"days": [1], "hour": 10},
{"days": [2], "hour": 7},
],
"off": [{"days": [0, 1, 2, 3, 4, 5, 6], "hour": 19}],
"tz": "et",
},
),
        # using brackets also works, if only a single time is set
(
"off=[m-f,20];on=[m-f,5];tz=est",
{
"on": [{"days": [0, 1, 2, 3, 4], "hour": 5}],
"off": [{"days": [0, 1, 2, 3, 4], "hour": 20}],
"tz": "est",
},
),
# same string, exercise cache lookup.
(
"off=[m-f,20];on=[m-f,5];tz=est",
{
"on": [{"days": [0, 1, 2, 3, 4], "hour": 5}],
"off": [{"days": [0, 1, 2, 3, 4], "hour": 20}],
"tz": "est",
},
),
################
# Invalid Cases
("", None),
# invalid day
("off=(1-2,12);on=(m-f,10);tz=est", None),
# invalid hour
("off=(m-f,a);on=(m-f,10);tz=est", None),
("off=(m-f,99);on=(m-f,7);tz=pst", None),
# invalid day
("off=(x-f,10);on=(m-f,10);tz=est", None),
# no hour specified for on
("off=(m-f);on=(m-f,10);tz=est", None),
# invalid day spec
("off=(m-t-f,12);on=(m-f,10);tz=est", None),
# random extra
("off=(m-f,5);zebra=blue,on=(t-w,5)", None),
("off=(m-f,5);zebra=blue;on=(t-w,5)", None),
# random extra again
("off=(m-f,5);zebrablue,on=(t-w,5)", None),
("bar;off=(m-f,5);zebrablue,on=(t-w,5)", None),
]
def test_schedule_parser(self):
self.maxDiff = None
parser = ScheduleParser({"tz": "et"})
for value, expected in self.table:
self.assertEqual(parser.parse(value), expected)
def test_offhours_skip(self):
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=19,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(OffHour({})(i), True)
self.assertEqual(OffHour({"skip-days": ["2015-12-01"]})(i), False)
self.assertEqual(
OffHour({"skip-days": ["2017-01-01", "2015-12-01"]})(i), False
)
self.assertEqual(OffHour({"skip-days": ["2015-12-02"]})(i), True)
def test_onhour_skip(self):
t = datetime.datetime(
year=2015,
month=12,
day=1,
hour=7,
minute=5,
tzinfo=tzutil.gettz("America/New_York"),
)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{"Key": "maid_offhours", "Value": "tz=est"}])
self.assertEqual(OnHour({})(i), True)
self.assertEqual(OnHour({"onhour": 8})(i), False)
self.assertEqual(OnHour({"skip-days": ["2015-12-01"]})(i), False)
self.assertEqual(
OnHour({"skip-days": ["2017-01-01", "2015-12-01"]})(i), False
)
self.assertEqual(OnHour({"skip-days": ["2015-12-02"]})(i), True)
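# A guarded sketch of driving ScheduleParser directly (the tag value is
# illustrative; the expected structure mirrors the table-driven cases above):
if __name__ == "__main__":
    parser = ScheduleParser({"tz": "et"})
    parsed = parser.parse("off=(m-f,19);on=(m-f,7);tz=pt")
    assert parsed["tz"] == "pt"
    assert parsed["off"] == [{"days": [0, 1, 2, 3, 4], "hour": 19}]
    assert parsed["on"] == [{"days": [0, 1, 2, 3, 4], "hour": 7}]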
|
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import copy
import logging
import os
import threading
import time
import xbmc
import xbmcgui
import mythbox.msg as m
import mythbox.ui.toolkit as toolkit
from mythbox.ui.toolkit import showPopup
from mythbox.util import formatSeconds, BoundedEvictingQueue, safe_str, catchall
from mythbox.mythtv.db import inject_db
log = logging.getLogger('mythbox.ui')
mlog = logging.getLogger('mythbox.method')
mythPlayer = None
# Interval in millis to sleep when we're waiting around for
# async xbmc events to complete
SLEEP_MILLIS = 250
class BasePlayer(xbmc.Player):
def __init__(self, *args, **kwargs):
xbmc.Player.__init__(self, *args, **kwargs)
self.active = True
self.tracker = PositionTracker(self)
def buildPlaybackUrl(self):
raise Exception('Abstract method')
def buildPlayList(self):
raise Exception('Abstract method')
def playRecording(self, commSkipper):
raise Exception('Abstract method')
@catchall
def onPlayBackStarted(self):
if self.active:
log.debug('> base:onPlayBackStarted %s' % self)
for target in (self.bookmarker, self.tracker, self.commSkipper):
try:
target.onPlayBackStarted()
except:
log.exception('onPlayBackStarted')
log.debug('< base:onPlayBackStarted %s' % self)
def onPlayBackStopped(self):
if self.active:
self.active = False
log.debug('> onPlayBackStopped')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackStopped()
except:
log.exception('onPlayBackStopped')
log.debug('< onPlayBackStopped')
def onPlayBackEnded(self):
if self.active:
self.active = False
log.debug('> onPlayBackEnded')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackEnded()
except:
                    log.exception('onPlayBackEnded')
log.debug('< onPlayBackEnded')
class MountedPlayer(BasePlayer):
'''Plays mythtv recordings with support for bookmarks, commercial skipping, etc'''
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
        for k, v in kwargs.iteritems():
            if k in ('translator', 'mythThumbnailCache', 'program', 'platform'):
                setattr(self, k, v)
self.bookmarker = MythBookmarker(self, self.program, self.translator)
self._playbackCompletedLock = threading.Event()
self._playbackCompletedLock.clear()
def buildPlaybackUrl(self):
return self.program.getLocalPath()
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording(%s)' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
self.play(self.buildPlaybackUrl(), self.buildPlayList(), windowed=False)
self.waitForPlaybackCompleted()
self.active = False
mlog.debug('< playRecording(...)')
# Callbacks ---------------------------------------------------------------
@catchall
def onPlayBackStopped(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackStopped()
finally:
self._playbackCompletedLock.set()
@catchall
def onPlayBackEnded(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackEnded()
finally:
self._playbackCompletedLock.set()
# Private -----------------------------------------------------------------
def waitForPlaybackCompleted(self):
while not self._playbackCompletedLock.isSet():
#log.debug('Waiting for playback completed...')
xbmc.sleep(SLEEP_MILLIS)
def buildPlayList(self):
mlog.debug("> _buildPlayList")
playlistItem = xbmcgui.ListItem()
title = self.program.fullTitle()
comms = self.program.getCommercials()
if len(comms) > 0:
            title += ' (%s breaks - %s)' % (len(comms), ', '.join(map(lambda c: formatSeconds(c.start), comms)))
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
# TODO: Set start offset if a comm break starts at 0.0
# playlistItem.setProperty('StartOffset', '256.4')
mlog.debug("< _buildPlayList")
return playlistItem
class StreamingPlayer(BasePlayer):
"""Use xbmcs built in myth support to stream the recording over the network."""
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
        for k, v in kwargs.iteritems():
            if k in ('settings', 'translator', 'mythThumbnailCache', 'program', 'platform'):
                setattr(self, k, v)
self.bookmarker = MythBookmarker(self, self.program, self.translator)
@inject_db
def buildPlaybackUrl(self):
backend = self.db().toBackend(self.program.hostname())
# myth://dbuser:dbpassword@mythbackend_hostname:mythbackend_port/recordings/filename.mpg
url = 'myth://%s:%s@%s:%s/recordings/%s' % (
self.settings.get('mysql_database'),
self.settings.get('mysql_password'),
backend.ipAddress,
backend.port,
self.program.getBareFilename())
log.debug('Playback url: %s' % url)
return url
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording %s' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
# extract recording's framerate from xbmc.log and inject into bookmarker
#from mythbox.log import LogScraper
#logtail = LogScraper(self.platform.getXbmcLog())
#worker = logtail.matchLineAsync("fps:", timeout=30, callback=self.bookmarker.onFPS)
self.play(self.buildPlaybackUrl(), self.buildPlayList())
#worker.join()
#self._waitForPlaybackCompleted()
#self.active = False
mlog.debug('< playRecording')
def buildPlayList(self):
playlistItem = xbmcgui.ListItem()
comms = self.program.getCommercials()
title = self.program.fullTitle()
if len(comms) > 0:
# times are invalid when streaming so only show cardinality
title += u' (%d breaks)' % len(comms)
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
return playlistItem
class Bookmarker(object):
pass
class XbmcBookmarker(Bookmarker):
    '''When using a myth:// style URL for playback, defer to XBMC's built-in
    resume-from-last-position functionality'''
def __init__(self, *args, **kwargs):
pass
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class MythBookmarker(Bookmarker):
'''Mimics XBMC video player's builtin auto resume functionality'''
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
#self.fps = None
def onPlayBackStarted(self):
self._resumeFromBookmark()
def onPlayBackStopped(self):
self._saveLastPositionAsBookmark()
def onPlayBackEnded(self):
self._clearBookmark()
# @catchall
# def onFPS(self, line):
# log.debug('onFPS: %s' % line)
# if line is not None:
# log.debug('onFPS: line not none')
# words = line.split()
# tagIndex = words.index('fps:')
# self.fps = float(words[tagIndex+1].strip(','))
# self.program.setFPS(self.fps)
# log.debug('fps = %s' % self.fps)
# else:
# log.debug('onFPS: line is none')
# self.fps = 0.0
# #if log.isEnabledFor(logging.DEBUG):
# # showPopup('FPS', 'FPS %s' % self.fps)
def _clearBookmark(self):
if self.program.isBookmarked():
self.program.setBookmark(0.0)
def _resumeFromBookmark(self):
log.debug('bookmarker : before wait for gotFPS')
        # # wait for fps to be set by log scraper for a max of 10 seconds
# cnt = 0
# while self.fps is None and cnt < 100:
# time.sleep(0.1)
# cnt += 1
#
# if self.fps is None:
# log.warn('Timed out waiting for fps to be set on bookmarker')
# else:
# log.debug('bookmarker : after wait for gotFPS')
bookmarkSecs = self.program.getBookmark()
if bookmarkSecs > 0 and bookmarkSecs < (self.program.getDuration() * 60):
fb = formatSeconds(bookmarkSecs)
log.debug('Resuming recording at bookmarked position of %s' % fb)
showPopup(self.program.title(), self.translator.get(m.RESUMING_AT) % fb)
self.player.seekTime(bookmarkSecs)
while self.player.getTime() < bookmarkSecs:
log.debug('Waiting for player time %s to seek past bookmark of %s' %(formatSeconds(self.player.getTime()), fb))
xbmc.sleep(SLEEP_MILLIS)
else:
log.debug('Recording has no bookmark or bookmark exceeds program length')
def _saveLastPositionAsBookmark(self):
lastPos = self.player.tracker.getLastPosition()
log.debug('Setting bookmark on %s to %s' %(safe_str(self.program.title()), formatSeconds(lastPos)))
try:
self.program.setBookmark(lastPos)
except:
log.exception('_saveLastPositionAsBookmark catchall')
class PositionTracker(object):
"""
Tracks the last position of the player. This is necessary because
Player.getTime() is not valid after the callback to
Player.onPlayBackStopped() has completed.
"""
HISTORY_SECS = 5 # Number of seconds of history to keep around
def __init__(self, player):
self._player = player
self._lastPos = 0.0
self._tracker = BoundedEvictingQueue((1000/SLEEP_MILLIS) * self.HISTORY_SECS)
self._history = []
def onPlayBackStarted(self):
log.debug('Starting position tracker...')
self._tracker = threading.Thread(
name='Position Tracker',
target = self._trackPosition)
self._tracker.start()
def onPlayBackStopped(self):
if self._tracker.isAlive():
log.debug('Position tracker stop called. Still alive = %s' % self._tracker.isAlive())
else:
log.debug('Position tracker thread already dead.')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def getHistory(self, howFarBack):
"""Returns a list of TrackerSamples from 'howFarBack' seconds ago."""
endPos = self._lastPos
startPos = endPos - howFarBack
slice = []
for sample in self._history:
if startPos <= sample.pos and sample.pos <= endPos:
slice.append(sample)
log.debug('Tracker history for %s secs = [%s] %s' % (howFarBack, len(slice), slice))
return slice
def getLastPosition(self):
return self._lastPos
def _trackPosition(self):
"""Method run in a separate thread. Tracks last position of player as long as it is playing"""
try:
while self._player.isPlaying():
self._lastPos = self._player.getTime()
self._history.append(TrackerSample(time.time(), self._lastPos))
#log.debug('Tracker time = %s' % self._lastPos)
xbmc.sleep(SLEEP_MILLIS)
log.debug('Position tracker thread exiting with lastPos = %s' % self.getLastPosition())
except:
log.exception('_trackPosition catchall')
class TrackerSample(object):
def __init__(self, time, pos):
self.time = time
self.pos = pos
def __repr__(self):
return 'Sample {time = %s, pos = %s}' % (self.time, self.pos)
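# A guarded sketch of the tracker's history window (illustrative; it only
# runs when this module is executed directly, which also requires the xbmc
# imports above to resolve). Note that getHistory() filters samples by
# playback position rather than wall-clock time: a 5 second window means
# samples whose position lies within 5 seconds of the last known position.
if __name__ == '__main__':
    tracker = PositionTracker(None)
    tracker._lastPos = 100.0
    tracker._history = [TrackerSample(0, p) for p in (90.0, 96.0, 99.0, 100.0)]
    assert [s.pos for s in tracker.getHistory(5)] == [96.0, 99.0, 100.0]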
class ICommercialSkipper(object):
"""Common interface for commercial skipping implementations."""
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
def onPlayBackStarted(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackStopped(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackEnded(self):
raise NotImplementedError, 'Abstract base class'
class NoOpCommercialSkipper(ICommercialSkipper):
def __init__(self, player=None, program=None, translator=None):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class TrackingCommercialSkipper(ICommercialSkipper):
"""
Commercial skipper that monitors the position of the currently playing file
and skips commercials accordingly.
"""
def __init__(self, player, program, translator):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
log.debug('program in skipper = %s' % safe_str(self.program.title()))
# don't want changes to commbreak.skipped to stick beyond the scope of
# this player instance so use a deepcopy
self._breaks = copy.deepcopy(self.program.getCommercials())
# Has a value when video position falls in a comm break
self._currentBreak = None
for b in self._breaks:
log.debug('break = %s' % b)
self._skipper = threading.Thread(name='Tracking Commercial Skipper', target = self._trackCommercials)
self._skipper.start()
def onPlayBackStopped(self):
if self._skipper.isAlive():
log.debug('Commercial tracker stop called. Still alive = %s' % self._skipper.isAlive())
else:
log.debug('Commercial tracker thread already dead')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def _isInBreak(self, pos):
for b in self._breaks:
if b.isDuring(pos):
self._currentBreak = b
return True
self._currentBreak = None
return False
def _trackCommercials(self):
"""Method run in a separate thread to skip over commercials"""
try:
if len(self._breaks) == 0:
log.debug('Recording %s has no comm breaks, exiting comm tracker' % safe_str(self.program.title()))
return
while self.player.isPlaying():
pos = self.player.getTime()
if self._isInBreak(pos) and not self._currentBreak.skipped:
log.debug('entered comm break = %s' % self._currentBreak)
if self._isCloseToStartOfCommercial(pos) and not self._wasUserSkippingAround(pos):
log.debug('Comm skip activated!')
showPopup(self.program.title(), self.translator.get(m.SKIPPING_COMMERCIAL) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
if self._landedInCommercial(pos):
log.debug("Landed in comm break and want to skip forward")
showPopup(self.program.title(), self.translator.get(m.FORWARDING_THROUGH) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
xbmc.sleep(SLEEP_MILLIS)
log.debug('Commercial tracker thread exiting')
except:
log.exception('_trackCommercials catchall')
def _landedInCommercial(self, currPos):
#samplesInCommercial = 4 # In commercial for 2 seconds
secondsToSample = 4
samples = self.player.tracker.getHistory(secondsToSample)
samplesInCommercial = len(filter(lambda x: self._currentBreak.isDuring(x.pos), samples))
log.debug('Samples in commercial = %d' % samplesInCommercial)
return samplesInCommercial > 8 and samplesInCommercial < 12
    def _wasUserSkippingAround(self, currPos):
        """
        Check the last 2 seconds of history for the number of samples. A
        high number of samples indicates that the user was probably not
        skipping around in the video, hence the comm skip would be a good
        thing.
        """
wasSkipping = False
samplePeriodSecs = 2 # TODO: Pass in as param to method call
# If currPos is too close to the start of the video..assume not
# skipping around
if currPos > samplePeriodSecs:
requiredSamples = 6 # TODO: Derive as percentage instead of hardcoding
numSamples = len(self.player.tracker.getHistory(samplePeriodSecs))
log.debug('Samples in last %s seconds = %s' %(samplePeriodSecs, numSamples))
wasSkipping = numSamples < requiredSamples
log.debug('User was skipping around = %s' % wasSkipping)
return wasSkipping
def _isCloseToStartOfCommercial(self, currPos):
"""
check that the current pos is close in proximity to the start of the
commercial break. assumes that comm break is skipped only if the user
played directly into the commercial vs. landing inside the commercial
via ffwd, rewind, etc.
"""
windowStart = self._currentBreak.start - 1
windowEnd = self._currentBreak.start + 2
        isClose = windowStart <= currPos <= windowEnd
log.debug('User close to start of comm break = %s' % isClose)
return isClose
def _waitForPlayerToPassCommercialBreak(self):
# TODO: What if user stops playing while in this loop? Add isPlaying() to loop invariant
# wait for player pos to pass current break
while self._currentBreak.isDuring(self.player.getTime()):
xbmc.sleep(SLEEP_MILLIS)
|
|
import collections
import json
import random
import re
def num(s):
try:
return int(s)
except ValueError:
return float(s)
# load a json file into a dictionary
def load_json(filename):
    with open(filename) as f:
        return json.load(f, object_pairs_hook=collections.OrderedDict)
# generate an entity from a json profile
def generate_entity(data):
# generate a blank entity
entity = {}
# for each key in the data profile
for key, options in data["profile"].items():
# generate a value for the entity according to the list of options
value = next(choose_options(options, entity, 1))
# parses commands out of the value generated
entity[key] = parse_commands(data['resources'], entity, value)
return entity
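# Illustrative profile shape (hypothetical data, not shipped with this
# module): generate_entity() expects {"profile": ..., "resources": ...} where
# each option is {"wt": <weight expression>, "val": <template>,
# "req": {attr: [allowed values]}}. generate_entity(_EXAMPLE_DATA) might
# yield something like {"race": "human", "age": "37"}.
_EXAMPLE_DATA = {
    "profile": {
        "race": [{"wt": "1", "val": "$races$", "req": {}}],
        "age": [{"wt": "1", "val": "[18-60]", "req": {}}],
    },
    "resources": {
        "races": [
            {"wt": "3", "val": "human", "req": {}},
            {"wt": "1", "val": "elf", "req": {}},
        ],
    },
}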
# returns a generator which yields a maximum of n items from options for which entity fulfills all requirements
def choose_options(options, entity, n):
valid_options = get_valid_options(options, entity)
# get a generator that returns the chosen options
chosen = weighted_selection(valid_options, n)
# yield each chosen option
while n:
# return the completed value
yield next(chosen)
n -= 1
# returns a list of items contained in options for which entity fulfills all requirements
def get_valid_options(options, entity):
valid_options = []
# for each option
for opt in options:
# check if entity fulfills all requirements
if check_reqs(opt, entity):
# add the option to the valid list
valid_options.append([parse_weight({}, opt['wt']), opt['val']])
return valid_options
# parses the expression that represents the weight of an object, and evaluates the expression to return an integer
def parse_weight(context, weight):
# replace selection commands in the expression
# get list of all matches
    matches = re.findall(r"\$(.*?)\$", weight)
# unique list
match_set = set(matches)
# for each unique match
for match in match_set:
# replace command with its replacement value
try:
weight = weight.replace('$'+match+'$', context[match])
except KeyError:
weight = weight.replace('$'+match+'$', "1")
# evaluate the expression with constants
# THIS IS SUPER UNSAFE, WILL EXECUTE ARBITRARY CODE
return eval(weight)
# weighted selection without replacement
def weighted_selection(items, n):
total_weight = float(sum(wt for wt, val in items))
    while n:
        index = 0
        running_weight = items[0][0]
        rand_num = random.random() * total_weight
        while rand_num > running_weight:
            index += 1
            running_weight += items[index][0]
        wt, val = items.pop(index)
        total_weight -= wt
        yield val
        # decrement so the generator itself stops after n items, rather than
        # relying on the caller to stop iterating
        n -= 1
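# Illustrative usage (assumed data): weighted_selection consumes [weight, value]
# pairs and pops chosen items from the list it is given, so pass a copy if the
# original list is needed afterwards.
#
#     demo = [[3, 'common'], [1, 'rare']]
#     picks = list(weighted_selection(list(demo), 2))  # e.g. ['common', 'rare']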
# removes all command syntax from a string, replacing it with a valid value for that command
def parse_commands(res, entity, string):
oldstring = ""
while not string == oldstring:
oldstring = string
string = parse_selection(res, entity, string)
string = parse_numeric(string)
return string
# selection command: $...$
# returns a string with instances of a selection command replaced by a value selected by the command
# res is the list of resources that the selection command will pull from
# entity is the entity being generated as it currently exists, used for checking against requirements
def parse_selection(res, entity, string):
# get list of all matches
    matches = re.findall(r"\$(.*?)\$", string)
# unique list
match_set = set(matches)
# for each unique match
for match in match_set:
        # get number of occurrences in the original string
        occurrences = matches.count(match)
        # get replacement values
        replacements = choose_options(res[match], entity, occurrences)
        # for each occurrence
        for i in range(occurrences):
            # replace command with a unique replacement value
            string = string.replace('$'+match+'$', next(replacements), 1)
return string
# numeric generation command: [...]
# returns a string with instances of a numeric generation command replaced with a number generated from the command
# string is the string that will be parsed
def parse_numeric(string):
while True:
# break when no more replacements are needed
        matches = re.search(r"\[([0-9]*)\-([0-9]*)\]", string)
if matches is None:
break
# generate replacement value
replacement = str(random.randint(int(matches.group(1)), int(matches.group(2))))
# replace command with its replacement value
string = string.replace(matches.group(0), replacement)
return string
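# e.g. parse_numeric("rolls [1-6] and [1-6]") -> "rolls 4 and 2" (digits chosen
# at random per occurrence)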
# checks an entity to ensure that it fulfills all requirements of an option
# option is the option whose requirements the entity is being checked against
# entity is the entity that is being checked against the requirements of option
def check_reqs(option, entity):
# for each key in requirements
for key, values in option["req"].items():
if not check_attr(values, entity, key):
# return that one or more requirements were not met
return False
# if no attribute requirements went unfulfilled, return that all requirements were met
return True
# checks whether an attribute on entity matches any one of the possible values for the requirement
# vals is the list of possible values that entity[attr] can have
# entity is the entity being checked
# attr is the particular attribute being checked on the entity
def check_attr(vals, entity, attr):
# if it meets one of the requirements, return True
for value in vals:
if check_req(value, entity, attr):
return True
# if it met none of the requirements, return False
return False
# checks whether a single possible value of a requirement matches the corresponding attribute attr on entity
# req is the value that is being compared to entity[attr]
# entity is the entity that is attempting to fulfill the requirement
# attr is the attribute of entity that must match the value of req
def check_req(req, entity, attr):
    compare_nums = re.match(r"\[(<|>|<=|>=|=)([0-9]+)\]", req)
    # if the value is a numeric comparison
    if compare_nums is not None:
        try:
            num(entity[attr])
        except ValueError:
            print("Attempted to do a numeric comparison on a non-numeric entity attribute: "+attr)
            return False
        return compare(compare_nums.group(1), entity[attr], compare_nums.group(2))
    # if the requirement is a straight text comparison
    else:
        return entity[attr] == req
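# e.g. check_req("[>=16]", {"Age": "20"}, "Age") -> True, while
# check_req("Bob", {"Name": "Tom"}, "Name") -> False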
# takes 3 strings
# operator is a string containing the operator to use for comparison (one of =, <, <=, >, >=)
# a and b are the values that will be compared
# both a and b must be numeric
def compare(operator, a, b):
if operator == "=":
return num(a) == num(b)
elif operator == "<":
return num(a) < num(b)
elif operator == "<=":
return num(a) <= num(b)
elif operator == ">":
return num(a) > num(b)
elif operator == ">=":
return num(a) >= num(b)
|
|
# This code is Copyright 2014-2017 by Pier Carlo Chiodi.
# See full license in LICENSE file.
# See TRANSFORMATIONS.md file for details
import json
from pierky.p2es.errors import P2ESError
# Parse list of conditions c against data d.
# Returns: True | False (conditions matched / did not match).
# Raises exceptions: yes.
def parse_conditions_list(c, d):
if not c:
raise P2ESError('Empty list')
if isinstance(c[0], basestring):
if c[0] == 'AND':
if len(c) > 2:
for sub_c in c[1:]:
if not parse_conditions(sub_c, d):
return False
return True
else:
return False
elif c[0] == 'OR':
if len(c) > 2:
for sub_c in c[1:]:
if parse_conditions(sub_c, d):
return True
return False
else:
return True
else:
raise P2ESError(
'Logical groups must begin with "AND" or "OR" '
'("{}" found)'.format(c[0])
)
else:
# default to "AND" if not specified
for sub_c in c:
if not parse_conditions(sub_c, d):
return False
return True
# Parse condition c against data d, using operator opfield.
# Returns: True | False (condition matched / did not match).
# Raises exceptions: yes.
def parse_conditions_dict(c, d, opfield):
op = '='
n = None
v = None
for k in c:
if k == opfield:
op = c[k]
if not op in ('=', '>', '>=', '<', '<=', '!=', 'in', 'notin'):
raise P2ESError('Unexpected operator: "{}"'.format(op))
else:
if n is None:
n = k
v = c[k]
else:
raise P2ESError('Only one name/value pair allowed')
if op in ('in', 'notin') and not isinstance(v, list):
raise P2ESError('The "{}" operator requires a list'.format(op))
if n is None:
raise P2ESError('Name/value pair expected')
if n not in d:
return False
if op == '=':
return d[n] == v
elif op == '>':
return d[n] > v
elif op == '>=':
return d[n] >= v
elif op == '<':
return d[n] < v
elif op == '<=':
return d[n] <= v
elif op == '!=':
return d[n] != v
elif op == 'in':
return d[n] in v
elif op == 'notin':
return not d[n] in v
else:
raise P2ESError('Operator not implemented: "{}"'.format(op))
# Parse conditions c against data d.
# Return: True | False (conditions matched / did not match).
# Raises exception: yes.
def parse_conditions(c, d, opfield='__op__'):
if isinstance(c, list):
return parse_conditions_list(c, d)
elif isinstance(c, dict):
return parse_conditions_dict(c, d, opfield)
else:
raise P2ESError('Unexpected object type {} from {}'.format(
type(c), str(c)
))
# Tests if a transformation's syntax is valid.
# Returns: None.
# Raises exceptions: yes.
def test_transformation(tr):
    try:
        tr_det = 'Transformations matrix ({})'.format(tr)
    except:
        tr_det = 'Transformations matrix'
if 'Conditions' not in tr:
raise P2ESError('{}, "Conditions" is missing'.format(tr_det))
if 'Actions' not in tr:
raise P2ESError('{}, "Actions" is missing'.format(tr_det))
try:
parse_conditions(tr['Conditions'], {})
except P2ESError as e:
raise P2ESError('{}, invalid "Conditions": {}'.format(tr_det, str(e)))
    for action in tr['Actions']:
        if 'Type' not in action:
            raise P2ESError('{}, "Type" is missing'.format(tr_det))
        # build a per-action detail string instead of appending to tr_det,
        # which would otherwise accumulate across actions
        action_det = '{}, action type = {}'.format(tr_det, action['Type'])
        if action['Type'] not in ('AddField', 'AddFieldLookup', 'DelField'):
            raise P2ESError('{}, "Type" unknown'.format(action_det))
        if 'Name' not in action:
            raise P2ESError('{}, "Name" is missing'.format(action_det))
        if action['Type'] == 'AddField':
            if 'Value' not in action:
                raise P2ESError(
                    '{}, "Value" is missing for new field "{}"'.format(
                        action_det, action['Name']
                    )
                )
        if action['Type'] == 'AddFieldLookup':
            if 'LookupFieldName' not in action:
                raise P2ESError(
                    '{}, "LookupFieldName" is missing for '
                    'new field "{}"'.format(action_det, action['Name'])
                )
            if 'LookupTable' in action and 'LookupTableFile' in action:
                raise P2ESError(
                    '{}, only one from "LookupTable" and '
                    '"LookupTableFile" allowed'.format(action_det)
                )
            if 'LookupTable' not in action and 'LookupTableFile' not in action:
                raise P2ESError(
                    '{}, "LookupTable" and "LookupTableFile" missing '
                    'for new field "{}"'.format(action_det, action['Name'])
                )
            if 'LookupTableFile' in action:
                try:
                    with open(action['LookupTableFile'], "r") as f:
                        action['LookupTable'] = json.load(f)
                except Exception as e:
                    raise P2ESError(
                        '{}, error loading lookup table from {}: {}'.format(
                            action_det, action['LookupTableFile'], str(e)
                        )
                    )
if __name__ == '__main__':
#Test conditions
#-------------------
#C = [ { "Name": "Bob" }, { "Age": 16, "__op__": ">=" } ]
#C = [ "OR", { "Name": "Bob" }, { "Name": "Tom" } ]
C = [ "OR",
[ { "Name": "Bob" }, { "Age": 16, "__op__": ">=" } ],
{ "Name": "Tom" },
[ { "Name": "Lisa" }, { "Age": 20, "__op__": ">=" } ]
]
#C = [ "Invalid" ]
Data = [
{ "Name": "Bob", "Age": 15 },
{ "Name": "Bob", "Age": 16 },
{ "Name": "Ken", "Age": 14 },
{ "Name": "Tom", "Age": 14 },
{ "Name": "Tom", "Age": 20 },
{ "Name": "Lisa", "Age": 15 },
{ "Name": "Lisa", "Age": 22 }
]
print(C)
for Person in Data:
try:
if parse_conditions(C, Person):
print( "YES - %s" % Person )
else:
print( "--- - %s" % Person )
except P2ESError as e:
print( "ParseConditions error: %s" % str(e) )
raise
|
|
'''
Players are Python objects with a ``__call__`` method
defined to accept a Game instance as the sole argument.
Players return None, and leave the input Game unmodified,
except for its valid_moves attribute. This value may be
replaced with another tuple containing the same moves,
but sorted in decreasing order of preference. Players
may be applied one after another for easy composability.
.. code-block:: python
>>> import dominoes
>>> g = dominoes.Game.new()
>>> g.valid_moves
(([0|0], True), ([3|4], True), ([1|3], True), ([2|2], True), ([3|3], True), ([2|3], True), ([5|6], True))
>>> dominoes.players.random(g)
>>> g.valid_moves
(([5|6], True), ([1|3], True), ([3|3], True), ([2|2], True), ([0|0], True), ([2|3], True), ([3|4], True))
.. code-block:: python
def double(game):
\'\'\'
Prefers to play doubles.
:param Game game: game to play
:return: None
\'\'\'
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: m[0].first != m[0].second))
'''
import collections
import copy
import dominoes
import random as rand
def identity(game):
'''
Leaves move preferences unchanged.
:param Game game: game to play
:return: None
'''
return
class counter:
'''
    Prefers moves in the same order as the passed-in player. Keeps
    a count of the number of times that this player gets called.
An instance of this class must first be initialized before it
can be called in the usual way.
:param callable player: player that determines the move preferences of
this player. The identity player is the default.
:param str name: the name of this player. The default is the name
of this class.
    :var int count: the number of times that this player has been called.
:var str __name__: the name of this player.
'''
def __init__(self, player=identity, name=None):
self.count = 0
self._player = player
if name is None:
self.__name__ = type(self).__name__
else:
self.__name__ = name
def __call__(self, game):
self.count += 1
return self._player(game)
def random(game):
'''
Prefers moves randomly.
:param Game game: game to play
:return: None
'''
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda _: rand.random()))
def reverse(game):
'''
Reverses move preferences.
:param Game game: game to play
:return: None
'''
game.valid_moves = tuple(reversed(game.valid_moves))
def bota_gorda(game):
'''
Prefers to play dominoes with higher point values.
:param Game game: game to play
:return: None
'''
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: -(m[0].first + m[0].second)))
def double(game):
'''
Prefers to play doubles.
:param Game game: game to play
:return: None
'''
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: m[0].first != m[0].second))
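# A minimal sketch of composing players (not part of the original module):
# because Python's sort is stable, applying one player after another keeps
# the earlier player's ordering as the tie-breaker among equally preferred
# moves.
def double_bota_gorda(game):
    '''
    Prefers doubles, breaking ties by higher point values.
    :param Game game: game to play
    :return: None
    '''
    bota_gorda(game)
    double(game)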
class omniscient:
'''
Prefers to play the move that maximizes this player's final score,
assuming that all other players play with the same strategy. This
player "cheats" by looking at all hands to make its decision. An
instance of this class must first be initialized before it can be
called in the usual way.
:param int start_move: move number at which to start applying this
player. If this player is called before the
specified move number, it will have no effect.
Moves are 0-indexed. The default is 0.
:param callable player: player used to sort moves to be explored
in the underlying call to alphabeta search.
Ordering better moves first may significantly
                            reduce the number of moves that need to be
explored. The identity player is the default.
:param str name: the name of this player. The default is the name
of this class.
:var str __name__: the name of this player
'''
def __init__(self, start_move=0, player=identity, name=None):
self._start_move = start_move
self._player = player
if name is None:
self.__name__ = type(self).__name__
else:
self.__name__ = name
def __call__(self, game):
# do not perform a potentially slow operation if it is
# too early in the game or if there is only one valid move
if len(game.moves) < self._start_move or len(game.valid_moves) == 1:
return
# so that we don't modify the original game
game_copy = copy.deepcopy(game)
# for performance
game_copy.skinny_board()
# perform an alphabeta search to find the optimal move sequence
moves, _ = dominoes.search.alphabeta(game_copy, player=self._player)
# place the optimal move at the beginning of game.valid_moves,
# while leaving the rest of the ordering unchanged
game.valid_moves = (moves[0],) + tuple(m for m in game.valid_moves if m != moves[0])
class probabilistic_alphabeta:
'''
This player repeatedly assumes the other players' hands, runs alphabeta search,
and prefers moves that are most frequently optimal. It takes into account all
known information to determine what hands the other players could possibly have,
    including its own hand, the sizes of the other players' hands, and the moves played
by every player, including the passes. An instance of this class must first be
initialized before it can be called in the usual way.
:param int start_move: move number at which to start applying this
player. If this player is called before the
specified move number, it will have no effect.
Moves are 0-indexed. The default is 0.
:param int sample_size: the number of times to assign random possible
hands to other players and run alphabeta search
before deciding move preferences. By default
considers all hands that other players could
possibly have.
:param callable player: player used to sort moves to be explored
in the underlying call to alphabeta search.
Ordering better moves first may significantly
                            reduce the number of moves that need to be
explored. The identity player is the default.
:param str name: the name of this player. The default is the name
of this class.
:var str __name__: the name of this player
'''
def __init__(self, start_move=0, sample_size=float('inf'), player=identity, name=None):
self._start_move = start_move
self._sample_size = sample_size
self._player = player
if name is None:
self.__name__ = type(self).__name__
else:
self.__name__ = name
def __call__(self, game):
# do not perform a potentially slow operation if it is
# too early in the game or if there is only one valid move
if len(game.moves) < self._start_move or len(game.valid_moves) == 1:
return
if self._sample_size == float('inf'):
# by default consider all hands the other players could possibly have
hands = game.all_possible_hands()
else:
# otherwise obtain a random sample
hands = (game.random_possible_hands() for _ in range(self._sample_size))
# iterate over the selected possible hands
counter = collections.Counter()
for h in hands:
# do not modify the original game
game_copy = copy.deepcopy(game)
# set the possible hands
game_copy.hands = h
# for performance
game_copy.skinny_board()
# run alphabeta and record the optimal move
counter.update([
dominoes.search.alphabeta(game_copy, player=self._player)[0][0]
])
# prefer moves that are more frequently optimal
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: -counter[m]))
|
|
import errno
import logging
import os
import shutil
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from django.conf import settings
from django.contrib.auth import SESSION_KEY, HASH_SESSION_KEY, BACKEND_SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.sessions.models import Session
from django.test import TestCase, Client
from django.utils.timezone import now
from mock import patch
from rest_framework.test import APIRequestFactory, force_authenticate
from django_mock_queries.mocks import mocked_relations
from constants import users
from metadata.models import kive_user, KiveUser
class DuckRequest:
""" A fake request used to test serializers. """
def __init__(self, user=None):
self.user = user or kive_user()
self.GET = {}
self.META = {}
self.method = 'GET'
def build_absolute_uri(self, url):
return url
class DuckContext(dict):
""" A fake context used to test serializers. """
def __init__(self, user=None, **kwargs):
super(DuckContext, self).__init__(**kwargs)
self['request'] = DuckRequest(user=user)
class ViewMockTestCase(TestCase, object):
def create_client(self):
patcher = mocked_relations(User, Session)
patcher.start()
self.addCleanup(patcher.stop)
user = User(pk=users.KIVE_USER_PK)
User.objects.add(user)
User.objects.model = User
# noinspection PyUnresolvedReferences
patcher = patch.object(User._meta, 'default_manager', User.objects)
patcher.start()
self.addCleanup(patcher.stop)
dummy_session_key = 'dummysession'
dummy_session = Session(
session_key=dummy_session_key,
expire_date=now() + timedelta(days=1),
session_data=SessionStore().encode({
SESSION_KEY: users.KIVE_USER_PK,
HASH_SESSION_KEY: user.get_session_auth_hash(),
BACKEND_SESSION_KEY: 'django.contrib.auth.backends.ModelBackend'}))
Session.objects.add(dummy_session)
client = Client()
client.cookies[settings.SESSION_COOKIE_NAME] = dummy_session_key
client.force_login(kive_user())
return client
class BaseTestCases:
""" A class to hide our base classes so they won't be executed as tests.
"""
def __init__(self):
pass
class ApiTestCase(TestCase, object):
"""
Base test case used for all API testing.
Such test cases should provide tests of:
- list
- detail
- creation (if applicable)
- redaction
- removal
- any other detail or list routes
In addition, inheriting classes must provide appropriate values for
self.list_path and self.list_view in their setUp().
"""
def setUp(self):
self.factory = APIRequestFactory()
self.kive_user = kive_user()
def mock_viewset(self, viewset_class):
model = viewset_class.queryset.model
patcher = mocked_relations(model, User, KiveUser)
patcher.start()
self.addCleanup(patcher.stop)
user = User(pk=users.KIVE_USER_PK)
User.objects.add(user)
self.kive_kive_user = KiveUser(pk=users.KIVE_USER_PK, username="kive")
KiveUser.objects.add(self.kive_kive_user)
# noinspection PyUnresolvedReferences
patcher2 = patch.object(viewset_class,
'queryset',
model.objects)
patcher2.start()
self.addCleanup(patcher2.stop)
def test_auth(self):
"""
Test that the API URL is correctly defined and requires a logged-in user.
"""
# First try to access while not logged in.
# noinspection PyUnresolvedReferences
request = self.factory.get(self.list_path)
# noinspection PyUnresolvedReferences
response = self.list_view(request)
self.assertEqual(response.data["detail"], "Authentication credentials were not provided.")
# Now log in and check that "detail" is not passed in the response.
force_authenticate(request, user=self.kive_user)
# noinspection PyUnresolvedReferences
response = self.list_view(request)
self.assertNotIn('detail', response.data)
# noinspection PyUnusedLocal
def dummy_file(content, name='dummy_file', mode='rb'):
""" Create an in-memory, file-like object.
:param str content: the contents of the file
:param str name: a name for the file
:param str mode: the mode to open the file (ignored)
:return: an object that looks like an open file handle.
"""
data_file = StringIO(content)
data_file.name = name
data_file.__enter__ = lambda: None
data_file.__exit__ = lambda extype, value, traceback: None
return data_file
def check_media_root_is_test():
if os.path.basename(settings.MEDIA_ROOT) != 'Testing':
raise RuntimeError(
"MEDIA_ROOT doesn't end with 'Testing', use test settings.")
def install_fixture_files(fixture_name):
"""
Helper that installs the FieldFiles for a given fixture.
"""
remove_fixture_files() # Remove any leftovers
fixture_files_path = os.path.join("FixtureFiles", fixture_name)
assert os.path.isdir(fixture_files_path)
for target in os.listdir(fixture_files_path):
target_path = os.path.join(settings.MEDIA_ROOT, target)
dir_to_install = os.path.join(fixture_files_path, target)
shutil.copytree(dir_to_install, target_path)
containers_path = os.path.join(settings.MEDIA_ROOT, 'Containers')
if not os.path.exists(containers_path):
os.makedirs(containers_path)
test_container_path = os.path.join(containers_path,
settings.DEFAULT_CONTAINER)
if not os.path.exists(test_container_path):
alpine_container_path = os.path.abspath(os.path.join(
__file__,
'..',
'..',
'..',
'samplecode',
'singularity',
'python2-alpine-trimmed.simg'))
os.symlink(alpine_container_path, test_container_path)
def remove_fixture_files():
"""
Helper that removes all FieldFiles used by a test fixture.
"""
check_media_root_is_test()
try:
os.makedirs(settings.MEDIA_ROOT)
# If that succeeded, then the folder is empty.
return
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
for dirname in os.listdir(settings.MEDIA_ROOT):
target_path = os.path.join(settings.MEDIA_ROOT, dirname)
shutil.rmtree(target_path)
def strip_removal_plan(plan):
plan_not_blanks = {key: value
for key, value in plan.items()
if value}
return plan_not_blanks
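# e.g. strip_removal_plan({'Datasets': ['ds1'], 'Runs': []}) returns
# {'Datasets': ['ds1']} (illustrative keys and values)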
@contextmanager
def capture_log_stream(log_level, *logger_names):
mocked_stderr = StringIO()
stream_handler = logging.StreamHandler(mocked_stderr)
old_levels = {}
loggers = {}
    for logger_name in logger_names:
        logger = logging.getLogger(logger_name)
        logger.addHandler(stream_handler)
        old_levels[logger_name] = logger.level
        logger.setLevel(log_level)
        loggers[logger_name] = logger  # track it so the finally block can restore it
try:
yield mocked_stderr
finally:
for logger_name, logger in loggers.items():
logger.removeHandler(stream_handler)
logger.level = old_levels[logger_name]
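# Illustrative usage (the logger name is an assumption): capture WARNING output
# from a named logger and inspect it afterwards.
#
#     with capture_log_stream(logging.WARNING, 'kive.tests') as mocked_stderr:
#         logging.getLogger('kive.tests').warning('boom')
#     assert 'boom' in mocked_stderr.getvalue()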
|
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import sys
import perfection
_MAX_HEADER_LIST_SIZE = 16 * 1024 * 1024
Setting = collections.namedtuple('Setting', 'id default min max on_error')
OnError = collections.namedtuple('OnError', 'behavior code')
clamp_invalid_value = OnError('CLAMP_INVALID_VALUE', 'PROTOCOL_ERROR')
disconnect_on_invalid_value = lambda e: OnError('DISCONNECT_ON_INVALID_VALUE', e)
DecoratedSetting = collections.namedtuple('DecoratedSetting',
'enum name setting')
_SETTINGS = {
'HEADER_TABLE_SIZE':
Setting(1, 4096, 0, 0xffffffff, clamp_invalid_value),
'ENABLE_PUSH':
Setting(2, 1, 0, 1, disconnect_on_invalid_value('PROTOCOL_ERROR')),
'MAX_CONCURRENT_STREAMS':
Setting(3, 0xffffffff, 0, 0xffffffff,
disconnect_on_invalid_value('PROTOCOL_ERROR')),
'INITIAL_WINDOW_SIZE':
Setting(4, 65535, 0, 0x7fffffff,
disconnect_on_invalid_value('FLOW_CONTROL_ERROR')),
'MAX_FRAME_SIZE':
Setting(5, 16384, 16384, 16777215,
disconnect_on_invalid_value('PROTOCOL_ERROR')),
'MAX_HEADER_LIST_SIZE':
Setting(6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE,
clamp_invalid_value),
'GRPC_ALLOW_TRUE_BINARY_METADATA':
Setting(0xfe03, 0, 0, 1, clamp_invalid_value),
}
H = open('src/core/ext/transport/chttp2/transport/http2_settings.h', 'w')
C = open('src/core/ext/transport/chttp2/transport/http2_settings.c', 'w')
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print('/*', file=f)
for line in banner:
print(' * %s' % line, file=f)
print(' */', file=f)
print(file=f)
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#':
break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H, C], [line[2:].rstrip() for line in copyright])
put_banner(
[H, C],
["Automatically generated by tools/codegen/core/gen_settings_ids.py"])
print("#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H",
file=H)
print("#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H",
file=H)
print(file=H)
print("#include <stdint.h>", file=H)
print("#include <stdbool.h>", file=H)
print(file=H)
print("#include \"src/core/ext/transport/chttp2/transport/http2_settings.h\"",
file=C)
print(file=C)
print("#include <grpc/support/useful.h>", file=C)
print("#include \"src/core/lib/transport/http2_errors.h\"", file=C)
print(file=C)
p = perfection.hash_parameters(sorted(x.id for x in list(_SETTINGS.values())))
print(p)
def hash(i):
i += p.offset
x = i % p.t
y = i // p.t
return x + p.r[y]
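# Sanity check (added for illustration): a perfect hash must map each wire id
# to a distinct slot, otherwise the generated lookup tables would collide.
assert len(set(hash(s.id) for s in _SETTINGS.values())) == len(_SETTINGS)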
decorated_settings = [
DecoratedSetting(hash(setting.id), name, setting)
for name, setting in _SETTINGS.items()
]
print('typedef enum {', file=H)
for decorated_setting in sorted(decorated_settings):
print(' GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */' %
(decorated_setting.name, decorated_setting.enum,
decorated_setting.setting.id),
file=H)
print('} grpc_chttp2_setting_id;', file=H)
print(file=H)
print('#define GRPC_CHTTP2_NUM_SETTINGS %d' %
(max(x.enum for x in decorated_settings) + 1),
file=H)
print('extern const uint16_t grpc_setting_id_to_wire_id[];', file=H)
print('const uint16_t grpc_setting_id_to_wire_id[] = {%s};' %
','.join('%d' % s for s in p.slots),
file=C)
print(file=H)
print(
"bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);",
file=H)
cgargs = {
'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
't': p.t,
'offset': abs(p.offset),
'offset_sign': '+' if p.offset > 0 else '-'
}
print("""
bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) {
uint32_t i = wire_id %(offset_sign)s %(offset)d;
uint32_t x = i %% %(t)d;
uint32_t y = i / %(t)d;
uint32_t h = x;
switch (y) {
""" % cgargs,
file=C)
for i, r in enumerate(p.r):
if not r:
continue
if r < 0:
print('case %d: h -= %d; break;' % (i, -r), file=C)
else:
print('case %d: h += %d; break;' % (i, r), file=C)
print("""
}
*out = (grpc_chttp2_setting_id)h;
return h < GPR_ARRAY_SIZE(grpc_setting_id_to_wire_id) && grpc_setting_id_to_wire_id[h] == wire_id;
}
""" % cgargs,
file=C)
print("""
typedef enum {
GRPC_CHTTP2_CLAMP_INVALID_VALUE,
GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE
} grpc_chttp2_invalid_value_behavior;
typedef struct {
const char *name;
uint32_t default_value;
uint32_t min_value;
uint32_t max_value;
grpc_chttp2_invalid_value_behavior invalid_value_behavior;
uint32_t error_value;
} grpc_chttp2_setting_parameters;
extern const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
""",
file=H)
print(
"const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {",
file=C)
i = 0
for decorated_setting in sorted(decorated_settings):
while i < decorated_setting.enum:
print(
"{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},",
file=C)
i += 1
print("{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
decorated_setting.name,
decorated_setting.setting.default,
decorated_setting.setting.min,
decorated_setting.setting.max,
decorated_setting.setting.on_error.behavior,
decorated_setting.setting.on_error.code,
),
file=C)
i += 1
print("};", file=C)
print(file=H)
print("#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */",
file=H)
H.close()
C.close()
|
|
# -*- coding: utf-8 -*-
"""Redis merge reader."""
import codecs
from plaso.lib import definitions
from plaso.storage import identifiers
from plaso.storage import interface
from plaso.storage import logger
from plaso.storage.redis import redis_store
class RedisMergeReader(interface.StorageMergeReader):
"""Redis store reader for merging."""
def __init__(self, storage_writer, task, redis_client=None):
"""Initializes a Redis storage merge reader.
Args:
storage_writer (StorageWriter): storage writer.
task (Task): the task whose store is being merged.
redis_client (Optional[Redis]): Redis client to query. If specified, no
new client will be created.
Raises:
RuntimeError: if an add container method is missing.
"""
super(RedisMergeReader, self).__init__(storage_writer)
self._active_container_type = None
self._active_cursor = 0
self._active_extra_containers = []
self._add_active_container_method = None
self._add_container_type_methods = {}
self._container_types = []
self._event_data_identifier_mappings = {}
self._event_data_stream_identifier_mappings = {}
self._store = redis_store.RedisStore(
definitions.STORAGE_TYPE_TASK,
session_identifier=task.session_identifier,
task_identifier=task.identifier)
self._store.Open(redis_client=redis_client)
# Create a runtime lookup table for the add container type method. This
# prevents having to create a series of if-else checks for container types.
# The table is generated at runtime as there are no forward function
# declarations in Python.
for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items():
method = getattr(self, method_name, None)
if not method:
raise RuntimeError(
'Add method missing for container type: {0:s}'.format(
container_type))
self._add_container_type_methods[container_type] = method
def _AddEvent(self, event, serialized_data=None):
"""Adds an event.
Args:
event (EventObject): event.
serialized_data (Optional[bytes]): serialized form of the event.
"""
if hasattr(event, 'event_data_row_identifier'):
event_data_identifier = identifiers.SQLTableIdentifier(
self._CONTAINER_TYPE_EVENT_DATA,
event.event_data_row_identifier)
lookup_key = event_data_identifier.CopyToString()
event_data_identifier = self._event_data_identifier_mappings[lookup_key]
event.SetEventDataIdentifier(event_data_identifier)
# TODO: add event identifier mappings for event tags.
self._storage_writer.AddEvent(event, serialized_data=serialized_data)
def _AddEventData(self, event_data, serialized_data=None):
"""Adds event data.
Args:
event_data (EventData): event data.
serialized_data (bytes): serialized form of the event data.
"""
row_identifier = getattr(
event_data, '_event_data_stream_row_identifier', None)
if row_identifier is not None:
event_data_stream_identifier = identifiers.SQLTableIdentifier(
self._CONTAINER_TYPE_EVENT_DATA_STREAM, row_identifier)
lookup_key = event_data_stream_identifier.CopyToString()
event_data_stream_identifier = (
self._event_data_stream_identifier_mappings.get(lookup_key, None))
if event_data_stream_identifier:
event_data.SetEventDataStreamIdentifier(event_data_stream_identifier)
identifier = event_data.GetIdentifier()
lookup_key = identifier.CopyToString()
self._storage_writer.AddEventData(
event_data, serialized_data=serialized_data)
last_write_identifier = event_data.GetIdentifier()
self._event_data_identifier_mappings[lookup_key] = last_write_identifier
def _AddEventDataStream(self, event_data_stream, serialized_data=None):
"""Adds an event data stream.
Args:
event_data_stream (EventDataStream): event data stream.
serialized_data (bytes): serialized form of the event data stream.
"""
identifier = event_data_stream.GetIdentifier()
lookup_key = identifier.CopyToString()
self._storage_writer.AddEventDataStream(
event_data_stream, serialized_data=serialized_data)
identifier = event_data_stream.GetIdentifier()
self._event_data_stream_identifier_mappings[lookup_key] = identifier
def _PrepareForNextContainerType(self):
"""Prepares for the next container type.
This method prepares the task storage for merging the next container type.
It sets the active container type, its add method and active cursor
accordingly.
"""
self._active_container_type = self._container_types.pop(0)
self._add_active_container_method = self._add_container_type_methods.get(
self._active_container_type)
self._active_cursor = 0
def _GetContainerTypes(self):
"""Retrieves the container types to merge.
Container types not defined in _CONTAINER_TYPES are ignored and not merged.
Specific container types reference other container types, such
as event referencing event data. The names are ordered to ensure the
attribute containers are merged in the correct order.
Returns:
list[str]: names of the container types to merge.
"""
container_types = []
for container_type in self._CONTAINER_TYPES:
# pylint: disable=protected-access
if self._store._HasAttributeContainers(container_type):
container_types.append(container_type)
return container_types
def _GetAttributeContainers(
self, container_type, callback=None, cursor=0, maximum_number_of_items=0):
"""Retrieves attribute containers of the specified type.
Args:
container_type (str): attribute container type.
callback (function[StorageWriter, AttributeContainer]): function to call
after each attribute container is deserialized.
cursor (int): Redis cursor for scanning items.
maximum_number_of_items (Optional[int]): maximum number of
          containers to retrieve, where 0 represents no limit.
Returns:
list(AttributeContainer): attribute containers from Redis.
"""
if not cursor:
cursor = 0
cursor, items = self._store.GetSerializedAttributeContainers(
container_type, cursor, maximum_number_of_items)
containers = []
identifiers_to_delete = []
for identifier_bytes, serialized_container in items.items():
identifier_string = codecs.decode(identifier_bytes, 'utf-8')
identifier = identifiers.RedisKeyIdentifier(identifier_string)
identifiers_to_delete.append(identifier)
container = self._DeserializeAttributeContainer(
self._active_container_type, serialized_container)
container.SetIdentifier(identifier)
if callback:
callback(self._storage_writer, container)
containers.append(container)
self._store.RemoveAttributeContainers(container_type, identifiers_to_delete)
self._active_cursor = cursor
containers = self._active_extra_containers + containers
if maximum_number_of_items:
self._active_extra_containers = containers[maximum_number_of_items:]
return containers[:maximum_number_of_items]
def MergeAttributeContainers(
self, callback=None, maximum_number_of_containers=0):
"""Reads attribute containers from a task store into the writer.
Args:
callback (Optional[function[StorageWriter, AttributeContainer]]): function
to call after each attribute container is deserialized.
maximum_number_of_containers (Optional[int]): maximum number of
          containers to merge, where 0 represents no limit.
Returns:
bool: True if the entire task storage file has been merged.
Raises:
RuntimeError: if the add method for the active attribute container
type is missing.
"""
if not self._container_types:
self._container_types = self._GetContainerTypes()
number_of_containers = 0
while (self._active_cursor or self._container_types
or self._active_extra_containers):
if not self._active_cursor and not self._active_extra_containers:
self._PrepareForNextContainerType()
containers = self._GetAttributeContainers(
self._active_container_type, callback=callback,
cursor=self._active_cursor,
maximum_number_of_items=maximum_number_of_containers)
if not containers:
self._active_cursor = 0
continue
for container in containers:
self._add_active_container_method(container)
number_of_containers += 1
if 0 < maximum_number_of_containers <= number_of_containers:
logger.debug(
'Only merged {0:d} containers'.format(number_of_containers))
return False
logger.debug('Merged {0:d} containers'.format(number_of_containers))
# While all the containers have been merged, the 'merging' key is still
# present, so we still need to remove the store.
self._store.Remove()
return True
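# Illustrative usage sketch (surrounding objects assumed): merge a task store
# in bounded batches until the reader reports completion.
#
#     reader = RedisMergeReader(storage_writer, task, redis_client=client)
#     while not reader.MergeAttributeContainers(maximum_number_of_containers=100):
#         pass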
|
|
#!/usr/bin/env python
__author__ = 'greghines'
import datetime
import pymongo
from geoip import geolite2
from pytz import timezone
import pytz
import time
import IP2Location
from time import mktime
import pygeoip
import csv
import geoip2.webservice
import socket
# connect to the mongodb server
client = pymongo.MongoClient()
db = client['penguin_2015-06-01']
subjects = db["penguin_subjects"]
classifications = db["penguin_classifications"]
users = db["penguin_users"]
gmt = timezone("GMT")
ballots = {}
to_skip = []
from tzwhere import tzwhere
w = tzwhere.tzwhere()
ip_to_tmz = {}
print "first batch"
with open("/home/greg/Databases/batch-request-c2f9da3a-0952-11e5-a81d-689f178bad51/batch-lookup.csv","rb") as f:
ips = csv.reader(f, delimiter=',', quotechar='|')
next(f)
for row in ips:
# ip address, lat,long
try:
ip_address,lat,long = row[0],float(row[10]),float(row[11])
# print ip_address
tmz = w.tzNameAt(lat, long)
ip_to_tmz[str(ip_address)] = timezone(tmz)
except (ValueError,AttributeError) as e:
# print row
continue
print "second batch"
with open("/home/greg/Databases/batch-request-91297ad0-09d7-11e5-910c-6ed4178bad51/batch-lookup.csv","rb") as f:
ips = csv.reader(f, delimiter=',', quotechar='|')
next(f)
for row in ips:
# ip address, lat,long
try:
ip_address,lat,long = row[0],float(row[10]),float(row[11])
# print ip_address
tmz = w.tzNameAt(lat, long)
# assert str(ip_address) not in ip_to_tmz
ip_to_tmz[str(ip_address)] = timezone(tmz)
except (ValueError,AttributeError) as e:
# print row
continue
print "third batch"
with open("/home/greg/Databases/batch-request-520e8ab6-09dc-11e5-90b4-27a7178bad51/batch-lookup.csv","rb") as f:
ips = csv.reader(f, delimiter=',', quotechar='|')
next(f)
for row in ips:
# ip address, lat,long
try:
ip_address,lat,long = row[0],float(row[10]),float(row[11])
# print ip_address
tmz = w.tzNameAt(lat, long)
ip_to_tmz[str(ip_address)] = timezone(tmz)
except (ValueError,AttributeError) as e:
# print row
continue
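# tzwhere maps a latitude/longitude pair to an IANA timezone name, e.g.
# w.tzNameAt(51.48, 0.0) would give something like 'Europe/London'
# (illustrative coordinates)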
with open("/home/greg/Databases/timezones","rb") as f:
ips = csv.reader(f, delimiter=' ')
for ip_address,tmz in ips:
print ip_address,tmz
ip_to_tmz[str(ip_address)] = timezone(tmz)
# assert False
# if tmz is None:
# print row
# assert False
# with open("/home/greg/Databases/geo_ips.txt","rb") as f:
# #ips = csv.reader(f, delimiter='\t', quotechar='|')
# l = "a"
# while l:
# l = f.readline()
# if not l:
# break
#
# words = l.split("\t")
# try:
# socket.inet_aton(words[0])
# print "ip :: " + words[0]
#
# for i in range(4):
# words = f.readline().split("\t")
# print words
# print
# except socket.error:
# print "not valid"
#
# # words = [""]
# # # while words[-1] != '\n':
# # for i in range(4):
# # l = f.readline()
# # words = l.split("\t")
# #
# # print words
# # print
#
#
# assert False
# with open("/home/greg/bad_ipaddresses.txt","rb") as csvfile:
# ips = csv.reader(csvfile, delimiter=',', quotechar='|')
# for row in ips:
# to_skip.append(row[0])
# print to_skip
bad_ips = set()
# unknown_ips = set()
gic = pygeoip.GeoIP('/home/greg/Databases/GeoLiteCity.dat',flags=pygeoip.const.MMAP_CACHE)
weird_ips = set()
# client = geoip2.webservice.Client("101664", '3Wcy5fQZpepH')
extra_info = {}
# print classifications.count()#{"created_at":{"$gte":datetime.datetime(2015, 4, 25)},"created_at":{"$lte":datetime.datetime(2015, 5, 25)}})
for ii,classification in enumerate(classifications.find({"created_at":{"$gte":datetime.datetime(2015, 4, 25),"$lte":datetime.datetime(2015, 5, 25)}})):
#print classification
print ii
# try:
try:
user = classification["user_name"]
except KeyError:
continue
our_time_stamp = classification["created_at"]
ip_address = str(classification["user_ip"])
# print ip_address
match = geolite2.lookup(ip_address)
# rec = gic.record_by_addr(ip_address)
# print ip_address == "162.232.194.146"
# print "162.232.194.146" in ip_to_tmz
# print ip_to_tmz["162.232.194.146"]
# print match
if (match is None) or (match.timezone == 'None') or (match.timezone is None):
# print "="
try:
tmz = ip_to_tmz[ip_address]
# print "----"
except KeyError:
# print ip_address
bad_ips.add(ip_address)
print classification
print "**"
assert False
continue
# unknown_ips.add(ip_address)
# if rec["time_zone"] is None:
# # print rec
# # response = client.insights(ip_address)
# # print response
# # assert False
# bad_ips.add(ip_address)
else:
try:
tmz = timezone(match.timezone)
except (pytz.exceptions.UnknownTimeZoneError,AttributeError) as e:
bad_ips.add(ip_address)
print match
print ip_address
print "-"
assert False
continue
# print match
# print match
# time_index = ["finished_at" in ann for ann in classification["annotations"]].index(True)
# finished_at_str = classification["annotations"][time_index]["finished_at"]
#
# started_at_str = classification["annotations"][time_index]["started_at"]
# started_at = time.strptime(started_at_str,"%a, %d %b %Y %H:%M:%S %Z")
#
# started_at=datetime.datetime(2015,started_at.tm_mon,started_at.tm_mday,started_at.tm_hour,started_at.tm_min)
#
#
#
# # convert from str into datetime instance
# finished_at = time.strptime(finished_at_str,"%a, %d %b %Y %H:%M:%S %Z")
#
# temp = datetime.datetime(2015,finished_at.tm_mon,finished_at.tm_mday,finished_at.tm_hour,finished_at.tm_min)
#
# # add the timezone
# finished_at = gmt.localize(datetime.datetime(2015,finished_at.tm_mon,finished_at.tm_mday,finished_at.tm_hour,finished_at.tm_min))
created_at = gmt.localize(our_time_stamp)
# if our_time_stamp>temp:
# delta_t = our_time_stamp-temp
# else:
# delta_t = temp-our_time_stamp
# print delta_t.seconds
# if delta_t.seconds >= 5000:
# print ip_address
# print classification["created_at"]
# print classification["updated_at"]
# print classification["annotations"]
# print temp
# print our_time_stamp
# print delta_t
# print
# weird_ips.add(ip_address)
contest_start = datetime.datetime(2015, 4, 25)
contest_start = tmz.localize(contest_start)
contest_end = datetime.datetime(2015, 5, 25)
contest_end = tmz.localize(contest_end)
# normalize to the correct time zone
# finished_at = finished_at.astimezone(tmz)
created_at = created_at.astimezone(tmz)
# print (created_at >= contest_start) and (created_at <= contest_end)
# and convert it to the local timezone
# print finished_at
date = (created_at.month,created_at.day)
if user not in ballots:
ballots[user] = {}
if date not in ballots[user]:
ballots[user][date] = 1
else:
ballots[user][date] += 1
    if user not in extra_info:
extra_info[user] = set()
extra_info[user].add(created_at)
ballot_list = []
for user in ballots:
for date in ballots[user]:
if ballots[user][date] >= 10:
# todo : check that they want to be in contest
try:
u = users.find_one({"name":user})
preferences = u["preferences"]["penguin"]
if ("competition_opt_in" in preferences) and (preferences["competition_opt_in"] == "true"):
ballot_list.append(user)
            except (KeyError, TypeError):
                # find_one may return None, or the record may lack preference keys
                print u
                continue
import random
winner = random.sample(ballot_list,3)
print winner
|
|
from __future__ import with_statement
import binascii
import datetime
import pytest
import redis
import time
from redis._compat import (unichr, u, b, ascii_letters, iteritems, iterkeys,
itervalues)
from redis.client import parse_info
from redis import exceptions
from .conftest import skip_if_server_version_lt
# won't need this after next version of pytest
from distutils.version import StrictVersion
# RESPONSE CALLBACKS
class TestResponseCallbacks(object):
"Tests for the response callback system"
def test_response_callbacks(self, r):
assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS
assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS)
r.set_response_callback('GET', lambda x: 'static')
r['a'] = 'foo'
assert r['a'] == 'static'
class TestRedisCommands(object):
def test_command_on_invalid_key_type(self, r):
r.lpush('a', '1')
with pytest.raises(redis.ResponseError):
r['a']
### SERVER INFORMATION ###
def test_client_list(self, r):
clients = r.client_list()
assert isinstance(clients[0], dict)
assert 'addr' in clients[0]
@skip_if_server_version_lt('2.6.9')
def test_client_getname(self, r):
assert r.client_getname() is None
@skip_if_server_version_lt('2.6.9')
def test_client_setname(self, r):
assert r.client_setname('redis_py_test')
assert r.client_getname() == 'redis_py_test'
def test_config_get(self, r):
data = r.config_get()
assert 'maxmemory' in data
assert data['maxmemory'].isdigit()
def test_config_resetstat(self, r):
r.ping()
assert int(r.info()['total_commands_processed']) > 1
r.config_resetstat()
assert int(r.info()['total_commands_processed']) == 1
def test_config_set(self, r):
data = r.config_get()
rdbname = data['dbfilename']
try:
assert r.config_set('dbfilename', 'redis_py_test.rdb')
assert r.config_get()['dbfilename'] == 'redis_py_test.rdb'
finally:
assert r.config_set('dbfilename', rdbname)
def test_dbsize(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
assert r.dbsize() == 2
def test_echo(self, r):
assert r.echo('foo bar') == b('foo bar')
def test_info(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
info = r.info()
assert isinstance(info, dict)
assert info['db9']['keys'] == 2
def test_lastsave(self, r):
assert isinstance(r.lastsave(), datetime.datetime)
def test_object(self, r):
r['a'] = 'foo'
assert isinstance(r.object('refcount', 'a'), int)
assert isinstance(r.object('idletime', 'a'), int)
assert r.object('encoding', 'a') == b('raw')
def test_ping(self, r):
assert r.ping()
@skip_if_server_version_lt('2.6.0')
def test_time(self, r):
t = r.time()
assert len(t) == 2
assert isinstance(t[0], int)
assert isinstance(t[1], int)
### BASIC KEY COMMANDS ###
def test_append(self, r):
assert r.append('a', 'a1') == 2
assert r['a'] == b('a1')
assert r.append('a', 'a2') == 4
assert r['a'] == b('a1a2')
@skip_if_server_version_lt('2.6.0')
def test_bitcount(self, r):
r.setbit('a', 5, True)
assert r.bitcount('a') == 1
r.setbit('a', 6, True)
assert r.bitcount('a') == 2
r.setbit('a', 5, False)
assert r.bitcount('a') == 1
r.setbit('a', 9, True)
r.setbit('a', 17, True)
r.setbit('a', 25, True)
r.setbit('a', 33, True)
assert r.bitcount('a') == 5
assert r.bitcount('a', 0, -1) == 5
assert r.bitcount('a', 2, 3) == 2
assert r.bitcount('a', 2, -1) == 3
assert r.bitcount('a', -2, -1) == 2
assert r.bitcount('a', 1, 1) == 1
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_empty_string(self, r):
r['a'] = ''
r.bitop('not', 'r', 'a')
assert r.get('r') is None
@skip_if_server_version_lt('2.6.0')
def test_bitop_not(self, r):
test_str = b('\xAA\x00\xFF\x55')
correct = ~0xAA00FF55 & 0xFFFFFFFF
r['a'] = test_str
r.bitop('not', 'r', 'a')
assert int(binascii.hexlify(r['r']), 16) == correct
@skip_if_server_version_lt('2.6.0')
def test_bitop_not_in_place(self, r):
test_str = b('\xAA\x00\xFF\x55')
correct = ~0xAA00FF55 & 0xFFFFFFFF
r['a'] = test_str
r.bitop('not', 'a', 'a')
assert int(binascii.hexlify(r['a']), 16) == correct
@skip_if_server_version_lt('2.6.0')
def test_bitop_single_string(self, r):
test_str = b('\x01\x02\xFF')
r['a'] = test_str
r.bitop('and', 'res1', 'a')
r.bitop('or', 'res2', 'a')
r.bitop('xor', 'res3', 'a')
assert r['res1'] == test_str
assert r['res2'] == test_str
assert r['res3'] == test_str
@skip_if_server_version_lt('2.6.0')
def test_bitop_string_operands(self, r):
r['a'] = b('\x01\x02\xFF\xFF')
r['b'] = b('\x01\x02\xFF')
r.bitop('and', 'res1', 'a', 'b')
r.bitop('or', 'res2', 'a', 'b')
r.bitop('xor', 'res3', 'a', 'b')
assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00
assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF
assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF
def test_decr(self, r):
assert r.decr('a') == -1
assert r['a'] == b('-1')
assert r.decr('a') == -2
assert r['a'] == b('-2')
assert r.decr('a', amount=5) == -7
assert r['a'] == b('-7')
def test_delete(self, r):
assert r.delete('a') == 0
r['a'] = 'foo'
assert r.delete('a') == 1
def test_delete_with_multiple_keys(self, r):
r['a'] = 'foo'
r['b'] = 'bar'
assert r.delete('a', 'b') == 2
assert r.get('a') is None
assert r.get('b') is None
def test_delitem(self, r):
r['a'] = 'foo'
del r['a']
assert r.get('a') is None
@skip_if_server_version_lt('2.6.0')
def test_dump_and_restore(self, r):
r['a'] = 'foo'
dumped = r.dump('a')
del r['a']
r.restore('a', 0, dumped)
assert r['a'] == b('foo')
def test_exists(self, r):
assert not r.exists('a')
r['a'] = 'foo'
assert r.exists('a')
def test_exists_contains(self, r):
assert 'a' not in r
r['a'] = 'foo'
assert 'a' in r
def test_expire(self, r):
assert not r.expire('a', 10)
r['a'] = 'foo'
assert r.expire('a', 10)
assert 0 < r.ttl('a') <= 10
assert r.persist('a')
assert not r.ttl('a')
def test_expireat_datetime(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
r['a'] = 'foo'
assert r.expireat('a', expire_at)
assert 0 < r.ttl('a') <= 60
def test_expireat_no_key(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
assert not r.expireat('a', expire_at)
def test_expireat_unixtime(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
r['a'] = 'foo'
expire_at_seconds = int(time.mktime(expire_at.timetuple()))
assert r.expireat('a', expire_at_seconds)
assert 0 < r.ttl('a') <= 60
def test_get_and_set(self, r):
# get and set can't be tested independently of each other
assert r.get('a') is None
byte_string = b('value')
integer = 5
unicode_string = unichr(3456) + u('abcd') + unichr(3421)
assert r.set('byte_string', byte_string)
assert r.set('integer', 5)
assert r.set('unicode_string', unicode_string)
assert r.get('byte_string') == byte_string
assert r.get('integer') == b(str(integer))
assert r.get('unicode_string').decode('utf-8') == unicode_string
def test_getitem_and_setitem(self, r):
r['a'] = 'bar'
assert r['a'] == b('bar')
def test_getitem_raises_keyerror_for_missing_key(self, r):
with pytest.raises(KeyError):
r['a']
def test_get_set_bit(self, r):
# no value
assert not r.getbit('a', 5)
# set bit 5
assert not r.setbit('a', 5, True)
assert r.getbit('a', 5)
# unset bit 4
assert not r.setbit('a', 4, False)
assert not r.getbit('a', 4)
# set bit 4
assert not r.setbit('a', 4, True)
assert r.getbit('a', 4)
# set bit 5 again
assert r.setbit('a', 5, True)
assert r.getbit('a', 5)
def test_getrange(self, r):
r['a'] = 'foo'
assert r.getrange('a', 0, 0) == b('f')
assert r.getrange('a', 0, 2) == b('foo')
assert r.getrange('a', 3, 4) == b('')
def test_getset(self, r):
assert r.getset('a', 'foo') is None
assert r.getset('a', 'bar') == b('foo')
def test_incr(self, r):
assert r.incr('a') == 1
assert r['a'] == b('1')
assert r.incr('a') == 2
assert r['a'] == b('2')
assert r.incr('a', amount=5) == 7
assert r['a'] == b('7')
def test_incrby(self, r):
assert r.incrby('a') == 1
assert r.incrby('a', 4) == 5
assert r['a'] == b('5')
@skip_if_server_version_lt('2.6.0')
def test_incrbyfloat(self, r):
assert r.incrbyfloat('a') == 1.0
assert r['a'] == b('1')
assert r.incrbyfloat('a', 1.1) == 2.1
assert float(r['a']) == float(2.1)
def test_keys(self, r):
assert r.keys() == []
keys_with_underscores = set([b('test_a'), b('test_b')])
keys = keys_with_underscores.union(set([b('testc')]))
for key in keys:
r[key] = 1
assert set(r.keys(pattern='test_*')) == keys_with_underscores
assert set(r.keys(pattern='test*')) == keys
def test_mget(self, r):
assert r.mget(['a', 'b']) == [None, None]
r['a'] = '1'
r['b'] = '2'
r['c'] = '3'
assert r.mget('a', 'other', 'b', 'c') == [b('1'), None, b('2'), b('3')]
def test_mset(self, r):
d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
assert r.mset(d)
for k, v in iteritems(d):
assert r[k] == v
def test_mset_kwargs(self, r):
d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
assert r.mset(**d)
for k, v in iteritems(d):
assert r[k] == v
def test_msetnx(self, r):
d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
assert r.msetnx(d)
d2 = {'a': b('x'), 'd': b('4')}
assert not r.msetnx(d2)
for k, v in iteritems(d):
assert r[k] == v
assert r.get('d') is None
def test_msetnx_kwargs(self, r):
d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
assert r.msetnx(**d)
d2 = {'a': b('x'), 'd': b('4')}
assert not r.msetnx(**d2)
for k, v in iteritems(d):
assert r[k] == v
assert r.get('d') is None
@skip_if_server_version_lt('2.6.0')
def test_pexpire(self, r):
assert not r.pexpire('a', 60000)
r['a'] = 'foo'
assert r.pexpire('a', 60000)
assert 0 < r.pttl('a') <= 60000
assert r.persist('a')
assert r.pttl('a') is None
@skip_if_server_version_lt('2.6.0')
def test_pexpireat_datetime(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
r['a'] = 'foo'
assert r.pexpireat('a', expire_at)
assert 0 < r.pttl('a') <= 60000
@skip_if_server_version_lt('2.6.0')
def test_pexpireat_no_key(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
assert not r.pexpireat('a', expire_at)
@skip_if_server_version_lt('2.6.0')
def test_pexpireat_unixtime(self, r):
expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1)
r['a'] = 'foo'
expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000
assert r.pexpireat('a', expire_at_seconds)
assert 0 < r.pttl('a') <= 60000
@skip_if_server_version_lt('2.6.0')
def test_psetex(self, r):
assert r.psetex('a', 1000, 'value')
assert r['a'] == b('value')
assert 0 < r.pttl('a') <= 1000
@skip_if_server_version_lt('2.6.0')
def test_psetex_timedelta(self, r):
expire_at = datetime.timedelta(milliseconds=1000)
assert r.psetex('a', expire_at, 'value')
assert r['a'] == b('value')
assert 0 < r.pttl('a') <= 1000
def test_randomkey(self, r):
assert r.randomkey() is None
for key in ('a', 'b', 'c'):
r[key] = 1
assert r.randomkey() in (b('a'), b('b'), b('c'))
def test_rename(self, r):
r['a'] = '1'
assert r.rename('a', 'b')
assert r.get('a') is None
assert r['b'] == b('1')
def test_renamenx(self, r):
r['a'] = '1'
r['b'] = '2'
assert not r.renamenx('a', 'b')
assert r['a'] == b('1')
assert r['b'] == b('2')
@skip_if_server_version_lt('2.6.0')
def test_set_nx(self, r):
assert r.set('a', '1', nx=True)
assert not r.set('a', '2', nx=True)
assert r['a'] == b('1')
@skip_if_server_version_lt('2.6.0')
def test_set_xx(self, r):
assert not r.set('a', '1', xx=True)
assert r.get('a') is None
r['a'] = 'bar'
assert r.set('a', '2', xx=True)
assert r.get('a') == b('2')
@skip_if_server_version_lt('2.6.0')
def test_set_px(self, r):
assert r.set('a', '1', px=10000)
assert r['a'] == b('1')
assert 0 < r.pttl('a') <= 10000
assert 0 < r.ttl('a') <= 10
@skip_if_server_version_lt('2.6.0')
def test_set_px_timedelta(self, r):
expire_at = datetime.timedelta(milliseconds=1000)
assert r.set('a', '1', px=expire_at)
assert 0 < r.pttl('a') <= 1000
assert 0 < r.ttl('a') <= 1
@skip_if_server_version_lt('2.6.0')
def test_set_ex(self, r):
assert r.set('a', '1', ex=10)
assert 0 < r.ttl('a') <= 10
@skip_if_server_version_lt('2.6.0')
def test_set_ex_timedelta(self, r):
expire_at = datetime.timedelta(seconds=60)
assert r.set('a', '1', ex=expire_at)
assert 0 < r.ttl('a') <= 60
@skip_if_server_version_lt('2.6.0')
def test_set_multipleoptions(self, r):
r['a'] = 'val'
assert r.set('a', '1', xx=True, px=10000)
assert 0 < r.ttl('a') <= 10
def test_setex(self, r):
assert r.setex('a', '1', 60)
assert r['a'] == b('1')
assert 0 < r.ttl('a') <= 60
def test_setnx(self, r):
assert r.setnx('a', '1')
assert r['a'] == b('1')
assert not r.setnx('a', '2')
assert r['a'] == b('1')
def test_setrange(self, r):
assert r.setrange('a', 5, 'foo') == 8
assert r['a'] == b('\0\0\0\0\0foo')
r['a'] = 'abcdefghijh'
assert r.setrange('a', 6, '12345') == 11
assert r['a'] == b('abcdef12345')
def test_strlen(self, r):
r['a'] = 'foo'
assert r.strlen('a') == 3
def test_substr(self, r):
r['a'] = '0123456789'
assert r.substr('a', 0) == b('0123456789')
assert r.substr('a', 2) == b('23456789')
assert r.substr('a', 3, 5) == b('345')
assert r.substr('a', 3, -2) == b('345678')
def test_type(self, r):
assert r.type('a') == b('none')
r['a'] = '1'
assert r.type('a') == b('string')
del r['a']
r.lpush('a', '1')
assert r.type('a') == b('list')
del r['a']
r.sadd('a', '1')
assert r.type('a') == b('set')
del r['a']
r.zadd('a', **{'1': 1})
assert r.type('a') == b('zset')
    ### LIST COMMANDS ###
def test_blpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('3'))
assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('4'))
assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('1'))
assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('2'))
assert r.blpop(['b', 'a'], timeout=1) is None
r.rpush('c', '1')
assert r.blpop('c', timeout=1) == (b('c'), b('1'))
def test_brpop(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('4'))
assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('3'))
assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('2'))
assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('1'))
assert r.brpop(['b', 'a'], timeout=1) is None
r.rpush('c', '1')
assert r.brpop('c', timeout=1) == (b('c'), b('1'))
def test_brpoplpush(self, r):
r.rpush('a', '1', '2')
r.rpush('b', '3', '4')
assert r.brpoplpush('a', 'b') == b('2')
assert r.brpoplpush('a', 'b') == b('1')
assert r.brpoplpush('a', 'b', timeout=1) is None
assert r.lrange('a', 0, -1) == []
assert r.lrange('b', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
def test_brpoplpush_empty_string(self, r):
r.rpush('a', '')
assert r.brpoplpush('a', 'b') == b('')
def test_lindex(self, r):
r.rpush('a', '1', '2', '3')
assert r.lindex('a', '0') == b('1')
assert r.lindex('a', '1') == b('2')
assert r.lindex('a', '2') == b('3')
def test_linsert(self, r):
r.rpush('a', '1', '2', '3')
assert r.linsert('a', 'after', '2', '2.5') == 4
assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')]
assert r.linsert('a', 'before', '2', '1.5') == 5
assert r.lrange('a', 0, -1) == \
[b('1'), b('1.5'), b('2'), b('2.5'), b('3')]
def test_llen(self, r):
r.rpush('a', '1', '2', '3')
assert r.llen('a') == 3
def test_lpop(self, r):
r.rpush('a', '1', '2', '3')
assert r.lpop('a') == b('1')
assert r.lpop('a') == b('2')
assert r.lpop('a') == b('3')
assert r.lpop('a') is None
def test_lpush(self, r):
assert r.lpush('a', '1') == 1
assert r.lpush('a', '2') == 2
assert r.lpush('a', '3', '4') == 4
assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')]
def test_lpushx(self, r):
assert r.lpushx('a', '1') == 0
assert r.lrange('a', 0, -1) == []
r.rpush('a', '1', '2', '3')
assert r.lpushx('a', '4') == 4
assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')]
def test_lrange(self, r):
r.rpush('a', '1', '2', '3', '4', '5')
assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')]
assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')]
assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')]
def test_lrem(self, r):
r.rpush('a', '1', '1', '1', '1')
assert r.lrem('a', '1', 1) == 1
assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')]
assert r.lrem('a', '1') == 3
assert r.lrange('a', 0, -1) == []
def test_lset(self, r):
r.rpush('a', '1', '2', '3')
assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')]
assert r.lset('a', 1, '4')
assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')]
def test_ltrim(self, r):
r.rpush('a', '1', '2', '3')
assert r.ltrim('a', 0, 1)
assert r.lrange('a', 0, -1) == [b('1'), b('2')]
def test_rpop(self, r):
r.rpush('a', '1', '2', '3')
assert r.rpop('a') == b('3')
assert r.rpop('a') == b('2')
assert r.rpop('a') == b('1')
assert r.rpop('a') is None
def test_rpoplpush(self, r):
r.rpush('a', 'a1', 'a2', 'a3')
r.rpush('b', 'b1', 'b2', 'b3')
assert r.rpoplpush('a', 'b') == b('a3')
assert r.lrange('a', 0, -1) == [b('a1'), b('a2')]
assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')]
def test_rpush(self, r):
assert r.rpush('a', '1') == 1
assert r.rpush('a', '2') == 2
assert r.rpush('a', '3', '4') == 4
assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
def test_rpushx(self, r):
assert r.rpushx('a', 'b') == 0
assert r.lrange('a', 0, -1) == []
r.rpush('a', '1', '2', '3')
assert r.rpushx('a', '4') == 4
assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
### SCAN COMMANDS ###
@skip_if_server_version_lt('2.8.0')
def test_scan(self, r):
r.set('a', 1)
r.set('b', 2)
r.set('c', 3)
cursor, keys = r.scan()
assert cursor == b('0')
assert set(keys) == set([b('a'), b('b'), b('c')])
_, keys = r.scan(match='a')
assert set(keys) == set([b('a')])
@skip_if_server_version_lt('2.8.0')
def test_sscan(self, r):
r.sadd('a', 1, 2, 3)
cursor, members = r.sscan('a')
assert cursor == b('0')
assert set(members) == set([b('1'), b('2'), b('3')])
_, members = r.sscan('a', match=b('1'))
assert set(members) == set([b('1')])
@skip_if_server_version_lt('2.8.0')
def test_hscan(self, r):
r.hmset('a', {'a': 1, 'b': 2, 'c': 3})
cursor, dic = r.hscan('a')
assert cursor == b('0')
assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')}
_, dic = r.hscan('a', match='a')
assert dic == {b('a'): b('1')}
@skip_if_server_version_lt('2.8.0')
def test_zscan(self, r):
r.zadd('a', 'a', 1, 'b', 2, 'c', 3)
cursor, pairs = r.zscan('a')
assert cursor == b('0')
assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)])
_, pairs = r.zscan('a', match='a')
assert set(pairs) == set([(b('a'), 1)])
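    # Note: redis-py also offers *_iter() helpers (scan_iter, sscan_iter,
    # hscan_iter, zscan_iter) that hide cursor management, assuming the
    # installed client version provides them; a sketch:
    #   for key in r.scan_iter(match='a*'):
    #       ...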
### SET COMMANDS ###
def test_sadd(self, r):
members = set([b('1'), b('2'), b('3')])
r.sadd('a', *members)
assert r.smembers('a') == members
def test_scard(self, r):
r.sadd('a', '1', '2', '3')
assert r.scard('a') == 3
def test_sdiff(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiff('a', 'b') == set([b('1'), b('2'), b('3')])
r.sadd('b', '2', '3')
assert r.sdiff('a', 'b') == set([b('1')])
def test_sdiffstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sdiffstore('c', 'a', 'b') == 3
assert r.smembers('c') == set([b('1'), b('2'), b('3')])
r.sadd('b', '2', '3')
assert r.sdiffstore('c', 'a', 'b') == 1
assert r.smembers('c') == set([b('1')])
def test_sinter(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinter('a', 'b') == set()
r.sadd('b', '2', '3')
assert r.sinter('a', 'b') == set([b('2'), b('3')])
def test_sinterstore(self, r):
r.sadd('a', '1', '2', '3')
assert r.sinterstore('c', 'a', 'b') == 0
assert r.smembers('c') == set()
r.sadd('b', '2', '3')
assert r.sinterstore('c', 'a', 'b') == 2
assert r.smembers('c') == set([b('2'), b('3')])
def test_sismember(self, r):
r.sadd('a', '1', '2', '3')
assert r.sismember('a', '1')
assert r.sismember('a', '2')
assert r.sismember('a', '3')
assert not r.sismember('a', '4')
def test_smembers(self, r):
r.sadd('a', '1', '2', '3')
assert r.smembers('a') == set([b('1'), b('2'), b('3')])
def test_smove(self, r):
r.sadd('a', 'a1', 'a2')
r.sadd('b', 'b1', 'b2')
assert r.smove('a', 'b', 'a1')
assert r.smembers('a') == set([b('a2')])
assert r.smembers('b') == set([b('b1'), b('b2'), b('a1')])
def test_spop(self, r):
s = [b('1'), b('2'), b('3')]
r.sadd('a', *s)
value = r.spop('a')
assert value in s
assert r.smembers('a') == set(s) - set([value])
def test_srandmember(self, r):
s = [b('1'), b('2'), b('3')]
r.sadd('a', *s)
assert r.srandmember('a') in s
@skip_if_server_version_lt('2.6.0')
def test_srandmember_multi_value(self, r):
s = [b('1'), b('2'), b('3')]
r.sadd('a', *s)
randoms = r.srandmember('a', number=2)
assert len(randoms) == 2
assert set(randoms).intersection(s) == set(randoms)
def test_srem(self, r):
r.sadd('a', '1', '2', '3', '4')
assert r.srem('a', '5') == 0
assert r.srem('a', '2', '4') == 2
assert r.smembers('a') == set([b('1'), b('3')])
def test_sunion(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
assert r.sunion('a', 'b') == set([b('1'), b('2'), b('3')])
def test_sunionstore(self, r):
r.sadd('a', '1', '2')
r.sadd('b', '2', '3')
assert r.sunionstore('c', 'a', 'b') == 3
assert r.smembers('c') == set([b('1'), b('2'), b('3')])
### SORTED SET COMMANDS ###
def test_zadd(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zrange('a', 0, -1) == [b('a1'), b('a2'), b('a3')]
def test_zcard(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zcard('a') == 3
def test_zcount(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zcount('a', '-inf', '+inf') == 3
assert r.zcount('a', 1, 2) == 2
assert r.zcount('a', 10, 20) == 0
def test_zincrby(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zincrby('a', 'a2') == 3.0
assert r.zincrby('a', 'a3', amount=5) == 8.0
assert r.zscore('a', 'a2') == 3.0
assert r.zscore('a', 'a3') == 8.0
def test_zinterstore_sum(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zinterstore('d', ['a', 'b', 'c']) == 2
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a3'), 8), (b('a1'), 9)]
def test_zinterstore_max(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a3'), 5), (b('a1'), 6)]
def test_zinterstore_min(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
r.zadd('b', a1=2, a2=3, a3=5)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a1'), 1), (b('a3'), 3)]
def test_zinterstore_with_weight(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a3'), 20), (b('a1'), 23)]
def test_zrange(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zrange('a', 0, 1) == [b('a1'), b('a2')]
assert r.zrange('a', 1, 2) == [b('a2'), b('a3')]
# withscores
assert r.zrange('a', 0, 1, withscores=True) == \
[(b('a1'), 1.0), (b('a2'), 2.0)]
assert r.zrange('a', 1, 2, withscores=True) == \
[(b('a2'), 2.0), (b('a3'), 3.0)]
# custom score function
assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \
[(b('a1'), 1), (b('a2'), 2)]
def test_zrangebyscore(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zrangebyscore('a', 2, 4) == [b('a2'), b('a3'), b('a4')]
# slicing with start/num
assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \
[b('a3'), b('a4')]
# withscores
assert r.zrangebyscore('a', 2, 4, withscores=True) == \
[(b('a2'), 2.0), (b('a3'), 3.0), (b('a4'), 4.0)]
# custom score function
assert r.zrangebyscore('a', 2, 4, withscores=True,
score_cast_func=int) == \
[(b('a2'), 2), (b('a3'), 3), (b('a4'), 4)]
def test_zrank(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zrank('a', 'a1') == 0
assert r.zrank('a', 'a2') == 1
assert r.zrank('a', 'a6') is None
def test_zrem(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zrem('a', 'a2') == 1
assert r.zrange('a', 0, -1) == [b('a1'), b('a3')]
assert r.zrem('a', 'b') == 0
assert r.zrange('a', 0, -1) == [b('a1'), b('a3')]
def test_zrem_multiple_keys(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zrem('a', 'a1', 'a2') == 2
assert r.zrange('a', 0, 5) == [b('a3')]
def test_zremrangebyrank(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zremrangebyrank('a', 1, 3) == 3
assert r.zrange('a', 0, 5) == [b('a1'), b('a5')]
def test_zremrangebyscore(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zremrangebyscore('a', 2, 4) == 3
assert r.zrange('a', 0, -1) == [b('a1'), b('a5')]
assert r.zremrangebyscore('a', 2, 4) == 0
assert r.zrange('a', 0, -1) == [b('a1'), b('a5')]
def test_zrevrange(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zrevrange('a', 0, 1) == [b('a3'), b('a2')]
assert r.zrevrange('a', 1, 2) == [b('a2'), b('a1')]
# withscores
assert r.zrevrange('a', 0, 1, withscores=True) == \
[(b('a3'), 3.0), (b('a2'), 2.0)]
assert r.zrevrange('a', 1, 2, withscores=True) == \
[(b('a2'), 2.0), (b('a1'), 1.0)]
# custom score function
assert r.zrevrange('a', 0, 1, withscores=True,
score_cast_func=int) == \
[(b('a3'), 3.0), (b('a2'), 2.0)]
def test_zrevrangebyscore(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zrevrangebyscore('a', 4, 2) == [b('a4'), b('a3'), b('a2')]
# slicing with start/num
assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \
[b('a3'), b('a2')]
# withscores
assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \
[(b('a4'), 4.0), (b('a3'), 3.0), (b('a2'), 2.0)]
# custom score function
assert r.zrevrangebyscore('a', 4, 2, withscores=True,
score_cast_func=int) == \
[(b('a4'), 4), (b('a3'), 3), (b('a2'), 2)]
def test_zrevrank(self, r):
r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
assert r.zrevrank('a', 'a1') == 4
assert r.zrevrank('a', 'a2') == 3
assert r.zrevrank('a', 'a6') is None
def test_zscore(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
assert r.zscore('a', 'a1') == 1.0
assert r.zscore('a', 'a2') == 2.0
assert r.zscore('a', 'a4') is None
def test_zunionstore_sum(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zunionstore('d', ['a', 'b', 'c']) == 4
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a2'), 3), (b('a4'), 4), (b('a3'), 8), (b('a1'), 9)]
def test_zunionstore_max(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a2'), 2), (b('a4'), 4), (b('a3'), 5), (b('a1'), 6)]
def test_zunionstore_min(self, r):
r.zadd('a', a1=1, a2=2, a3=3)
r.zadd('b', a1=2, a2=2, a3=4)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a1'), 1), (b('a2'), 2), (b('a3'), 3), (b('a4'), 4)]
def test_zunionstore_with_weight(self, r):
r.zadd('a', a1=1, a2=1, a3=1)
r.zadd('b', a1=2, a2=2, a3=2)
r.zadd('c', a1=6, a3=5, a4=4)
assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4
assert r.zrange('d', 0, -1, withscores=True) == \
[(b('a2'), 5), (b('a4'), 12), (b('a3'), 20), (b('a1'), 23)]
### HASH COMMANDS ###
def test_hget_and_hset(self, r):
r.hmset('a', {'1': 1, '2': 2, '3': 3})
assert r.hget('a', '1') == b('1')
assert r.hget('a', '2') == b('2')
assert r.hget('a', '3') == b('3')
# field was updated, redis returns 0
assert r.hset('a', '2', 5) == 0
assert r.hget('a', '2') == b('5')
# field is new, redis returns 1
assert r.hset('a', '4', 4) == 1
assert r.hget('a', '4') == b('4')
# key inside of hash that doesn't exist returns null value
assert r.hget('a', 'b') is None
def test_hdel(self, r):
r.hmset('a', {'1': 1, '2': 2, '3': 3})
assert r.hdel('a', '2') == 1
assert r.hget('a', '2') is None
assert r.hdel('a', '1', '3') == 2
assert r.hlen('a') == 0
def test_hexists(self, r):
r.hmset('a', {'1': 1, '2': 2, '3': 3})
assert r.hexists('a', '1')
assert not r.hexists('a', '4')
def test_hgetall(self, r):
h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')}
r.hmset('a', h)
assert r.hgetall('a') == h
def test_hincrby(self, r):
assert r.hincrby('a', '1') == 1
assert r.hincrby('a', '1', amount=2) == 3
assert r.hincrby('a', '1', amount=-2) == 1
@skip_if_server_version_lt('2.6.0')
def test_hincrbyfloat(self, r):
assert r.hincrbyfloat('a', '1') == 1.0
assert r.hincrbyfloat('a', '1') == 2.0
assert r.hincrbyfloat('a', '1', 1.2) == 3.2
def test_hkeys(self, r):
h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')}
r.hmset('a', h)
local_keys = list(iterkeys(h))
remote_keys = r.hkeys('a')
assert (sorted(local_keys) == sorted(remote_keys))
def test_hlen(self, r):
r.hmset('a', {'1': 1, '2': 2, '3': 3})
assert r.hlen('a') == 3
def test_hmget(self, r):
assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3})
assert r.hmget('a', 'a', 'b', 'c') == [b('1'), b('2'), b('3')]
def test_hmset(self, r):
h = {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')}
assert r.hmset('a', h)
assert r.hgetall('a') == h
def test_hsetnx(self, r):
# Initially set the hash field
assert r.hsetnx('a', '1', 1)
assert r.hget('a', '1') == b('1')
assert not r.hsetnx('a', '1', 2)
assert r.hget('a', '1') == b('1')
def test_hvals(self, r):
h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')}
r.hmset('a', h)
local_vals = list(itervalues(h))
remote_vals = r.hvals('a')
assert sorted(local_vals) == sorted(remote_vals)
### SORT ###
def test_sort_basic(self, r):
r.rpush('a', '3', '2', '1', '4')
assert r.sort('a') == [b('1'), b('2'), b('3'), b('4')]
def test_sort_limited(self, r):
r.rpush('a', '3', '2', '1', '4')
assert r.sort('a', start=1, num=2) == [b('2'), b('3')]
def test_sort_by(self, r):
r['score:1'] = 8
r['score:2'] = 3
r['score:3'] = 5
r.rpush('a', '3', '2', '1')
assert r.sort('a', by='score:*') == [b('2'), b('3'), b('1')]
def test_sort_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
assert r.sort('a', get='user:*') == [b('u1'), b('u2'), b('u3')]
def test_sort_get_multi(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
assert r.sort('a', get=('user:*', '#')) == \
[b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')]
def test_sort_get_groups_two(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
assert r.sort('a', get=('user:*', '#'), groups=True) == \
[(b('u1'), b('1')), (b('u2'), b('2')), (b('u3'), b('3'))]
def test_sort_groups_string_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
with pytest.raises(exceptions.DataError):
r.sort('a', get='user:*', groups=True)
def test_sort_groups_just_one_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
with pytest.raises(exceptions.DataError):
r.sort('a', get=['user:*'], groups=True)
def test_sort_groups_no_get(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r.rpush('a', '2', '3', '1')
with pytest.raises(exceptions.DataError):
r.sort('a', groups=True)
def test_sort_groups_three_gets(self, r):
r['user:1'] = 'u1'
r['user:2'] = 'u2'
r['user:3'] = 'u3'
r['door:1'] = 'd1'
r['door:2'] = 'd2'
r['door:3'] = 'd3'
r.rpush('a', '2', '3', '1')
assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \
[
(b('u1'), b('d1'), b('1')),
(b('u2'), b('d2'), b('2')),
(b('u3'), b('d3'), b('3'))
]
def test_sort_desc(self, r):
r.rpush('a', '2', '3', '1')
assert r.sort('a', desc=True) == [b('3'), b('2'), b('1')]
def test_sort_alpha(self, r):
r.rpush('a', 'e', 'c', 'b', 'd', 'a')
assert r.sort('a', alpha=True) == \
[b('a'), b('b'), b('c'), b('d'), b('e')]
def test_sort_store(self, r):
r.rpush('a', '2', '3', '1')
assert r.sort('a', store='sorted_values') == 3
assert r.lrange('sorted_values', 0, -1) == [b('1'), b('2'), b('3')]
def test_sort_all_options(self, r):
r['user:1:username'] = 'zeus'
r['user:2:username'] = 'titan'
r['user:3:username'] = 'hermes'
r['user:4:username'] = 'hercules'
r['user:5:username'] = 'apollo'
r['user:6:username'] = 'athena'
r['user:7:username'] = 'hades'
r['user:8:username'] = 'dionysus'
r['user:1:favorite_drink'] = 'yuengling'
r['user:2:favorite_drink'] = 'rum'
r['user:3:favorite_drink'] = 'vodka'
r['user:4:favorite_drink'] = 'milk'
r['user:5:favorite_drink'] = 'pinot noir'
r['user:6:favorite_drink'] = 'water'
r['user:7:favorite_drink'] = 'gin'
r['user:8:favorite_drink'] = 'apple juice'
r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4')
num = r.sort('gods', start=2, num=4, by='user:*:username',
get='user:*:favorite_drink', desc=True, alpha=True,
store='sorted')
assert num == 4
assert r.lrange('sorted', 0, 10) == \
[b('vodka'), b('milk'), b('gin'), b('apple juice')]
class TestStrictCommands(object):
def test_strict_zadd(self, sr):
sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0)
assert sr.zrange('a', 0, -1, withscores=True) == \
[(b('a1'), 1.0), (b('a2'), 2.0), (b('a3'), 3.0)]
def test_strict_lrem(self, sr):
sr.rpush('a', 'a1', 'a2', 'a3', 'a1')
sr.lrem('a', 0, 'a1')
assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')]
def test_strict_setex(self, sr):
assert sr.setex('a', 60, '1')
assert sr['a'] == b('1')
assert 0 < sr.ttl('a') <= 60
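    # Note: StrictRedis follows the Redis protocol argument order for SETEX
    # (name, time, value); the legacy Redis class tested above accepts
    # setex(name, value, time) instead.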
def test_strict_ttl(self, sr):
assert not sr.expire('a', 10)
sr['a'] = '1'
assert sr.expire('a', 10)
assert 0 < sr.ttl('a') <= 10
assert sr.persist('a')
assert sr.ttl('a') == -1
@skip_if_server_version_lt('2.6.0')
def test_strict_pttl(self, sr):
assert not sr.pexpire('a', 10000)
sr['a'] = '1'
assert sr.pexpire('a', 10000)
assert 0 < sr.pttl('a') <= 10000
assert sr.persist('a')
assert sr.pttl('a') == -1
class TestBinarySave(object):
def test_binary_get_set(self, r):
assert r.set(' foo bar ', '123')
assert r.get(' foo bar ') == b('123')
assert r.set(' foo\r\nbar\r\n ', '456')
assert r.get(' foo\r\nbar\r\n ') == b('456')
assert r.set(' \r\n\t\x07\x13 ', '789')
assert r.get(' \r\n\t\x07\x13 ') == b('789')
assert sorted(r.keys('*')) == \
[b(' \r\n\t\x07\x13 '), b(' foo\r\nbar\r\n '), b(' foo bar ')]
assert r.delete(' foo bar ')
assert r.delete(' foo\r\nbar\r\n ')
assert r.delete(' \r\n\t\x07\x13 ')
def test_binary_lists(self, r):
mapping = {
b('foo bar'): [b('1'), b('2'), b('3')],
b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')],
b('foo\tbar\x07'): [b('7'), b('8'), b('9')],
}
# fill in lists
for key, value in iteritems(mapping):
r.rpush(key, *value)
# check that KEYS returns all the keys as they are
assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping)))
# check that it is possible to get list content by key name
for key, value in iteritems(mapping):
assert r.lrange(key, 0, -1) == value
def test_22_info(self, r):
"""
Older Redis versions contained 'allocation_stats' in INFO that
was the cause of a number of bugs when parsing.
"""
info = "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," \
"13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," \
"20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," \
"27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," \
"34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," \
"41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," \
"49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," \
"58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," \
"67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," \
"76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," \
"85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," \
"94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," \
"103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," \
"111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," \
"119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," \
"127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," \
"135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," \
"144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," \
"155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," \
"172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," \
"187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," \
"207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," \
"220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," \
">=256=203"
parsed = parse_info(info)
assert 'allocation_stats' in parsed
assert '6' in parsed['allocation_stats']
assert '>=256' in parsed['allocation_stats']
def test_large_responses(self, r):
"The PythonParser has some special cases for return values > 1MB"
# load up 5MB of data into a key
data = ''.join([ascii_letters] * (5000000 // len(ascii_letters)))
r['a'] = data
assert r['a'] == b(data)
def test_floating_point_encoding(self, r):
"""
High precision floating point values sent to the server should keep
precision.
"""
timestamp = 1349673917.939762
r.zadd('a', 'a1', timestamp)
assert r.zscore('a', 'a1') == timestamp
|
|
import os
import sys
PY3 = sys.version_info[0] == 3
class ShamirSecret(object):
"""
    This performs Shamir Secret Sharing operations incrementally, in a way
    that is useful for PolyPasswordHasher. It allows checking share
    membership, generating shares one at a time, etc.
    One must provide the threshold at construction. To have the object
    generate the (random) coefficients as well, call it with the secret data.
"""
def __init__(self, threshold, secretdata=None):
self.threshold = threshold
self.secretdata = secretdata
self._coefficients = None
# if we're given data, let's compute the random coefficients. I do this
# here so I can later iteratively compute the shares
if secretdata is not None:
self._coefficients = []
for secretbyte in secretdata:
# this is the polynomial. The first byte is the secretdata.
# The next threshold-1 are (crypto) random coefficients
# I'm applying Shamir's secret sharing separately on each byte.
if PY3:
secretbyte = secretbyte.to_bytes(1, "little")
thesecoefficients = bytearray(secretbyte + os.urandom(threshold - 1))
self._coefficients.append(thesecoefficients)
def is_valid_share(self, share):
"""
This validates that a share is correct given the secret data.
It returns True if it is valid, False if it is not, and raises
various errors when given bad data.
"""
        # a share has the format (x, f(x) bytes): one byte of f(x) per byte of the secret
if type(share) is not tuple:
raise TypeError("Share is of incorrect type: {0}".format(type(share)))
if len(share) != 2:
raise ValueError("Share is of incorrect length: {0}".format(share))
if self._coefficients is None:
raise ValueError("Must initialize coefficients before checking is_valid_share")
if len(self._coefficients) != len(share[1]):
raise ValueError("Must initialize coefficients before checking is_valid_share")
# let's just compute the right value
return self.compute_share(share[0]) == share
def compute_share(self, x):
"""
        This computes a share, given x. It returns a tuple with x and the
        bytes f_0(x), f_1(x), f_2(x), ..., one byte per byte of the secret.
This raises various errors when given bad data.
"""
if type(x) is not int:
raise TypeError("In compute_share, x is of incorrect type: {0}".format(type(x)))
if x <= 0 or x >= 256:
raise ValueError("In compute_share, x must be between 1 and 255, not: {0}".format(x))
if self._coefficients is None:
raise ValueError("Must initialize coefficients before computing a share")
sharebytes = bytearray()
# go through the coefficients and compute f(x) for each value.
# Append that byte to the share
for thiscoefficient in self._coefficients:
thisshare = _f(x, thiscoefficient)
sharebytes.append(thisshare)
return (x, sharebytes)
def recover_secretdata(self, shares):
"""
This recovers the secret data and coefficients given at least threshold
shares. Note, if any provided share does not decode, an error is
raised.
"""
# discard duplicate shares
newshares = []
for share in shares:
if share not in newshares:
newshares.append(share)
shares = newshares
if self.threshold > len(shares):
raise ValueError("Threshold: {0} is smaller than the number of unique shares: {1}.".format(self.threshold, len(shares)))
if self.secretdata is not None:
raise ValueError("Recovering secretdata when some is stored. Use check_share instead.")
# the first byte of each share is the 'x'.
xs = []
for share in shares:
# the first byte should be unique...
if share[0] in xs:
raise ValueError("Different shares with the same first byte! {0!r}".format(share[0]))
# ...and all should be the same length
if len(share[1]) != len(shares[0][1]):
raise ValueError("Shares have different lengths!")
xs.append(share[0])
mycoefficients = []
mysecretdata = b''
# now walk through each byte of the secret and do lagrange interpolation
# to compute the coefficient...
for byte_to_use in range(0, len(shares[0][1])):
# we need to get the f(x)s from the appropriate bytes
fxs = []
for share in shares:
fxs.append(share[1][byte_to_use])
# build this polynomial
resulting_poly = _full_lagrange(xs, fxs)
            # If I have more shares than the threshold, the higher order
            # coefficients (those at index >= threshold) must be zero, since
            # the true polynomial has degree threshold - 1 (by Lagrange)...
if resulting_poly[:self.threshold] + [0] * (len(shares) - self.threshold) != resulting_poly:
raise ValueError("Shares do not match. Cannot decode")
# track this byte...
mycoefficients.append(bytearray(resulting_poly))
            # Python 2 had str == bytes, so plain strings would work there;
            # that is not the case in Python 3, where the integer coefficient
            # must be converted back to a single byte explicitly.
if PY3:
secret_byte = resulting_poly[0].to_bytes(1, "little")
else:
secret_byte = chr(resulting_poly[0])
mysecretdata += secret_byte
# they check out! Assign to the real ones!
self._coefficients = mycoefficients
self.secretdata = mysecretdata
####################### END OF MAIN CLASS #######################
### Private math helpers... Lagrange interpolation, polynomial math, etc.
# This actually computes f(x). It's private and not needed elsewhere...
def _f(x, coefs_bytes):
"""
This computes f(x) = a + bx + cx^2 + ...
The value x is x in the above formula.
The a, b, c, etc. bytes are the coefs_bytes in increasing order.
It returns the result.
"""
if x == 0:
raise ValueError('invalid share index value, cannot be 0')
accumulator = 0
# start with x_i = 1. We'll multiply by x each time around to increase it.
x_i = 1
for c in coefs_bytes:
# we multiply this byte (a,b, or c) with x raised to the right power.
accumulator = _gf256_add(accumulator, _gf256_mul(c, x_i))
# raise x_i to the next power by multiplying by x.
x_i = _gf256_mul(x_i, x)
return accumulator
# unfortunately, numpy doesn't seem to do polynomial arithmetic over
# finite fields... :(
#
# This helper function takes two lists and 'multiplies' them. I have only
# tested it with a second list of size <= 2, but I don't think this matters.
#
# for example: [1,3,4] * [4,5] will compute (1 + 3x + 4x^2) * (4 + 5x) ->
# 4 + 17x + 31x^2 + 20x^3 or [4, 17, 31, 20]
# or at least, this would be the case if we weren't in GF256...
# in GF256, this is:
# 4 + 9x + 31x^2 + 20x^3 or [4, 9, 31, 20]
def _multiply_polynomials(a, b):
# I'll compute each term separately and add them together
resultterms = []
# this grows to account for the fact the terms increase as it goes
# for example, multiplying by x, shifts all 1 right
termpadding = []
for bterm in b:
thisvalue = termpadding[:]
# multiply each a by the b term.
for aterm in a:
thisvalue.append(_gf256_mul(aterm, bterm))
# thisvalue.append(aterm * bterm)
resultterms = _add_polynomials(resultterms, thisvalue)
# moved another x value over...
termpadding.append(0)
return resultterms
# adds two polynomials together...
def _add_polynomials(a, b):
# make them the same length...
if len(a) < len(b):
a = a + [0]*(len(b)-len(a))
if len(a) > len(b):
b = b + [0]*(len(a)-len(b))
assert(len(a) == len(b))
result = []
for pos in range(len(a)):
# result.append(a[pos] + b[pos])
result.append(_gf256_add(a[pos], b[pos]))
return result
# Given the lists xs and fxs, compute the full coefficient list of the
# interpolating polynomial via Lagrange interpolation. We recover all the
# coefficients, not just f(0), so that shares can later be validated by
# regenerating them.
def _full_lagrange(xs, fxs):
assert(len(xs) == len(fxs))
returnedcoefficients = []
# we need to compute:
# l_0 = (x - x_1) / (x_0 - x_1) * (x - x_2) / (x_0 - x_2) * ...
# l_1 = (x - x_0) / (x_1 - x_0) * (x - x_2) / (x_1 - x_2) * ...
for i in range(len(fxs)):
this_polynomial = [1]
# take the terms one at a time.
# I'm computing the denominator and using it to compute the polynomial.
for j in range(len(fxs)):
# skip the i = jth term because that's how Lagrange works...
if i == j:
continue
# I'm computing the denominator and using it to compute the polynomial.
denominator = _gf256_sub(xs[i], xs[j])
# denominator = xs[i]-xs[j]
# don't need to negate because -x = x in GF256
this_term = [_gf256_div(xs[j], denominator), _gf256_div(1, denominator)]
# this_term = [-xs[j]/denominator, 1/denominator]
# let's build the polynomial...
this_polynomial = _multiply_polynomials(this_polynomial, this_term)
# okay, now I've gone and computed the polynomial. I need to multiply it
# by the result of f(x)
this_polynomial = _multiply_polynomials(this_polynomial, [fxs[i]])
# we've solved this polynomial. We should add to the others.
returnedcoefficients = _add_polynomials(returnedcoefficients, this_polynomial)
return returnedcoefficients
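# Worked example: in GF(256) the unique line through (1, 5) and (2, 9) is
# f(x) = 1 + 4x, since 1 XOR _gf256_mul(4, 1) == 5 and
# 1 XOR _gf256_mul(4, 2) == 9, so _full_lagrange([1, 2], [5, 9]) == [1, 4].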
###### GF256 helper functions... ###########
# GF(256) lookup tables using x^8 + x^4 + x^3 + x + 1
# FYI: addition is just XOR in this field.
# I used this because it's used in tss and AES
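# Multiplication and division go through the log/exp tables:
#   a * b = _GF256_EXP[(_GF256_LOG[a] + _GF256_LOG[b]) % 255]
# e.g. _gf256_mul(0x57, 0x83) == 0xc1, the worked example from the AES
# specification (which uses the same reduction polynomial).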
_GF256_EXP = [
0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff,
0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35,
0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4,
0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa,
0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26,
0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31,
0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc,
0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd,
0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7,
0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88,
0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f,
0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a,
0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0,
0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3,
0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec,
0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0,
0xfb, 0x16, 0x3a, 0x4e, 0xd2, 0x6d, 0xb7, 0xc2,
0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41,
0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0,
0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75,
0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e,
0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80,
0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf,
0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54,
0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09,
0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca,
0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91,
0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e,
0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c,
0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17,
0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd,
0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01
]
# The last entry was wrong!!! I've fixed it.
# entry 0 is undefined
_GF256_LOG = [
0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6,
0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef,
0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a,
0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24,
0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94,
0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62,
0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42,
0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca,
0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74,
0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5,
0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec,
0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86,
0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc,
0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47,
0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89,
0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18,
0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07
]
def _gf256_add(a, b):
return a ^ b
def _gf256_sub(a, b):
return _gf256_add(a, b)
def _gf256_mul(a, b):
if a == 0 or b == 0:
return 0
return _GF256_EXP[(_GF256_LOG[a] + _GF256_LOG[b]) % 255]
def _gf256_div(a, b):
if a == 0:
return 0
if b == 0:
raise ZeroDivisionError
return _GF256_EXP[(_GF256_LOG[a] - _GF256_LOG[b]) % 255]
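# A minimal usage sketch (commented out, illustrative only):
#   s = ShamirSecret(2, b'my secret')                 # dealer, threshold 2
#   shares = [s.compute_share(x) for x in range(1, 6)]
#   t = ShamirSecret(2)                               # share-holder side
#   t.recover_secretdata(shares[:2])                  # any 2 shares suffice
#   assert t.secretdata == b'my secret'
#   assert t.is_valid_share(shares[4])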
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
from time import sleep, time
from errbot import botcmd, BotPlugin, arg_botcmd
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseUpload
from charts import interval, generate_timeseries_linechart, generate_barchart
from charts.line import Collection, Line
def get_ts():
now = datetime.now()
return '%s.%d' % (now.strftime('%Y%m%d-%H%M%S'), now.microsecond)
class BigQuery(BotPlugin):
def activate(self):
super().activate()
if 'queries' not in self:
self['queries'] = []
self.gc = self.get_plugin('GoogleCloud')
self.credentials = self.gc.credentials
self.bigquery = build('bigquery', 'v2', credentials=self.credentials)
def project(self):
if not self.is_activated:
return None
if 'project' not in self.gc:
raise Exception('You need to define a project with !project set first.')
return self.gc['project']
def bucket(self):
if 'bucket' not in self.gc:
raise Exception('No Bucket set.')
return self.gc['bucket']
@botcmd
def bq_datasets(self, msg, args):
"""List the datasets from the project."""
datasets = self.bigquery.datasets()
response = datasets.list(projectId=self.project()).execute()
for dataset in response['datasets']:
yield '%s' % dataset['datasetReference']['datasetId']
@staticmethod
def extract_fields(schema_fields):
result = []
for field in schema_fields:
typ = field['type']
name = field['name']
if typ == 'TIMESTAMP':
result.append((name,
lambda s: datetime.fromtimestamp(float(s)).strftime('%Y-%m-%d %H:%M:%S')
))
elif typ == 'STRING':
result.append((name,
lambda s: s))
else:
result.append((name, str))
return result
@botcmd
def bq_addquery(self, msg, args: str):
"""
Stores a query and assigns it a number.
"""
with self.mutable('queries') as queries:
queries.append(args)
return "Your query has been stored, you can execute it with !bq %i." % (len(queries) - 1)
@botcmd
def bq_delquery(self, msg, args: str):
"""
Removes a stored query.
"""
with self.mutable('queries') as queries:
del queries[int(args)]
return "%i queries have been defined." % len(queries)
@botcmd
def bq_queries(self, msg, args: str):
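        """List the stored queries with their numbers."""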
return '\n\n'.join("%i: %s" % (i, q) for i, q in enumerate(self['queries']))
@botcmd
def bq(self, msg, args: str):
"""Start a new query."""
# if it is a number, assume it is an index for the saved queries.
try:
args = self['queries'][int(args)]
except ValueError:
pass
        if not args:
            yield 'Usage: !bq QUERY_OR_QUERY_INDEX\nYou can save a query with !bq addquery'
            return
query = args.strip()
for response, feedback in self.sync_bq_job(query):
if response:
break
yield feedback
fields = self.extract_fields(response['schema']['fields'])
i = 0
rows = []
for row in response['rows']:
rows.append([field[1](value['v']) for field, value in zip(fields, row['f'])])
i += 1
if i == 10:
break
header = '| ' + ' | '.join(field[0] for field in fields) + ' |\n'
values = '\n'.join('| ' + ' | '.join(v for v in row) + ' |' for row in rows)
        # yield (not return): this method is a generator, so a returned value
        # would be swallowed by StopIteration.
        yield header + values
def sync_bq_job(self, query: str):
"""
        Execute the given query synchronously on BigQuery.
        :param query: the BigQuery SQL query
        :return: yields (response, feedback) tuples; response is None while
                 the job is still running, in which case feedback carries a
                 progress message.
"""
start_time = time()
jobs = self.bigquery.jobs()
response = jobs.query(projectId=self.project(), body={'query': query}).execute()
job_id = None
while 'jobComplete' in response and not response['jobComplete']:
job_id = response['jobReference']['jobId']
yield None, 'BigQuery job "%s" is in progress ... %0.2fs' % (job_id, time() - start_time)
sleep(5)
response = jobs.get(projectId=self.project(), jobId=job_id).execute()
if job_id:
yield jobs.getQueryResults(projectId=self.project(), jobId=job_id).execute(), ''
else:
yield response, ''
@arg_botcmd('query', type=str)
@arg_botcmd('--index', dest='index', type=str, default='0')
@arg_botcmd('--values', dest='values', type=str)
def bq_chart(self, msg, query: str, index: str, values: str):
"""
Start a new query and graph the result.
        By default it will guess the graph type from the first column.
        Otherwise you can select the index column by number or name with
        --index, and the value columns to graph with --values (comma-separated).
"""
# if it is a number, assume it is an index for the saved queries.
try:
query = self['queries'][int(query)]
except ValueError:
pass
        if not query:
            yield 'Usage: !bq chart [--index nb_or_name] [--values nb_or_name,...] QUERY_OR_QUERY_INDEX\n' \
                  'You can save a query with !bq addquery'
            return
for response, feedback in self.sync_bq_job(query):
if response:
break
yield feedback
schema_fields = response['schema']['fields']
try:
index_index = int(index)
except ValueError:
index_index = next(i for i, field in enumerate(schema_fields) if field['name'] == index)
if values:
value_strs = values.split(',')
try:
values_indices = [int(value) for value in value_strs]
except ValueError:
values_indices = [i for i, field in enumerate(schema_fields) if field['name'] in value_strs]
else:
values_indices = list(range(1, len(schema_fields))) # assume all the columns are relevant
filename = '%s.%s.png' % (self.project(), get_ts())
output = os.path.join(self.gc.outdir, filename)
if schema_fields[index_index]['type'] == 'TIMESTAMP':
# Generate a timeseries graph.
# makes a "pivot" for the data to be graphable.
xs = [] # xs is constant for all the series.
series = [[]] * len(values_indices)
for row in response['rows']:
y = 0
for i, value in enumerate(row['f']):
if i in values_indices:
series[y].append(float(value['v']))
y += 1
xs.append(datetime.fromtimestamp(float(row['f'][index_index]['v'])))
start, end = xs[0], xs[-1]
collection = Collection(
lines=[Line(schema_fields[values_indices[i]]['name'], xs, ys) for i, ys in enumerate(series)],
title=query,
start=start,
end=end)
generate_timeseries_linechart(
collection=collection,
time_interval_display=interval.guess(start, end),
outfile=output,
)
yield self.save_image(filename, output, response)['mediaLink']
elif schema_fields[index_index]['type'] == 'STRING':
labels = []
values = []
for row in response['rows']:
labels.append(row['f'][index_index]['v'])
                values.append(float(row['f'][1]['v']))  # numeric values for the bar heights
            # write the chart to `output`; save_image below reads and uploads it
            generate_barchart(title=filename, ylabel='', labels=labels, values=values, outfile=output)
yield self.save_image(filename, output, response)['mediaLink']
else:
yield "The index column is of type %s which is not compatible for a graph: " \
"it should be either a TIMESTAMP or a STRING." % schema_fields[index_index]['type']
def save_image(self, filename, output, response):
with open(output, 'rb') as source:
media = MediaIoBaseUpload(source, mimetype='image/png')
response = self.gc.storage.objects().insert(bucket=self.bucket(),
name=filename,
media_body=media,
predefinedAcl='publicRead').execute()
return response
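# Example chat usage (sketch; the command names follow the botcmd definitions
# above, while the query and column names are hypothetical):
#   !bq addquery SELECT ts, COUNT(*) AS n FROM mydataset.mytable GROUP BY ts
#   !bq queries
#   !bq 0
#   !bq chart --index ts --values n 0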
|
|
# $Id: __init__.py 7320 2012-01-19 22:33:02Z milde $
# Authors: Chris Liechti <cliechti@gmx.net>;
# David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
from docutils._compat import b
themes_dir_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
# Where else to look for a theme?
# Check working dir? Destination dir? Config dir? Plugins dir?
path = os.path.join(themes_dir_path, name)
if not os.path.isdir(path):
raise docutils.ApplicationError(
'Theme directory not found: %r (path: %r)' % (name, path))
return path
class Writer(html4css1.Writer):
settings_spec = html4css1.Writer.settings_spec + (
'S5 Slideshow Specific Options',
'For the S5/HTML writer, the --no-toc-backlinks option '
'(defined in General Docutils Options above) is the default, '
'and should not be changed.',
(('Specify an installed S5 theme by name. Overrides --theme-url. '
'The default theme name is "default". The theme files will be '
'copied into a "ui/<theme>" directory, in the same directory as the '
'destination file (output HTML). Note that existing theme files '
'will not be overwritten (unless --overwrite-theme-files is used).',
['--theme'],
{'default': 'default', 'metavar': '<name>',
'overrides': 'theme_url'}),
('Specify an S5 theme URL. The destination file (output HTML) will '
'link to this theme; nothing will be copied. Overrides --theme.',
['--theme-url'],
{'metavar': '<URL>', 'overrides': 'theme'}),
('Allow existing theme files in the ``ui/<theme>`` directory to be '
'overwritten. The default is not to overwrite theme files.',
['--overwrite-theme-files'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Keep existing theme files in the ``ui/<theme>`` directory; do not '
'overwrite any. This is the default.',
['--keep-theme-files'],
{'dest': 'overwrite_theme_files', 'action': 'store_false'}),
('Set the initial view mode to "slideshow" [default] or "outline".',
['--view-mode'],
{'choices': ['slideshow', 'outline'], 'default': 'slideshow',
'metavar': '<mode>'}),
('Normally hide the presentation controls in slideshow mode. '
'This is the default.',
['--hidden-controls'],
{'action': 'store_true', 'default': True,
'validator': frontend.validate_boolean}),
('Always show the presentation controls in slideshow mode. '
'The default is to hide the controls.',
['--visible-controls'],
{'dest': 'hidden_controls', 'action': 'store_false'}),
('Enable the current slide indicator ("1 / 15"). '
'The default is to disable it.',
['--current-slide'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Disable the current slide indicator. This is the default.',
['--no-current-slide'],
{'dest': 'current_slide', 'action': 'store_false'}),))
settings_default_overrides = {'toc_backlinks': 0}
config_section = 's5_html writer'
config_section_dependencies = ('writers', 'html4css1 writer')
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
# The script element must go in front of the link elements to
# avoid a flash of unstyled content (FOUC), reproducible with
# Firefox.
disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""
layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
# <div class="topleft"></div>
# <div class="topright"></div>
# <div class="bottomleft"></div>
# <div class="bottomright"></div>
default_theme = 'default'
"""Name of the default theme."""
base_theme_file = '__base__'
"""Name of the file containing the name of the base theme."""
direct_theme_files = (
'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
"""Names of theme files directly linked to in the output HTML"""
indirect_theme_files = (
's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
"""Names of files used indirectly; imported or used by files in
`direct_theme_files`."""
required_theme_files = indirect_theme_files + direct_theme_files
"""Names of mandatory theme files."""
def __init__(self, *args):
html4css1.HTMLTranslator.__init__(self, *args)
        # insert S5-specific stylesheet and script markup:
self.theme_file_path = None
self.setup_theme()
view_mode = self.document.settings.view_mode
control_visibility = ('visible', 'hidden')[self.document.settings
.hidden_controls]
self.stylesheet.append(self.s5_stylesheet_template
% {'path': self.theme_file_path,
'view_mode': view_mode,
'control_visibility': control_visibility})
if not self.document.settings.current_slide:
self.stylesheet.append(self.disable_current_slide)
self.add_meta('<meta name="version" content="S5 1.1" />\n')
self.s5_footer = []
self.s5_header = []
self.section_count = 0
self.theme_files_copied = None
def setup_theme(self):
if self.document.settings.theme:
self.copy_theme()
elif self.document.settings.theme_url:
self.theme_file_path = self.document.settings.theme_url
else:
raise docutils.ApplicationError(
'No theme specified for S5/HTML writer.')
def copy_theme(self):
"""
Locate & copy theme files.
A theme may be explicitly based on another theme via a '__base__'
file. The default base theme is 'default'. Files are accumulated
from the specified theme, any base themes, and 'default'.
"""
settings = self.document.settings
path = find_theme(settings.theme)
theme_paths = [path]
self.theme_files_copied = {}
required_files_copied = {}
# This is a link (URL) in HTML, so we use "/", not os.sep:
self.theme_file_path = '%s/%s' % ('ui', settings.theme)
if settings._destination:
dest = os.path.join(
os.path.dirname(settings._destination), 'ui', settings.theme)
if not os.path.isdir(dest):
os.makedirs(dest)
else:
# no destination, so we can't copy the theme
return
default = False
while path:
for f in os.listdir(path): # copy all files from each theme
if f == self.base_theme_file:
continue # ... except the "__base__" file
if ( self.copy_file(f, path, dest)
and f in self.required_theme_files):
required_files_copied[f] = 1
if default:
break # "default" theme has no base theme
# Find the "__base__" file in theme directory:
base_theme_file = os.path.join(path, self.base_theme_file)
# If it exists, read it and record the theme path:
if os.path.isfile(base_theme_file):
lines = open(base_theme_file).readlines()
for line in lines:
line = line.strip()
if line and not line.startswith('#'):
path = find_theme(line)
if path in theme_paths: # check for duplicates (cycles)
path = None # if found, use default base
else:
theme_paths.append(path)
break
else: # no theme name found
path = None # use default base
else: # no base theme file found
path = None # use default base
if not path:
path = find_theme(self.default_theme)
theme_paths.append(path)
default = True
if len(required_files_copied) != len(self.required_theme_files):
# Some required files weren't found & couldn't be copied.
required = list(self.required_theme_files)
for f in required_files_copied.keys():
required.remove(f)
raise docutils.ApplicationError(
'Theme files not found: %s'
% ', '.join(['%r' % f for f in required]))
files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')
def copy_file(self, name, source_dir, dest_dir):
"""
Copy file `name` from `source_dir` to `dest_dir`.
Return 1 if the file exists in either `source_dir` or `dest_dir`.
"""
source = os.path.join(source_dir, name)
dest = os.path.join(dest_dir, name)
if dest in self.theme_files_copied:
return 1
else:
self.theme_files_copied[dest] = 1
if os.path.isfile(source):
if self.files_to_skip_pattern.search(source):
return None
settings = self.document.settings
if os.path.exists(dest) and not settings.overwrite_theme_files:
settings.record_dependencies.add(dest)
else:
src_file = open(source, 'rb')
src_data = src_file.read()
src_file.close()
dest_file = open(dest, 'wb')
dest_dir = dest_dir.replace(os.sep, '/')
dest_file.write(src_data.replace(
b('ui/default'),
dest_dir[dest_dir.rfind('ui/'):].encode(
sys.getfilesystemencoding())))
dest_file.close()
settings.record_dependencies.add(source)
return 1
if os.path.isfile(dest):
return 1
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
header = ''.join(self.s5_header)
footer = ''.join(self.s5_footer)
title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
layout = self.layout_template % {'header': header,
'title': title,
'footer': footer}
self.fragment.extend(self.body)
self.body_prefix.extend(layout)
self.body_prefix.append('<div class="presentation">\n')
self.body_prefix.append(
self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
if not self.section_count:
self.body.append('</div>\n')
self.body_suffix.insert(0, '</div>\n')
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
def depart_footer(self, node):
start = self.context.pop()
self.s5_footer.append('<h2>')
self.s5_footer.extend(self.body[start:])
self.s5_footer.append('</h2>')
del self.body[start:]
def depart_header(self, node):
start = self.context.pop()
header = ['<div id="header">\n']
header.extend(self.body[start:])
header.append('\n</div>\n')
del self.body[start:]
self.s5_header.extend(header)
def visit_section(self, node):
if not self.section_count:
self.body.append('\n</div>\n')
self.section_count += 1
self.section_level += 1
if self.section_level > 1:
# dummy for matching div's
self.body.append(self.starttag(node, 'div', CLASS='section'))
else:
self.body.append(self.starttag(node, 'div', CLASS='slide'))
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.section):
level = self.section_level + self.initial_header_level - 1
if level == 1:
level = 2
tag = 'h%s' % level
self.body.append(self.starttag(node, tag, ''))
self.context.append('</%s>\n' % tag)
else:
html4css1.HTMLTranslator.visit_subtitle(self, node)
def visit_title(self, node):
html4css1.HTMLTranslator.visit_title(self, node)
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 22 16:15:28 2016
@author: login
"""
import pandas as pd
import numpy as np
import sys, os
from sklearn.decomposition import PCA, TruncatedSVD, LatentDirichletAllocation
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
from scipy import stats as ss
from rarefy import rarefy_table
def rarefyOTUtables(otu_df, meta_df):
rare_table = rarefy_table(otu_df.copy())
rt_m = meta_df.copy()
rt_m.ix[:, otu_df.columns] = rare_table
return rare_table, rt_m
def simpleProportions(otu_df, meta_df):
sp_otu = otu_df.divide(otu_df.sum(axis=1), axis='rows')
sp_m = meta_df.copy()
sp_m.ix[:, otu_df.columns] = sp_otu
return sp_otu, sp_m
def standardScaled(otu_df, meta_df):
data_ = otu_df.copy().values
new_data_ = StandardScaler().fit_transform(data_)
ss_otu = pd.DataFrame(new_data_, index=otu_df.index, columns=otu_df.columns)
ss_m = meta_df.copy()
ss_m.ix[:, ss_otu.columns] = ss_otu.ix[:, ss_otu.columns]
return ss_otu, ss_m
def seq_to_taxa(seq_list, all_taxa, taxa_series):
"""
    Accepts a list of `seq` labels, the `all_taxa` set, and a `taxa_series`
    mapping each seq to its semicolon-delimited taxonomy string.
    Returns a vector of taxonomic occupancy counts over `all_taxa`.
"""
def split_seq(seq):
return taxa_series[seq].split(";")
long_taxa_arr = np.array(map(split_seq, seq_list))
axis, counts = np.unique(long_taxa_arr, return_counts=True)
this_taxa_ser = pd.Series(data=np.zeros((len(all_taxa),)),
index=list(all_taxa))
for s, n in zip(axis, counts):
this_taxa_ser[s] = n
return this_taxa_ser
def score_clusters(test_cluster_dict, all_taxa, taxa_series, test_labels):
print "Scoring clusters"
top_level = np.array(test_cluster_dict.keys()).max()
bottom_level=np.array(test_cluster_dict.keys()).min()
iteration_order = np.arange(bottom_level, top_level)[::-1]
p_values = np.ones(iteration_order.shape)
child_clusts = np.zeros(iteration_order.shape)
parent_clusts = np.zeros(iteration_order.shape)
for idx, clust_n in enumerate(iteration_order):
this_clust = test_cluster_dict[clust_n]
this_labels = [test_labels[int(i)] for i in this_clust]
this_clust_set = set(this_clust)
this_taxa_ser = seq_to_taxa(this_labels, all_taxa, taxa_series)
higher_tree_levels = np.arange(clust_n+1,top_level+1)
for clust_m in higher_tree_levels:
higher_clust = set(test_cluster_dict[clust_m])
if this_clust_set.issubset(higher_clust):
parent_clust = [test_labels[i] for i in list(higher_clust)]
break
else:
pass
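        # NOTE: this assumes every cluster is a subset of some cluster at a
        # higher tree level; if none is found, parent_clust is unbound and a
        # NameError is raised below.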
higher_taxa_ser = seq_to_taxa(parent_clust, all_taxa, taxa_series)
parent_clusts[idx] = clust_m
child_clusts[idx] = clust_n
p_values[idx] = Ftest_pvalue(this_taxa_ser.values, higher_taxa_ser.values)
cluster_df_data = np.vstack((parent_clusts,
child_clusts,
p_values)).T
clust_df_cols = ['parent', 'child', 'p-val']
cluster_df = pd.DataFrame(data=cluster_df_data, columns=clust_df_cols)
print "Analyzed clusters"
return cluster_df
def Ftest_pvalue(d1,d2):
"""takes two vectors and performs an F-test, returning the p value"""
df1 = len(d1) - 1
df2 = len(d2) - 1
F = np.var(d1) / np.var(d2)
p_value = ss.f.cdf(F, df1, df2)
return p_value
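# For example, Ftest_pvalue([1., 2., 3.], [1., 1., 1.01]) is close to 1,
# because the variance of the first sample dwarfs that of the second.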
def append_depths(df, depths_vector):
"""
This takes a df with a series of surface concentrations i.e. has a depth
column that has 0 in all the rows. It then:
    1. copies the df as provided to create a template
    2. iterates over the depth_vector, pulling out each depth sequentially
    3. copies the template
    4. modifies the copied template to the particular depth
    5. appends the modified copy to the original provided DF
    The purpose of this is to use regression-based methods to see if the
    concentration at the surface is somehow related to the distributions of
    microbes throughout the water column.
"""
n_rows, _ = df.shape
multiples = len(depths_vector)
expected_rows_n = n_rows*multiples
new_depth_template = df.copy()
for d in depths_vector:
if d != 0:
this_depth = new_depth_template.copy()
this_depth['depth'] = np.ones((new_depth_template.shape[0], ))*d
df = df.append(this_depth)
print "Returning df of shape {}, expecting {} rows".format(df.shape,
expected_rows_n)
return df
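# A hedged usage sketch (hypothetical values): replicating two surface
# rows across a three-depth grid triples the row count.
def _append_depths_example():
    surf = pd.DataFrame({'conc': [1.0, 2.0], 'depth': [0, 0]})
    full = append_depths(surf, [0, 5, 10])
    # full.shape[0] == 6: the originals at depth 0 plus copies at 5 and 10
    return full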
def removeZeroCols(df):
return (df.T[(df != 0).any()]).T
def parseBiosample(df):
"""
This function accepts a df with samples in rows and OTUs in columns
and parses the biosample key
"""
biosamples = list(df.index)
dates_, primers_, kits_, replicates_, depths_ = [], [], [], [], []
for bs in biosamples:
if bs[:2] == "SB":
clipped = bs[2:]
else:
sys.exit("Non-SB start to biosample")
if 'TAWMD' in clipped:
date, rest = clipped.split("TAWMD")
elif "TAWWD" in clipped:
date, rest = clipped.split("TAWWD")
else:
sys.exit("Bridge Sequence not Detected")
if "VV4T" in rest:
primer = "VV4"
depth, rest2 = rest.split("VV4T")
elif "VV4V5T" in rest:
primer = "V4V5"
depth, rest2 = rest.split("VV4V5T")
else:
sys.exit("Primer Sequence not Detected")
if rest2[0] == "M":
kit = "MolBio"
elif rest2[0] == "Q":
kit = "Qiagen"
elif rest2[:4] == "filt" and rest2[4] == "M":
kit = "MolBio"
elif rest2[:2] == "NA":
kit = "NA"
else:
print clipped
print rest2[0]
sys.exit("kit type not detected")
if rest2[-2] == "R":
replicate = rest2[-1]
else:
sys.exit("replicate signifier not detected")
if depth == '015':
depth = '01.5'
dates_.append(date)
primers_.append(primer)
kits_.append(kit)
replicates_.append(replicate)
depths_.append(depth)
df['date'] = dates_
df['primers'] = primers_
df['kit'] = kits_
df['replicates'] = replicates_
df['depth'] = depths_
return df
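# A sketch of the biosample label format implied by the parser above
# (the example label itself is made up): "SB" + date + "TAWMD" + depth +
# "VV4T" + kit code + ... + "R" + replicate digit.
def _parse_biosample_example():
    label = 'SB071514TAWMD015VV4TMR1'
    df = pd.DataFrame(index=[label], data={'otu1': [10]})
    parsed = parseBiosample(df)
    # date '071514'; depth '015' is recoded to '01.5'; 'M' selects the
    # MolBio kit; the trailing 'R1' marks replicate '1'
    return parsed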
def add_quadrants(dF):
depth_list = list(dF.depth)
depth_ns = np.array([float(n) for n in depth_list])
quad_ns = np.array([" "]*len(depth_ns))
quad_ns[depth_ns < 5] = "Q1"
quad_ns[(depth_ns > 4) & (depth_ns < 11)]= "Q2"
quad_ns[(depth_ns > 10) & (depth_ns < 17)]= "Q3"
quad_ns[depth_ns > 16]= "Q4"
dF["Quadrants"] = quad_ns
return dF.copy()
def numericEncodings(df_orig, metadata_cols, verbose=True):
df = df_orig.copy()
print "Changing {} metadata columns".format(len(metadata_cols))
unq_metadata = {i:{} for i in metadata_cols}
int_vals = ['Forward read length', 'Index read length', 'Quadrants',
'Sequencing Date', 'Sequencing platform', 'kit', 'primers',
'replicates']
dontTransform = ['Coverage', 'TotalSeqs', 'BadBarcodes', 'GoodBarcodes',
'seasonality']
for md, unqs in unq_metadata.items():
if md in int_vals:
if verbose:
print "\n", md
for num, unq in enumerate(np.unique(df[md])):
num+=1
unqs[num] = unq
if verbose == True:
print "Encoding", unq, "as", num
bool_ = df[md] == unq
df.ix[bool_, md] = num
elif md in dontTransform:
pass
elif md == 'date' or md == 'Date':
earliest = df[md].min()
earliest = pd.to_datetime(earliest)
for unq in np.unique(df[md]):
this_day = pd.to_datetime(unq)
td = (this_day - earliest).days
unqs[td] = unq
if verbose == True:
print "Encoding", unq, "as", td, "(days since first day)"
bool_ = df[md] == unq
df.ix[bool_, md] = td
elif md == 'depth' or md == 'Depth':
for unq in np.unique(df[md]):
unqs[unq] = float(unq)
if verbose == True:
print "Encoding", unq, "as", float(unq)
bool_ = df[md] == unq
df.ix[bool_, md] = float(unq)
else:
sys.exit("Illegal var type detected")
for i in metadata_cols:
df[[i]] = df[[i]].apply(pd.to_numeric)
df2 = df.copy()
return df2, unq_metadata
from itertools import groupby
def listRepGroups(df):
# make a list of the index
dfindex = list(df.index)
# create a list for each grouping and a list of already matched samples
rep_groups, consumed = [], []
# Start with one index
for first in dfindex:
this_group = []
# Check the entire list for members that match
if first not in consumed:
for second in dfindex:
# If a sample wasn't already consumed, and isn't the exact same sample
if second not in consumed and first != second:
# check for a match
if first.split("VV")[0] == second.split("VV")[0]:
# if detected, add to the already consumed list
consumed.append(second)
this_group.append(second)
if len(this_group) != 0:
this_group.append(first)
this_group.sort()
rep_groups.append(this_group)
rep_groups.sort()
unq_rep_groups = list(rg for rg,_ in groupby(rep_groups))
print len(unq_rep_groups), "groups of replicates detected"
return unq_rep_groups
def KLD(x,y):
a = np.array(x, dtype=float) + 0.000001
b = np.array(y, dtype=float) + 0.000001
return (a*np.log(a/b)).sum()
def rootJSD(x,y):
a = np.array(x, dtype=float)
b = np.array(y, dtype=float)
return np.sqrt(0.5 * KLD(a, (a+b)/2) + 0.5 * KLD(b, (a+b)/2))
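# A quick sanity sketch for the root Jensen-Shannon distance defined
# above (toy vectors): it is symmetric, and zero for identical inputs.
def _root_jsd_example():
    p = [0.5, 0.3, 0.2]
    q = [0.2, 0.3, 0.5]
    # rootJSD(p, q) == rootJSD(q, p) > 0, while rootJSD(p, p) is ~0
    return rootJSD(p, q), rootJSD(q, p), rootJSD(p, p)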
def distant_replicate_covariates(broken_groups, df, df_m, metadata):
# create a dictionary with the combined keys of (mdata_cat + mdata_val)
# for each instance add the average distance for an individual
super_meta_dict = {}
m_only_df = df_m.ix[:, metadata]
# measure average distance to all other replicates
for grp in broken_groups:
dists = JensenShannonDiv_Sqrt(df.ix[grp, :])
for row, idx in zip(range(dists.shape[0]), dists.index):
nz_row = dists.ix[row, :].values
this_ird = nz_row[nz_row !=0].mean()
# add to a particular column & value key
for col in m_only_df.columns:
if col != 'replicates' and col !='depth':
this_val = m_only_df.ix[idx, col]
jk = str(col) + "_" + str(this_val)
if not super_meta_dict.has_key(jk):
super_meta_dict[jk] = [this_ird]
else:
super_meta_dict[jk].append(this_ird)
meta_dict_u = {}
for k, v in super_meta_dict.items():
meta_dict_u[k] = np.array(v)
return meta_dict_u
def plot_interreplicate_distances(df_otus, rep_groups, fnum):
""""
plot distribution of inter-replicate distances
"""
all_dists = []
for idx, group in enumerate(rep_groups):
this_grps = df_otus.ix[group, :]
dist_dat = JensenShannonDiv_Sqrt(this_grps)
for a_d in dist_dat.values.flatten():
if a_d != 0:
all_dists.append(a_d)
plt.figure(fnum, figsize=(8,6))
plt.clf()
plt.hist(all_dists, bins=100)
plt.tick_params(labelsize=14)
plt.xlabel("root JS distance (inter-replicate)", fontsize=14)
plt.ylabel("N", fontsize=14)
def JensenShannonDiv_Sqrt(df_otu):
ps_df = df_otu.copy()
ps_n_df = ps_df.divide(ps_df.sum(axis=1), axis=0)
shape_sq = len(ps_n_df.index)
dist_dat = np.zeros((shape_sq, shape_sq))
for r_idx, r in enumerate(ps_n_df.index):
for c_idx, c in enumerate(ps_n_df.index):
x_ = ps_n_df.ix[r, :].values
y_ = ps_n_df.ix[c, :].values
dist_dat[r_idx, c_idx] = rootJSD(x_, y_)
dist_mat = pd.DataFrame(index=ps_n_df.index, columns=ps_n_df.index,
data=dist_dat)
return dist_mat
def centeredLogRatio(otu_table, otu_table_m):
from scipy.stats.mstats import gmean
noZeros = otu_table.copy().replace(0, np.nan)
geomeans = np.repeat(np.nan, repeats = noZeros.shape[0])
for i in range(0, noZeros.shape[0]):
geomeans[i] = gmean(noZeros.ix[i, :].dropna())
clr_table = np.log(noZeros.divide(geomeans, axis=0))
clr_table.replace(np.nan, 0, inplace=True)
clr_table_m = otu_table_m.copy()
clr_table_m.ix[:, otu_table.columns] = clr_table
return clr_table, clr_table_m
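# A minimal sketch of the CLR transform above (toy counts): each row is
# divided by its geometric mean before the log, so rows without zeros
# sum to ~0 after transformation.
def _clr_example():
    otus = pd.DataFrame({'otu1': [10, 20], 'otu2': [40, 5]})
    clr, clr_m = centeredLogRatio(otus, otus.copy())
    # clr.sum(axis=1) is ~0 for these all-positive rows
    return clr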
def ReplicateReport(df, df_otus, rep_groups, verbose=True, metric="JSD"):
print "REPLICATE REPORT"
in_rep_distances, worst_reps = [], []
broken_groups = 0
rep_groups_mutable = deepcopy(rep_groups)
for idx, group in enumerate(rep_groups_mutable):
if verbose:
print "Group {}".format(idx)
this_grps = df_otus.ix[group, :]
dist_mat = JensenShannonDiv_Sqrt(this_grps)
# Returns a symmetric matrix with 0s on the diagonal
# So we pull out all unique excluding those on the diagonal
for a_d in np.unique(dist_mat.values):
if a_d != 0:
in_rep_distances.append(a_d)
if verbose == True:
print dist_mat
most_distant = dist_mat.max().max()
if verbose:
print "Most Distant: {}".format(most_distant)
if most_distant > 0.3:
broken_groups+=1
while most_distant > 0.3:
# find one of the pair that are most divergent
bad_reps_bool = (dist_mat.max() == dist_mat.max().max())
bad_means = dist_mat[bad_reps_bool].mean(axis=1)
worst_rep = bad_means.argmax()
worst_reps.append(worst_rep)
if verbose:
print "\tdropping {}".format(worst_rep)
group.remove(worst_rep)
this_grps = df_otus.ix[group, :]
dist_mat = JensenShannonDiv_Sqrt(this_grps)
most_distant = dist_mat.max().max()
if verbose:
print "\tmost distant now: {}".format(most_distant)
return worst_reps, broken_groups
def originate_rep_groupings(final_rep_groups):
final_rep_dict = []
for g in final_rep_groups:
this_dict = {mem:(idx+1) for idx, mem in enumerate(g)}
final_rep_dict.append(this_dict)
return final_rep_dict
def matchXandYbyIndex(clr_x, model_proc_df):
"""
This fxn drops rows in the design matrix & response vector
according to index equivalency.
"""
# drop all rows in x that don't match to y
x_bool = clr_x.index.isin(model_proc_df.index)
x_1 = clr_x[x_bool]
# drop all values in y that don't match to x_1
y_bool = model_proc_df.index.isin(x_1.index)
y_1 = model_proc_df[y_bool]
print "X matrix shape {} reduced to {} rows".format(clr_x.shape,
(x_bool).sum())
print "Y matrix shape {} reduced to {} rows".format(model_proc_df.shape,
(y_bool).sum())
return x_1.values, y_1.values
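# A short sketch of the index alignment above (hypothetical frames):
# only rows whose labels appear in both frames survive.
def _match_by_index_example():
    x = pd.DataFrame({'a': [1, 2, 3]}, index=['s1', 's2', 's3'])
    y = pd.DataFrame({'b': [4, 5]}, index=['s2', 's3'])
    x_vals, y_vals = matchXandYbyIndex(x, y)
    # both arrays now have two rows, for 's2' and 's3'
    return x_vals, y_vals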
def prettify_date_string(time_stamp):
return str(time_stamp).split(" ")[0]
def dropBadReps(less_diverse_reps, rep_groups):
"""
1. Unpack current replicate groups
2. Check if any dropped reps are in a given replicate group
3. Drop if present, otherwise pass
4. If replicate groups have >1 member, add it to new group list
5. If not, drop it from group list
6. Return newly assembled list of lists
"""
new_rep_groups = []
old_groups = deepcopy(rep_groups)
for g in old_groups:
for l in less_diverse_reps:
if l in g:
g.remove(l)
else:
pass
if len(g) > 1:
new_rep_groups.append(g)
else:
pass
broken_groups = [i for i in rep_groups if i not in new_rep_groups]
return new_rep_groups, broken_groups
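# A toy sketch of the pruning steps enumerated in the docstring above:
def _drop_bad_reps_example():
    groups = [['a', 'b', 'c'], ['d', 'e']]
    kept, broken = dropBadReps(['c', 'e'], groups)
    # kept == [['a', 'b']]; both originals land in `broken` because
    # neither survives unchanged
    return kept, broken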
import matplotlib.pyplot as plt
import seaborn as sns
def plotHeatmap(df, fignum):
plt.figure(fignum, figsize=(12,9))
ax = sns.heatmap(df)
for item in ax.get_yticklabels():
item.set_rotation(0)
for item in ax.get_xticklabels():
item.set_rotation(90)
#plt.savefig('seabornPandas.png', dpi=100)
plt.show()
import ecopy as ep
def beta_wrapper(df, var_key):
print "\n", var_key
brayDist = ep.distance(df, method='bray')
groups = list(df[var_key])
rand_groups = list(np.random.choice(np.unique(groups),
size=np.array(groups).shape))
ep.beta_dispersion(brayDist, groups, test='anova',
center='median', scores=False)
ep.beta_dispersion(brayDist, rand_groups, test='anova',
center='median', scores=False)
return brayDist
from sklearn.feature_selection import f_regression
def scalePCAcorrelate(df_numerical, df_w_mdata, metadata_cols, transformed):
rv = df_numerical.shape[0]
if transformed:
X_std2 = df_numerical.values.T
else:
X_std2 = StandardScaler().fit_transform(df_numerical.values.T)
rows_n, cols_n = X_std2.shape
print "\nPerforming PCA"
pca2 = PCA(n_components=100, random_state=42)
pca2.fit(X_std2)
no1 = pca2.explained_variance_ratio_[0]
no2 = pca2.explained_variance_ratio_[1]
print "Top two components explain {} and {} of variance.".format(no1, no2)
all_cors, p_comp_n, exp_vars, corr_ps = [], [], [], []
all_pvals, p_comp_nF, exp_vars2 = [], [], []
for mdata in metadata_cols:
md_arr = np.array(df_w_mdata[mdata])
raw_corrs = [ss.pearsonr(pca2.components_[i, :], md_arr) for i in range(100)]
corrs, c_pvals = zip(*raw_corrs)
if not np.all(np.isfinite(md_arr)):
print "Replacing {} not finite # with 0".format((~np.isfinite(md_arr)).sum())
md_arr[~np.isfinite(md_arr)] = 0
pvals = [f_regression(pca2.components_[i, :].reshape(rv, 1), md_arr)[1][0] for i in range(100)]
all_pvals.append(np.array(pvals).min())
all_cors.append(np.array(corrs).max())
pca_comp_no = np.argmax(np.array(corrs))
corr_ps.append(np.array(c_pvals)[pca_comp_no])
pca_comp_no2 = np.argmin(np.array(pvals))
p_comp_n.append(pca_comp_no+1)
p_comp_nF.append(pca_comp_no2+1)
exp_vars.append(pca2.explained_variance_ratio_[pca_comp_no])
exp_vars2.append(pca2.explained_variance_ratio_[pca_comp_no2])
data_ = np.vstack((all_cors, p_comp_n, exp_vars, corr_ps)).T
data_2 = np.vstack((all_pvals, p_comp_nF, exp_vars2)).T
colset = ['Correlation', 'Component', 'Explained Variance', 'P-value']
colset2 = ['Pvalue', 'Component_F', 'Explained Variance_F']
to_return = pd.DataFrame(data=data_, index=metadata_cols, columns=colset)
f_to_return = pd.DataFrame(data=data_2, index=metadata_cols, columns=colset2)
f_to_return.sort_values(['Component_F', 'Pvalue'],
ascending=[True, True],
inplace=True)
to_return.sort_values(['Component', 'Correlation'],
ascending=[True, False],
inplace=True)
final_return = to_return[to_return.Correlation.notnull()]
final_f_return = f_to_return[f_to_return.Pvalue.notnull()]
return final_return, final_f_return
def readChemData(chem_path, units, ftype, plotbool=False):
print os.path.basename(chem_path)
print units
if ftype == 'depth_profile':
site_chem_spec_df = pd.read_csv(chem_path, index_col=0,
parse_dates=True,
infer_datetime_format=True)
new_idx = []
for i in site_chem_spec_df.index:
new_idx.append(pd.Period(i, 'M'))
site_chem_spec_df.index = new_idx
elif ftype == 'surface_measure':
chem_spec_csv = pd.read_csv(chem_path, sep=",", index_col=0)
print "Null Values: {}".format(chem_spec_csv.isnull().sum().sum())
print "Database Shape: {}".format(chem_spec_csv.shape)
new_cols = [pd.Period(i) for i in chem_spec_csv.columns]
chem_spec_csv.columns = new_cols
site_chem_spec_df = chem_spec_csv.T.interpolate()
else:
sys.exit("invalid ftype")
if plotbool == True:
site_chem_spec_df.plot()
return site_chem_spec_df
def plotCommonTaxa(taxa_series):
taxa_list = list(taxa_series.values)
taxa_decomposition = []
for tax_str in taxa_list:
this_tl = tax_str.split(";")
clean_list = [i for i in this_tl if i[-2:] != "__" ]
taxa_decomposition += clean_list
unq_taxa = np.unique(taxa_decomposition, return_counts=True)
dfindex, dfdata = unq_taxa[0], unq_taxa[1]
taxa_df = pd.DataFrame(data=dfdata, index=dfindex, columns=['count'])
taxa_df.sort_values('count', ascending=False, inplace=True)
rtaxa_df = taxa_df.divide(taxa_df.sum(), axis=1)
total_called = []
t_levels_s = ['k', 'p', 'c', 'o', 'f', 'g', 's']
    t_levels = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
for tL in t_levels_s:
posHits = [i for i in rtaxa_df.index if i[0] == tL]
subdf = rtaxa_df.ix[posHits, :]
print tL, subdf.sum()
total_called.append(subdf.sum())
t_level_rel = np.array(total_called)
width = 0.35
ind = np.arange(len(t_levels))
print "total pct%", t_level_rel.sum()
fig, ax = plt.subplots(1, 1)
ax.bar(ind + width, t_level_rel,width)
ax.set_xticks(ind + width)
ax.set_xticklabels(t_levels)
def inferTaxaLevel(taxa_series):
addSemicolon = lambda x: x+";"
taxa_edit = taxa_series.copy().apply(addSemicolon)
taxa_dict = taxa_edit.to_dict()
taxa_frame = taxa_edit.copy().to_frame()
taxa_depth = np.zeros(taxa_series.shape)
taxa_frame["Taxa Depth"] = taxa_depth
for seq, ts in taxa_dict.items():
taxa_frame.ix[seq, "Taxa Depth"] = 7 - ts.count("_;")
return taxa_frame
def analyze_alpha_diversity(decoder, derepped_otus_m, valPairs, var_type):
skippable = ['replicates', 'Index read length', 'Sequencing platform',
'Forward read length', 'Quadrants' ]
ad_cats = []
ad_cols = {"Mean":[], "Median":[], "Std":[], "N":[]}
for var in decoder.keys():
codes = decoder[var].keys()
for code in codes:
if var == 'date':
ad_cats.append(str(decoder[var][code]).split("T")[0]+" ("+var+")")
elif var in skippable:
pass
else:
ad_cats.append(str(decoder[var][code])+" ("+var+")")
if var in skippable:
pass
else:
try:
sub_bool = derepped_otus_m[var] == code
except TypeError:
sub_bool = derepped_otus_m[var] == float(code)
subdf = derepped_otus_m[sub_bool]
ad_cols["Median"].append(np.median(subdf.ix[:, var_type]))
ad_cols["Std"].append(subdf.ix[:, var_type].std())
ad_cols["N"].append(subdf.shape[0])
ad_cols["Mean"].append(subdf.ix[:, var_type].mean())
for idx, vp in enumerate(valPairs):
kitT, primT = vp[0], vp[1]
bool1 = derepped_otus_m.primers == primT
bool2 = derepped_otus_m.kit == kitT
subdf2 = derepped_otus_m[bool1 & bool2]
if idx == 2:
primer_outgroup = list(subdf2.index)
ad_cats.append(str(decoder['primers'][primT])+" & "+str(decoder['kit'][kitT]))
ad_cols["Median"].append(np.median(subdf2.ix[:, var_type]))
ad_cols["Std"].append(subdf2.ix[:, var_type].std())
ad_cols["N"].append(subdf2.shape[0])
ad_cols["Mean"].append(subdf2.ix[:, var_type].mean())
alpha_df = pd.DataFrame(data=ad_cols, index=ad_cats)
return alpha_df, primer_outgroup
def alpha_diversity(dereplicated_otus, derepped_m, metrics):
derepped_otus_m = derepped_m.copy()
row_sum = dereplicated_otus.copy().sum(axis=1)
row_rel = dereplicated_otus.copy().divide(row_sum, axis=0).astype('float64')
if 'enspie' in metrics:
enspie_sq = row_rel.apply(np.square)
enspie_dom = enspie_sq.sum(axis=1)
enspie_ = enspie_dom**-1
derepped_otus_m['enspie'] = enspie_
if 'shannon' in metrics:
entrop = lambda n: n*np.log(n)
shannon_ = row_rel.replace({ 0 : np.nan }).applymap(entrop).T.sum()*-1.0
derepped_otus_m['shannon'] = shannon_.apply(np.exp)
if 'chao1' in metrics:
total_s = (dereplicated_otus > 0).T.sum()
singletons = (dereplicated_otus == 1).T.sum()
doubletons = (dereplicated_otus == 2).T.sum()
numerator = singletons.multiply(singletons-1)
denominator = 2*(doubletons+1)
chao1_ = total_s + numerator.divide(denominator, axis=0)
derepped_otus_m['chao1'] = chao1_
return dereplicated_otus, derepped_otus_m
def plotCountTotalsByMetadata(df_m, decoder, mList, mVar, fignum):
## Get Normalized Count Totals
# drop everything but the key grouping variable and sum
mList.remove(mVar)
counts_Depth = df_m.drop(mList, axis=1)
depthGroup = counts_Depth.groupby([mVar]).sum()
# find the number of samples per grouping
(_, n_per_depth) = np.unique(df_m[mVar].values, return_counts=True)
# average the total counts per group by the number of samples
mean_counts_per_group = depthGroup.T.sum().divide(n_per_depth)
## Get Standard Deviation of Count Totals
# Drop depths & sum each sample before grouping
just_counts = counts_Depth.drop([mVar], axis=1)
depthSum = just_counts.T.sum().to_frame()
# Convert Series into DataFrame, add col names, and modify dtype
depthSum[mVar] = df_m[mVar].values
depthSum.columns = ['counts', mVar]
depthSum = depthSum.applymap(pd.to_numeric)
# group each sum by depth and flatten by std
depthStd = depthSum.groupby([mVar]).std()
# convert labels for display
if mVar == 'date':
decoded_labs = [str(decoder[mVar][i]).split("T")[0] for i in list(np.unique(df_m[mVar].values))]
else:
decoded_labs = [str(decoder[mVar][i]) for i in list(np.unique(df_m[mVar].values))]
# Setup Plot
width = 0.35; ind = np.arange(len(n_per_depth));
plt.figure(fignum, figsize=(14, 8))
ax = plt.gca()
ax.bar(ind + width, mean_counts_per_group, width,
yerr=depthStd.values.flatten())
ax.set_xticks(ind + width)
ax.set_xticklabels(decoded_labs)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45)
ax.set_xlim(0.0, float(len(list(np.unique(df_m[mVar].values)))))
ax.set_xlabel(mVar.capitalize()+" (m)")
ax.set_ylabel("Average Total OTUs (n)")
# Add back metadata variable
mList.append(mVar)
def replicateAlphaDiv(df, metric, rep_groups):
enspie_cv = []
enspie_1 = []
enspie_2 = []
for g in rep_groups:
this_grp = df.ix[g, metric]
enspie_cv.append(this_grp.std() / this_grp.mean())
justTwo = list(np.random.choice(g, size=2, replace=False))
enspie_1.append(df.ix[justTwo[0], metric])
enspie_2.append(df.ix[justTwo[1], metric])
enspie_1 = np.array(enspie_1)
enspie_2 = np.array(enspie_2)
return (enspie_cv, enspie_1, enspie_2)
from sklearn.model_selection import train_test_split
def lda_tuner(ingroup_otu, best_models):
best_score = -1*np.inf
dtp_series = [0.0001, 0.001, 0.01, 0.1, 0.2]
twp_series = [0.0001, 0.001, 0.01, 0.1, 0.2]
topic_series = [3]
X = ingroup_otu.values
eval_counter = 0
for topics in topic_series:
for dtp in dtp_series:
for twp in twp_series:
eval_counter +=1
X_train, X_test = train_test_split(X, test_size=0.5)
lda = LatentDirichletAllocation(n_topics=topics,
doc_topic_prior=dtp,
topic_word_prior=twp,
learning_method='batch',
random_state=42,
max_iter=20)
lda.fit(X_train)
this_score = lda.score(X_test)
this_perplexity = lda.perplexity(X_test)
if this_score > best_score:
best_score = this_score
print "New Max Likelihood: {}".format(best_score)
print "#{}: n:{}, dtp:{}, twp:{}, score:{}, perp:{}".format(eval_counter,
topics, dtp, twp,
this_score, this_perplexity)
best_models.append({'n': topics, 'dtp': dtp, 'twp': twp,
'score': this_score, 'perp': this_perplexity})
if (dtp == dtp_series[-1]) and (twp == twp_series[-1]):
eval_counter +=1
X_train, X_test = train_test_split(X, test_size=0.5)
lda = LatentDirichletAllocation(n_topics=topics,
doc_topic_prior=1./topics,
topic_word_prior=1./topics,
learning_method='batch',
random_state=42,
max_iter=20)
lda.fit(X_train)
this_score = lda.score(X_test)
this_perplexity = lda.perplexity(X_test)
if this_score > best_score:
best_score = this_score
print "New Max Likelihood: {}".format(best_score)
print "#{}: n:{}, dtp:{}, twp:{}, score:{} perp: {}".format(eval_counter,
topics,
(1./topics),
(1./topics),
this_score,
this_perplexity)
best_models.append({'n': topics, 'dtp': (1./topics),
'twp': (1./topics), 'score': this_score,
'perp': this_perplexity})
return best_models
def collapseBdiversity(dist_mat, raw_data_m, metaData_var, verbose=False):
metaOptions = np.unique(raw_data_m.ix[:, metaData_var])
n_ = len(metaOptions)
metaDistance = np.full((n_, n_), np.nan)
metaDeviation = np.full((n_, n_), np.nan)
for r_i, r in enumerate(metaOptions):
for c_i, c in enumerate(metaOptions):
dist_copy = dist_mat.copy()
dist_copy[metaData_var] = raw_data_m.ix[:, metaData_var]
dist_filt_1 = dist_copy[dist_copy[metaData_var] == r]
dist_filt_1.drop([metaData_var], axis=1, inplace=True)
dist_filt_t = dist_filt_1.T
dist_filt_t[metaData_var] = raw_data_m.ix[:, metaData_var]
dist_filt_2 = dist_filt_t[dist_filt_t[metaData_var] == c]
dist_filt = dist_filt_2.drop([metaData_var], axis=1)
dist_filt_flat = dist_filt.values.flatten()
dist_filt_nz = dist_filt_flat[dist_filt_flat != 0]
mD = np.median(dist_filt_nz)
mDev = dist_filt_nz.std()
if verbose:
print "{} versus {} metadistance".format(r, c)
print "\t {} ({})".format(mD, mDev)
metaDistance[r_i,c_i] = mD
metaDeviation[r_i,c_i] = mDev
return metaDistance, metaDeviation
"""
http://qiime.org/scripts/make_otu_network.html
http://qiime.org/scripts/differential_abundance.html
# alternative to random forest classifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
clf = GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True,
n_jobs=-1)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
"""
import subprocess as sp
from sklearn import tree
def write_tree_to_png(fname_no_ext, rf):
tree.export_graphviz(rf, out_file=fname_no_ext+".dot")
base_cmd = "dot -Tpng {}.dot > {}.png".format(fname_no_ext, fname_no_ext)
p = sp.Popen(base_cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
return stdout
def bz2wrapper(fpath):
if fpath.endswith("bz2"):
print "Unzipping"
base_cmd = "bzip2 -d {}".format(fpath)
else:
base_cmd = "bzip2 -z {}".format(fpath)
p = sp.Popen(base_cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
return stdout
def DESeqRlog(df_otus, df_m, saved_file=False):
if not saved_file:
path = "/Users/login/Desktop/vst_temp.csv"
path2 = "/Users/login/Desktop/date_temp.csv"
wrapper_file = os.path.join(os.getcwd(), "DESeqwrapper.R")
base_cmd = "Rscript DESeqwrapper.R"
if not os.path.exists(wrapper_file):
sys.exit("Accessory script missing")
# direct export path
to_transform = path
shared_otus = df_otus.T.copy()
arg_1 = os.path.dirname(to_transform)
arg_2 = to_transform.split("/")[-1]
arg_3 = os.path.basename(path2)
# export data to disk
date_series = df_m.ix[:, 'Sequencing Date']
date_series.to_csv(path2)
shared_otus.to_csv(to_transform)
# communicate with the world
cmd = " ".join([base_cmd, arg_1, arg_2, arg_3])
p = sp.Popen(cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
if "Execution halted" in stderr:
sys.exit("R wrapper failed")
to_read_back = os.path.join(arg_1, arg_2.split(".")[0]+"_vst.csv")
rlog_otus = pd.read_csv(to_read_back, index_col = 0)
dropped_otus = len(rlog_otus.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(rlog_otus.index, shared_otus.index):
assert i == j
for temps in [to_read_back, to_transform]:
os.remove(temps)
    else:
        saved_file = os.path.join(os.getcwd(), "rlog_saved.csv")
        # recreate the reference frame used by the index checks below
        shared_otus = df_otus.T.copy()
        rlog_otus = pd.read_csv(saved_file, index_col = 0)
dropped_otus = len(rlog_otus.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(rlog_otus.index, shared_otus.index):
assert i == j
# return only those columns not in both dfs
mdata_cols = set(df_otus).symmetric_difference(set(df_m.columns))
# copy transformed matrix
rlog_m = rlog_otus.copy()
# add back metadata
for mc in mdata_cols:
rlog_m[mc] = df_m.ix[:, mc]
return rlog_otus, rlog_m
def edgeRtmm(df_otus, df_m):
"""
    Uses edgeR's variance stabilizing transformation to transform sequence
    counts into OTU abundances. Involves adding and subtracting pseudocounts.
"""
path = "/Users/login/Desktop/vst_temp.csv"
print "Performing TMM Transform"
wrapper_file = os.path.join(os.getcwd(), "edgeRwrapper_tmm.R")
base_cmd = "Rscript edgeRwrapper_tmm.R"
if not os.path.exists(wrapper_file):
sys.exit("Accessory script missing")
# direct export path
to_transform = path
shared_otus = df_otus.copy()
arg_1 = os.path.dirname(to_transform)
arg_2 = to_transform.split("/")[-1]
# export data to disk
shared_otus.to_csv(to_transform, sep=",")
cmd = " ".join([base_cmd, arg_1, arg_2])
p = sp.Popen(cmd, cwd=os.getcwd(), shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
stdout, stderr = p.communicate()
if "Execution halted" in stderr:
sys.exit("R wrapper failed")
to_read_back = os.path.join(arg_1, arg_2.split(".")[0]+"_vst.csv")
shared_vst = pd.read_csv(to_read_back, index_col = 0)
dropped_otus = len(shared_vst.columns) - len(shared_otus.columns)
if dropped_otus > 0:
print "{} OTUS dropped".format(dropped_otus)
for i, j in zip(shared_vst.index, shared_otus.index):
assert i == j
for temps in [to_read_back, to_transform]:
os.remove(temps)
vs_T_m = shared_vst.copy()
lost_cols = [i for i in df_m.columns if not i.startswith('seq')]
alphaList = ['enspie', 'shannon', 'chao1']
for alpha_d in alphaList:
if alpha_d in lost_cols:
lost_cols.remove(alpha_d)
for lost_col in lost_cols:
vs_T_m[lost_col] = df_m[lost_col]
vs_T_otus, vs_T_m = alpha_diversity(shared_vst, vs_T_m, alphaList)
return vs_T_otus, vs_T_m
def importratesandconcs_mod(path_):
assert os.path.exists(path_)
conc_f_dict = {"concs_1.txt" : "O",
"concs_2.txt" : "C",
"concs_3.txt" : "N+",
"concs_4.txt" : "N-",
"concs_5.txt" : "S+",
"concs_6.txt" : "S-",
"concs_7.txt" : "Fe+",
"concs_8.txt" : "Fe-",
"concs_9.txt" : "CH4",
"concs_10.txt" : "Null"}
rate_f_dict = { "rate_1.txt" : "iron_oxidation_(oxygen)",
"rate_2.txt" : "ammonia_oxidation",
"rate_3.txt" : "sulfur_oxidation",
"rate_4.txt" : "iron_oxidation_(nitrate)",
"rate_5.txt" : "methanotrophy_(oxygen)",
"rate_6.txt" : "methanotrophy_(sulfate)",
"rate_7.txt" : "aerobic_heterotrophy",
"rate_8.txt" : "denitrification",
"rate_9.txt" : "iron_reduction",
"rate_10.txt" : "sulfate_reduction",
"rate_11.txt" : "methanogenesis" }
rate_dict, conc_dict = {}, {}
for d_dict, f_dict in zip([conc_dict, rate_dict], [conc_f_dict, rate_f_dict]):
for c_f, spec in f_dict.items():
c_p = os.path.join(path_, c_f)
c_df = pd.read_csv(c_p, sep="\t", header=None)
d_dict[spec] = c_df
print "{} has shape {}".format(spec, c_df.shape)
return (rate_dict, conc_dict)
from scipy.interpolate import interp1d
def time_scale_modeled_chem_data(rate_dict, conc_dict, n_days, start_date, end_date):
"""
1. Create a date index for the new dataframes
2. Create new dictionaries to hold the new dataframes
3. Unload each DF one at a time
4. Interpolate each depth vector along new axis
5. Load into new numpy array
6. Assign date index & numpy array to new dataframe object
7. Reload new dataframe into new dictionary, accessible by name string
8. Return newly minted dictionaries
"""
dr = pd.date_range(start_date, end_date)
assert len(dr) == n_days
new_rate_dict, new_conc_dict = {}, {}
for a_dict, new_dict in zip([rate_dict, conc_dict], [new_rate_dict, new_conc_dict]):
for a_spec in a_dict.keys():
this_df = a_dict[a_spec]
depths, columns = this_df.shape
n_slices = columns
assert n_slices < n_days
idx = np.arange(n_slices)
new_interval = max(idx) / float(n_days)
new_columns = np.arange(idx.min(), idx.max(), new_interval)
new_df_data = np.zeros((depths, len(new_columns)))
for depth in xrange(depths):
a_vector = this_df.ix[depth, :].values
f2 = interp1d(idx, a_vector, kind='cubic')
new_df_data[depth, :] = f2(new_columns)
new_df = pd.DataFrame(data=new_df_data.T, columns=np.arange(6,6+depths),
index=dr)
new_dict[a_spec] = new_df.T.unstack()
rate_cols = sorted(new_rate_dict.keys())
conc_cols = sorted(new_conc_dict.keys())
all_cols = rate_cols + conc_cols
full_idx = new_rate_dict[rate_cols[0]].index
full_df = pd.DataFrame(index=full_idx, columns=all_cols)
for name in all_cols:
if name in rate_cols:
full_df.ix[:, name] = new_rate_dict[name]
elif name in conc_cols:
full_df.ix[:, name] = new_conc_dict[name]
return full_df
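# A minimal sketch of the cubic resampling step used above (synthetic
# vector): a coarse axis is interpolated onto a finer one with interp1d.
def _interp_example():
    idx = np.arange(5)
    coarse = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
    f2 = interp1d(idx, coarse, kind='cubic')
    # eight samples spanning the original range at half-step spacing
    return f2(np.arange(0, 4, 0.5))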
def preheim_date_parser(date_str):
date_part = date_str.split("_")[1]
new_str = date_part[2:4] + "-" + date_part[4:6] + "-" + date_part[0:2]
return pd.to_datetime(new_str)
def importratesandconcs_obs(chem_dir):
assert os.path.exists(chem_dir)
obs_conc_f_dict = { "Chloride" : "Cl_mg_ClL-1.txt",
"Dissolved Oxygen" : "DO.txt",
"Nitrate" : "NO3_mg_NL-1.txt",
"Conductance" : "SCP.txt",
"Sulfate" : "SO4_mg_SL-1.txt",
"Temperature" : "TEMP.txt" }
obs_conc_df_dict = {}
for name, fname in obs_conc_f_dict.items():
this_path = os.path.join(chem_dir, fname)
this_df = pd.read_csv(this_path, sep="\t", index_col=0, dtype=float)
this_df.columns = map(preheim_date_parser, this_df.columns)
if name == "Temperature" or name == "Conductance":
this_df.ix[1, '2012-11-12'] = this_df.ix[2, '2012-11-02']
this_df.interpolate(axis=0, inplace=True)
surf_null_mask = this_df.ix[0, :].isnull().values
this_df.ix[0, surf_null_mask] = this_df.ix[1, surf_null_mask]
this_df = this_df.T
if name == "Dissolved Oxygen":
idx_to_drop = this_df.index[5]
this_df.drop(idx_to_drop, axis=0, inplace=True)
# new_cols = list(this_df.columns)
# new_cols.reverse()
# this_df.columns = new_cols
this_df.columns = [int(i) for i in this_df.columns]
print "Total Null Vals in {}: {}".format(name, this_df.isnull().sum().sum())
obs_conc_df_dict[name] = this_df.T.unstack()
conc_cols = sorted(obs_conc_df_dict.keys())
full_idx = obs_conc_df_dict[conc_cols[0]].index
obs_conc_df = pd.DataFrame(index=full_idx, columns=conc_cols)
for name in conc_cols:
obs_conc_df.ix[:, name] = obs_conc_df_dict[name]
return obs_conc_df
def extract_linkages(row_clusters, labels):
clusters = {}
for row in xrange(row_clusters.shape[0]):
cluster_n = row + len(labels)
# which clusters / labels are present in this row
glob1, glob2 = row_clusters[row, 0], row_clusters[row, 1]
# if this is a cluster, pull the cluster
this_clust = []
for glob in [glob1, glob2]:
if glob > (len(labels)-1):
this_clust += clusters[glob]
# if it isn't, add the label to this cluster
else:
this_clust.append(glob)
clusters[cluster_n] = this_clust
return clusters
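# A hedged sketch of feeding scipy's linkage output to `extract_linkages`
# (synthetic points): cluster ids start at len(labels), and each id maps
# to the leaf labels it contains.
def _extract_linkages_example():
    from scipy.cluster.hierarchy import linkage
    pts = np.array([[0.0], [0.1], [5.0], [5.1]])
    row_clusters = linkage(pts, method='average')
    clusters = extract_linkages(row_clusters, range(4))
    # clusters[4] and clusters[5] are the two tight pairs;
    # clusters[6] contains all four leaves
    return clusters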
|
|
import random
from diofant.combinatorics import Permutation
from diofant.combinatorics.permutations import Perm
from diofant.combinatorics.tensor_can import (bsgs_direct_product,
canonicalize, dummy_sgs,
get_symmetric_group_sgs,
perm_af_direct_product,
riemann_bsgs)
from diofant.combinatorics.testutil import (canonicalize_naive,
graph_certificate)
__all__ = ()
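# A brief worked sketch of the encoding used throughout these tests
# (mirroring the first case in test_canonicalize_no_slot_sym below):
# a monomial with 2n indices is stored as a permutation of size 2n + 2,
# whose last two slots carry the overall sign, so swapping them in the
# canonical form encodes a factor of -1.
def _sign_slot_sketch():
    base1, gens1 = get_symmetric_group_sgs(1)
    g = Permutation([1, 0, 2, 3])  # A_d0 * B^d0 with index order [d0, -d0]
    sym = canonicalize(g, [0, 1], 0, (base1, gens1, 1, 0), (base1, gens1, 1, 0))
    antisym = canonicalize(g, [0, 1], 1, (base1, gens1, 1, 0), (base1, gens1, 1, 0))
    # sym == [0, 1, 2, 3]; antisym == [0, 1, 3, 2] (overall sign flip)
    return sym, antisym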
def test_perm_af_direct_product():
gens1 = [[1, 0, 2, 3], [0, 1, 3, 2]]
gens2 = [[1, 0]]
assert perm_af_direct_product(gens1, gens2, 0) == [[1, 0, 2, 3, 4, 5], [0, 1, 3, 2, 4, 5], [0, 1, 2, 3, 5, 4]]
gens1 = [[1, 0, 2, 3, 5, 4], [0, 1, 3, 2, 4, 5]]
gens2 = [[1, 0, 2, 3]]
    assert perm_af_direct_product(gens1, gens2, 1) == [[1, 0, 2, 3, 4, 5, 7, 6], [0, 1, 3, 2, 4, 5, 6, 7], [0, 1, 2, 3, 5, 4, 6, 7]]
def test_dummy_sgs():
a = dummy_sgs([1, 2], 0, 4)
assert a == [[0, 2, 1, 3, 4, 5]]
a = dummy_sgs([2, 3, 4, 5], 0, 8)
assert a == [x._array_form for x in [Perm(9)(2, 3), Perm(9)(4, 5),
Perm(9)(2, 4)(3, 5)]]
a = dummy_sgs([2, 3, 4, 5], 1, 8)
assert a == [x._array_form for x in [Perm(2, 3)(8, 9), Perm(4, 5)(8, 9),
Perm(9)(2, 4)(3, 5)]]
def test_get_symmetric_group_sgs():
assert get_symmetric_group_sgs(2) == ([0], [Permutation(3)(0, 1)])
assert get_symmetric_group_sgs(2, 1) == ([0], [Permutation(0, 1)(2, 3)])
assert get_symmetric_group_sgs(3) == ([0, 1], [Permutation(4)(0, 1), Permutation(4)(1, 2)])
assert get_symmetric_group_sgs(3, 1) == ([0, 1], [Permutation(0, 1)(3, 4), Permutation(1, 2)(3, 4)])
assert get_symmetric_group_sgs(4) == ([0, 1, 2], [Permutation(5)(0, 1), Permutation(5)(1, 2), Permutation(5)(2, 3)])
assert get_symmetric_group_sgs(4, 1) == ([0, 1, 2], [Permutation(0, 1)(4, 5), Permutation(1, 2)(4, 5), Permutation(2, 3)(4, 5)])
def test_canonicalize_no_slot_sym():
# cases in which there is no slot symmetry after fixing the
# free indices; here and in the following if the symmetry of the
# metric is not specified, it is assumed to be symmetric.
# If it is not specified, tensors are commuting.
# A_d0 * B^d0; g = [1,0, 2,3]; T_c = A^d0*B_d0; can = [0,1,2,3]
base1, gens1 = get_symmetric_group_sgs(1)
dummies = [0, 1]
g = Permutation([1, 0, 2, 3])
can = canonicalize(g, dummies, 0, (base1, gens1, 1, 0), (base1, gens1, 1, 0))
assert can == [0, 1, 2, 3]
# equivalently
can = canonicalize(g, dummies, 0, (base1, gens1, 2, None))
assert can == [0, 1, 2, 3]
# with antisymmetric metric; T_c = -A^d0*B_d0; can = [0,1,3,2]
can = canonicalize(g, dummies, 1, (base1, gens1, 1, 0), (base1, gens1, 1, 0))
assert can == [0, 1, 3, 2]
# A^a * B^b; ord = [a,b]; g = [0,1,2,3]; can = g
g = Permutation([0, 1, 2, 3])
dummies = []
t0 = t1 = (base1, gens1, 1, 0)
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0, 1, 2, 3]
# B^b * A^a
g = Permutation([1, 0, 2, 3])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [1, 0, 2, 3]
# A symmetric
    # A^{b}_{d0}*A^{d0, a} order a,b,d0,-d0; T_c = A^{a d0}*A^{b}_{d0}
# g = [1,3,2,0,4,5]; can = [0,2,1,3,4,5]
base2, gens2 = get_symmetric_group_sgs(2)
dummies = [2, 3]
g = Permutation([1, 3, 2, 0, 4, 5])
can = canonicalize(g, dummies, 0, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 4, 5]
# with antisymmetric metric
can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 4, 5]
# A^{a}_{d0}*A^{d0, b}
g = Permutation([0, 3, 2, 1, 4, 5])
can = canonicalize(g, dummies, 1, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 5, 4]
# A, B symmetric
# A^b_d0*B^{d0,a}; g=[1,3,2,0,4,5]
# T_c = A^{b,d0}*B_{a,d0}; can = [1,2,0,3,4,5]
dummies = [2, 3]
g = Permutation([1, 3, 2, 0, 4, 5])
can = canonicalize(g, dummies, 0, (base2, gens2, 1, 0), (base2, gens2, 1, 0))
assert can == [1, 2, 0, 3, 4, 5]
# same with antisymmetric metric
can = canonicalize(g, dummies, 1, (base2, gens2, 1, 0), (base2, gens2, 1, 0))
assert can == [1, 2, 0, 3, 5, 4]
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d0*C_d1; can = [0,2,1,3,4,5]
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
g = Permutation([2, 1, 0, 3, 4, 5])
dummies = [0, 1, 2, 3]
t0 = (base2, gens2, 1, 0)
t1 = t2 = (base1, gens1, 1, 0)
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [0, 2, 1, 3, 4, 5]
# A without symmetry
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5]
g = Permutation([2, 1, 0, 3, 4, 5])
dummies = [0, 1, 2, 3]
t0 = ([], [Permutation(list(range(4)))], 1, 0)
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [0, 2, 3, 1, 4, 5]
# A, B without symmetry
# A^{d1}_{d0}*B_{d1}^{d0}; g = [2,1,3,0,4,5]
# T_c = A^{d0 d1}*B_{d0 d1}; can = [0,2,1,3,4,5]
t0 = t1 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [0, 1, 2, 3]
g = Permutation([2, 1, 3, 0, 4, 5])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0, 2, 1, 3, 4, 5]
# A_{d0}^{d1}*B_{d1}^{d0}; g = [1,2,3,0,4,5]
# T_c = A^{d0 d1}*B_{d1 d0}; can = [0,2,3,1,4,5]
g = Permutation([1, 2, 3, 0, 4, 5])
can = canonicalize(g, dummies, 0, t0, t1)
assert can == [0, 2, 3, 1, 4, 5]
# A, B, C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}; can = [2,4,0,5,3,1,6,7]
t0 = t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2, 3, 4, 5]
g = Permutation([4, 2, 0, 3, 5, 1, 6, 7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2, 4, 0, 5, 3, 1, 6, 7]
# A symmetric, B and C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}; can = [2,4,0,3,5,1,6,7]
t0 = (base2, gens2, 1, 0)
t1 = t2 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2, 3, 4, 5]
g = Permutation([4, 2, 0, 3, 5, 1, 6, 7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2, 4, 0, 3, 5, 1, 6, 7]
# A and C symmetric, B without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,6,7]
t0 = t2 = (base2, gens2, 1, 0)
t1 = ([], [Permutation(list(range(4)))], 1, 0)
dummies = [2, 3, 4, 5]
g = Permutation([4, 2, 0, 3, 5, 1, 6, 7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2, 4, 0, 3, 1, 5, 6, 7]
# A symmetric, B without symmetry, C antisymmetric
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# g=[4,2,0,3,5,1,6,7]
# T_c = -A^{d0 d1}*B_{a d0}*C_{b d1}; can = [2,4,0,3,1,5,7,6]
t0 = (base2, gens2, 1, 0)
t1 = ([], [Permutation(list(range(4)))], 1, 0)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
t2 = (base2a, gens2a, 1, 0)
dummies = [2, 3, 4, 5]
g = Permutation([4, 2, 0, 3, 5, 1, 6, 7])
can = canonicalize(g, dummies, 0, t0, t1, t2)
assert can == [2, 4, 0, 3, 1, 5, 7, 6]
def test_canonicalize_no_dummies():
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
# A commuting
# A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4]
# T_c = A^a A^b A^c; can = list(range(5))
g = Permutation([2, 1, 0, 3, 4])
can = canonicalize(g, [], 0, (base1, gens1, 3, 0))
assert can == list(range(5))
# A anticommuting
# A^c A^b A^a; ord = [a,b,c]; g = [2,1,0,3,4]
# T_c = -A^a A^b A^c; can = [0,1,2,4,3]
g = Permutation([2, 1, 0, 3, 4])
can = canonicalize(g, [], 0, (base1, gens1, 3, 1))
assert can == [0, 1, 2, 4, 3]
# A commuting and symmetric
# A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5]
# T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5]
g = Permutation([1, 3, 2, 0, 4, 5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 0))
assert can == [0, 2, 1, 3, 4, 5]
# A anticommuting and symmetric
# A^{b,d}*A^{c,a}; ord = [a,b,c,d]; g = [1,3,2,0,4,5]
# T_c = -A^{a c}*A^{b d}; can = [0,2,1,3,5,4]
g = Permutation([1, 3, 2, 0, 4, 5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 1))
assert can == [0, 2, 1, 3, 5, 4]
# A^{c,a}*A^{b,d} ; g = [2,0,1,3,4,5]
# T_c = A^{a c}*A^{b d}; can = [0,2,1,3,4,5]
g = Permutation([2, 0, 1, 3, 4, 5])
can = canonicalize(g, [], 0, (base2, gens2, 2, 1))
assert can == [0, 2, 1, 3, 4, 5]
def test_no_metric_symmetry():
# no metric symmetry
# A^d1_d0 * A^d0_d1; ord = [d0,-d0,d1,-d1]; g= [2,1,0,3,4,5]
# T_c = A^d0_d1 * A^d1_d0; can = [0,3,2,1,4,5]
g = Permutation([2, 1, 0, 3, 4, 5])
can = canonicalize(g, list(range(4)), None, [[], [Permutation(list(range(4)))], 2, 0])
assert can == [0, 3, 2, 1, 4, 5]
# A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0
# ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# 0 1 2 3 4 5 6 7
# g = [2,5,0,7,4,3,6,1,8,9]
# T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2
# can = [0,3,2,1,4,7,6,5,8,9]
g = Permutation([2, 5, 0, 7, 4, 3, 6, 1, 8, 9])
# can = canonicalize(g, list(range(8)), 0, [[], [list(range(4))], 4, 0])
# assert can == [0, 2, 3, 1, 4, 6, 7, 5, 8, 9]
can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0])
assert can == [0, 3, 2, 1, 4, 7, 6, 5, 8, 9]
# A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1
# g = [0,5,2,7,6,1,4,3,8,9]
# T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0
# can = [0,3,2,5,4,7,6,1,8,9]
g = Permutation([0, 5, 2, 7, 6, 1, 4, 3, 8, 9])
can = canonicalize(g, list(range(8)), None, [[], [Permutation(list(range(4)))], 4, 0])
assert can == [0, 3, 2, 5, 4, 7, 6, 1, 8, 9]
g = Permutation([12, 7, 10, 3, 14, 13, 4, 11, 6, 1, 2, 9, 0, 15, 8, 5, 16, 17])
can = canonicalize(g, list(range(16)), None, [[], [Permutation(list(range(4)))], 8, 0])
assert can == [0, 3, 2, 5, 4, 7, 6, 1, 8, 11, 10, 13, 12, 15, 14, 9, 16, 17]
def test_canonical_free():
# t = A^{d0 a1}*A_d0^a0
# ord = [a0,a1,d0,-d0]; g = [2,1,3,0,4,5]; dummies = [[2,3]]
# t_c = A_d0^a0*A^{d0 a1}
# can = [3,0, 2,1, 4,5]
g = Permutation([2, 1, 3, 0, 4, 5])
dummies = [[2, 3]]
can = canonicalize(g, dummies, [None], ([], [Permutation(3)], 2, 0))
assert can == [3, 0, 2, 1, 4, 5]
def test_canonicalize1():
base1, gens1 = get_symmetric_group_sgs(1)
base1a, gens1a = get_symmetric_group_sgs(1, 1)
base2, gens2 = get_symmetric_group_sgs(2)
base3, gens3 = get_symmetric_group_sgs(3)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
base3a, gens3a = get_symmetric_group_sgs(3, 1)
# A_d0*A^d0; ord = [d0,-d0]; g = [1,0,2,3]
# T_c = A^d0*A_d0; can = [0,1,2,3]
g = Permutation([1, 0, 2, 3])
can = canonicalize(g, [0, 1], 0, (base1, gens1, 2, 0))
assert can == list(range(4))
# A commuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2]
# g = [1,3,5,4,2,0,6,7]
# T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2; can = list(range(8))
g = Permutation([1, 3, 5, 4, 2, 0, 6, 7])
can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 0))
assert can == list(range(8))
# A anticommuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0; ord=[d0,-d0,d1,-d1,d2,-d2]
# g = [1,3,5,4,2,0,6,7]
# T_c 0; can = 0
g = Permutation([1, 3, 5, 4, 2, 0, 6, 7])
can = canonicalize(g, list(range(6)), 0, (base1, gens1, 6, 1))
assert can == 0
can1 = canonicalize_naive(g, list(range(6)), 0, (base1, gens1, 6, 1))
assert can1 == 0
# A commuting symmetric
# A^{d0 b}*A^a_d1*A^d1_d0; ord=[a,b,d0,-d0,d1,-d1]
# g = [2,1,0,5,4,3,6,7]
# T_c = A^{a d0}*A^{b d1}*A_{d0 d1}; can = [0,2,1,4,3,5,6,7]
g = Permutation([2, 1, 0, 5, 4, 3, 6, 7])
can = canonicalize(g, list(range(2, 6)), 0, (base2, gens2, 3, 0))
assert can == [0, 2, 1, 4, 3, 5, 6, 7]
# A, B commuting symmetric
# A^{d0 b}*A^d1_d0*B^a_d1; ord=[a,b,d0,-d0,d1,-d1]
# g = [2,1,4,3,0,5,6,7]
# T_c = A^{b d0}*A_d0^d1*B^a_d1; can = [1,2,3,4,0,5,6,7]
g = Permutation([2, 1, 4, 3, 0, 5, 6, 7])
can = canonicalize(g, list(range(2, 6)), 0, (base2, gens2, 2, 0), (base2, gens2, 1, 0))
assert can == [1, 2, 3, 4, 0, 5, 6, 7]
# A commuting symmetric
# A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1]
# g = [4,2,1,0,5,3,6,7]
# T_c = A^{a d0 d1}*A^{b}_{d0 d1}; can = [0,2,4,1,3,5,6,7]
g = Permutation([4, 2, 1, 0, 5, 3, 6, 7])
can = canonicalize(g, list(range(2, 6)), 0, (base3, gens3, 2, 0))
assert can == [0, 2, 4, 1, 3, 5, 6, 7]
# A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0
# ord = [a0,a1,a2,a3,d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# 0 1 2 3 4 5 6 7 8 9 10 11
# g = [10,4,8, 0,7,9, 6,11,1, 2,3,5, 12,13]
# T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3}
# can = [0,4,6, 1,5,8, 2,3,10, 7,9,11, 12,13]
g = Permutation([10, 4, 8, 0, 7, 9, 6, 11, 1, 2, 3, 5, 12, 13])
can = canonicalize(g, list(range(4, 12)), 0, (base3, gens3, 4, 0))
assert can == [0, 4, 6, 1, 5, 8, 2, 3, 10, 7, 9, 11, 12, 13]
# A commuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# ord = [d0,-d0,d1,-d1,d2,-d2,d3,-d3]
# g = [0,2,4,5,7,3,1,6,8,9]
    # in this example and in the next three,
# renaming dummy indices and using symmetry of A,
# T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = 0
g = Permutation([0, 2, 4, 5, 7, 3, 1, 6, 8, 9])
can = canonicalize(g, list(range(8)), 0, (base3, gens3, 2, 0), (base2a, gens2a, 1, 0))
assert can == 0
# A anticommuting symmetric, B anticommuting
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
# can = [0,2,4, 1,3,6, 5,7, 8,9]
can = canonicalize(g, list(range(8)), 0, (base3, gens3, 2, 1), (base2a, gens2a, 1, 0))
assert can == [0, 2, 4, 1, 3, 6, 5, 7, 8, 9]
# A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
# can = [0,2,4, 1,3,6, 5,7, 9,8]
can = canonicalize(g, list(range(8)), 1, (base3, gens3, 2, 1), (base2a, gens2a, 1, 0))
assert can == [0, 2, 4, 1, 3, 6, 5, 7, 9, 8]
# A anticommuting symmetric, B anticommuting anticommuting,
# no metric symmetry
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = [0,2,4, 1,3,7, 5,6, 8,9]
can = canonicalize(g, list(range(8)), None, (base3, gens3, 2, 1), (base2a, gens2a, 1, 0))
assert can == [0, 2, 4, 1, 3, 7, 5, 6, 8, 9]
# Gamma anticommuting
# Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha}
# ord = [alpha, rho, mu,-mu,nu,-nu]
# g = [3,5,1,4,2,0,6,7]
# T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu}
    # can = [2,4,1,0,3,5,7,6]
g = Permutation([3, 5, 1, 4, 2, 0, 6, 7])
t0 = (base2a, gens2a, 1, None)
t1 = (base1, gens1, 1, None)
t2 = (base3a, gens3a, 1, None)
can = canonicalize(g, list(range(2, 6)), 0, t0, t1, t2)
assert can == [2, 4, 1, 0, 3, 5, 7, 6]
# Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha}
# ord = [alpha, beta, gamma, -rho, mu,-mu,nu,-nu]
# 0 1 2 3 4 5 6 7
# g = [5,7,2,1,3,6,4,0,8,9]
    # T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu}
    # can = [4,6,1,2,3,0,5,7,8,9]
t0 = (base2a, gens2a, 2, None)
g = Permutation([5, 7, 2, 1, 3, 6, 4, 0, 8, 9])
can = canonicalize(g, list(range(4, 8)), 0, t0, t1, t2)
assert can == [4, 6, 1, 2, 3, 0, 5, 7, 8, 9]
# f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry
# f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b}
# ord = [mu,-mu,nu,-nu,a,-a,b,-b,c,-c,d,-d, e, -e]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
# g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]
# T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e}
# can = [4,6,8, 5,10,12, 0,7, 1,11, 2,9, 3,13, 15,14]
g = Permutation([8, 11, 5, 9, 13, 7, 1, 10, 3, 4, 2, 12, 0, 6, 14, 15])
base_f, gens_f = bsgs_direct_product(base1, gens1, base2a, gens2a)
base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
t0 = (base_f, gens_f, 2, 0)
t1 = (base_A, gens_A, 4, 0)
can = canonicalize(g, [list(range(4)), list(range(4, 14))], [0, 0], t0, t1)
assert can == [4, 6, 8, 5, 10, 12, 0, 7, 1, 11, 2, 9, 3, 13, 15, 14]
def test_riemann_invariants():
baser, gensr = riemann_bsgs
# R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]; g = [0,2,3,1,4,5]
# T_c = -R^{d0 d1}_{d0 d1}; can = [0,2,1,3,5,4]
g = Permutation([0, 2, 3, 1, 4, 5])
can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0))
assert can == [0, 2, 1, 3, 5, 4]
# use a non minimal BSGS
can = canonicalize(g, list(range(2, 4)), 0, ([2, 0], [Permutation([1, 0, 2, 3, 5, 4]), Permutation([2, 3, 0, 1, 4, 5])], 1, 0))
assert can == [0, 2, 1, 3, 5, 4]
"""
    The following tests in test_riemann_invariants and in
    test_riemann_invariants1 have been checked using xperm.c from xPerm
    in [1] and with an older version contained in [2]
[1] xperm.c part of xPerm written by J. M. Martin-Garcia
http://www.xact.es/index.html
[2] test_xperm.cc in cadabra by Kasper Peeters, http://cadabra.phi-sci.com/
"""
# R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} *
# R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10}
# ord: contravariant d_k ->2*k, covariant d_k -> 2*k+1
# T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} *
# R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11}
g = Permutation([23, 2, 1, 10, 12, 8, 0, 11, 15, 5, 17, 19, 21, 7, 13, 9, 4, 14, 22, 3, 16, 18, 6, 20, 24, 25])
can = canonicalize(g, list(range(24)), 0, (baser, gensr, 6, 0))
assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 21, 23, 24, 25]
# use a non minimal BSGS
can = canonicalize(g, list(range(24)), 0, ([2, 0], [Permutation([1, 0, 2, 3, 5, 4]), Permutation([2, 3, 0, 1, 4, 5])], 6, 0))
assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 21, 23, 24, 25]
g = Permutation([0, 2, 5, 7, 4, 6, 9, 11, 8, 10, 13, 15, 12, 14, 17, 19, 16, 18, 21, 23, 20, 22, 25, 27, 24, 26, 29, 31, 28, 30, 33, 35, 32, 34, 37, 39, 36, 38, 1, 3, 40, 41])
can = canonicalize(g, list(range(40)), 0, (baser, gensr, 10, 0))
assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 24, 26, 21, 23, 28, 30, 25, 27, 32, 34, 29, 31, 36, 38, 33, 35, 37, 39, 40, 41]
def test_riemann_invariants1():
baser, gensr = riemann_bsgs
g = Permutation([17, 44, 11, 3, 0, 19, 23, 15, 38, 4, 25, 27, 43, 36, 22, 14, 8, 30, 41, 20, 2, 10, 12, 28, 18, 1, 29, 13, 37, 42, 33, 7, 9, 31, 24, 26, 39, 5, 34, 47, 32, 6, 21, 40, 35, 46, 45, 16, 48, 49])
can = canonicalize(g, list(range(48)), 0, (baser, gensr, 12, 0))
assert can == [0, 2, 4, 6, 1, 3, 8, 10, 5, 7, 12, 14, 9, 11, 16, 18, 13, 15, 20, 22, 17, 19, 24, 26, 21, 23, 28, 30, 25, 27, 32, 34, 29, 31, 36, 38, 33, 35, 40, 42, 37, 39, 44, 46, 41, 43, 45, 47, 48, 49]
g = Permutation([0, 2, 4, 6, 7, 8, 10, 12, 14, 16, 18, 20, 19, 22, 24, 26, 5, 21, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 13, 48, 50, 52, 15, 49, 54, 56, 17, 33, 41, 58, 9, 23, 60, 62, 29, 35, 63, 64, 3, 45, 66, 68, 25, 37, 47, 57, 11, 31, 69, 70, 27, 39, 53, 72, 1, 59, 73, 74, 55, 61, 67, 76, 43, 65, 75, 78, 51, 71, 77, 79, 80, 81])
can = canonicalize(g, list(range(80)), 0, (baser, gensr, 20, 0))
assert can == [0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 20, 22, 24, 7, 26, 28, 30, 9, 15, 32, 34, 11, 36, 23, 38, 13, 40, 42, 44, 17, 39, 29, 46, 19, 48, 43, 50, 21, 45, 52, 54, 25, 56, 33, 58, 27, 60, 53, 62, 31, 51, 64, 66, 35, 65, 47, 68, 37, 70, 49, 72, 41, 74, 57, 76, 55, 67, 59, 78, 61, 69, 71, 75, 63, 79, 73, 77, 80, 81]
def test_riemann_products():
baser, gensr = riemann_bsgs
base1, gens1 = get_symmetric_group_sgs(1)
base2, gens2 = get_symmetric_group_sgs(2)
base2a, gens2a = get_symmetric_group_sgs(2, 1)
# R^{a b d0}_d0 = 0
g = Permutation([0, 1, 2, 3, 4, 5])
can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0))
assert can == 0
    # R^{d0 b a}_d0 ; ord = [a,b,d0,-d0]; g = [2,1,0,3,4,5]
# T_c = -R^{a d0 b}_d0; can = [0,2,1,3,5,4]
g = Permutation([2, 1, 0, 3, 4, 5])
can = canonicalize(g, list(range(2, 4)), 0, (baser, gensr, 1, 0))
assert can == [0, 2, 1, 3, 5, 4]
# R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2]
# g = [4,7,1,3,2,0,5,6,8,9]
# T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2}
# can = [0,2,4,6,1,3,5,7,9,8]
g = Permutation([4, 7, 1, 3, 2, 0, 5, 6, 8, 9])
can = canonicalize(g, list(range(2, 8)), 0, (baser, gensr, 2, 0))
assert can == [0, 2, 4, 6, 1, 3, 5, 7, 9, 8]
can1 = canonicalize_naive(g, list(range(2, 8)), 0, (baser, gensr, 2, 0))
assert can == can1
# A symmetric commuting
# R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5}
# g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15]
# T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6}
g = Permutation([12, 10, 5, 2, 8, 0, 4, 6, 13, 1, 7, 3, 9, 11, 14, 15])
    can = canonicalize(g, list(range(14)), 0, (baser, gensr, 2, 0), (base2, gens2, 3, 0))
assert can == [0, 2, 4, 6, 1, 8, 10, 12, 3, 9, 5, 11, 7, 13, 15, 14]
# R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1}
# ord = [a0,a1,a2,a3,a4,a5,d0,-d0,d1,-d1,d2,-d2]
# 0 1 2 3 4 5 6 7 8 9 10 11
# can = [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13]
# T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2}
g = Permutation([10, 0, 2, 6, 8, 11, 1, 3, 4, 5, 7, 9, 12, 13])
can = canonicalize(g, list(range(6, 12)), 0, (baser, gensr, 3, 0))
assert can == [0, 6, 2, 8, 1, 3, 7, 10, 4, 5, 9, 11, 12, 13]
# can1 = canonicalize_naive(g, list(range(6,12)), 0, (baser, gensr, 3, 0))
# assert can == can1
# A^n_{i, j} antisymmetric in i,j
# A_m0^d0_a1 * A_m1^a0_d0; ord = [m0,m1,a0,a1,d0,-d0]
# g = [0,4,3,1,2,5,6,7]
    # T_c = -A_{m0 a1}^d0 * A_m1^a0_d0
# can = [0,3,4,1,2,5,7,6]
base, gens = bsgs_direct_product(base1, gens1, base2a, gens2a)
dummies = list(range(4, 6))
g = Permutation([0, 4, 3, 1, 2, 5, 6, 7])
can = canonicalize(g, dummies, 0, (base, gens, 2, 0))
assert can == [0, 3, 4, 1, 2, 5, 7, 6]
# A^n_{i, j} symmetric in i,j
# A^m0_a0^d2 * A^n0_d2^d1 * A^n1_d1^d0 * A_{m0 d0}^a1
# ordering: first the free indices; then first n, then d
# ord=[n0,n1,a0,a1, m0,-m0,d0,-d0,d1,-d1,d2,-d2]
    #      0  1  2  3   4   5   6   7   8   9  10  11
# g = [4,2,10, 0,11,8, 1,9,6, 5,7,3, 12,13]
# if the dummy indices m_i and d_i were separated,
# one gets
# T_c = A^{n0 d0 d1} * A^n1_d0^d2 * A^m0^a0_d1 * A_m0^a1_d2
# can = [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13]
# If they are not, so can is
# T_c = A^{n0 m0 d0} A^n1_m0^d1 A^{d2 a0}_d0 A_d2^a1_d1
# can = [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13]
# case with single type of indices
base, gens = bsgs_direct_product(base1, gens1, base2, gens2)
dummies = list(range(4, 12))
g = Permutation([4, 2, 10, 0, 11, 8, 1, 9, 6, 5, 7, 3, 12, 13])
can = canonicalize(g, dummies, 0, (base, gens, 4, 0))
assert can == [0, 4, 6, 1, 5, 8, 10, 2, 7, 11, 3, 9, 12, 13]
# case with separated indices
dummies = [list(range(4, 6)), list(range(6, 12))]
sym = [0, 0]
can = canonicalize(g, dummies, sym, (base, gens, 4, 0))
assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 12, 13]
# case with separated indices with the second type of index
# with antisymmetric metric: there is a sign change
sym = [0, 1]
can = canonicalize(g, dummies, sym, (base, gens, 4, 0))
assert can == [0, 6, 8, 1, 7, 10, 4, 2, 9, 5, 3, 11, 13, 12]
def test_graph_certificate():
# test tensor invariants constructed from random regular graphs;
# checked graph isomorphism with networkx
def randomize_graph(size, g):
p = list(range(size))
random.shuffle(p)
g1a = {}
        for k, v in g.items():
g1a[p[k]] = [p[i] for i in v]
return g1a
g1 = {0: [2, 3, 7], 1: [4, 5, 7], 2: [0, 4, 6], 3: [0, 6, 7], 4: [1, 2, 5], 5: [1, 4, 6], 6: [2, 3, 5], 7: [0, 1, 3]}
g2 = {0: [2, 3, 7], 1: [2, 4, 5], 2: [0, 1, 5], 3: [0, 6, 7], 4: [1, 5, 6], 5: [1, 2, 4], 6: [3, 4, 7], 7: [0, 3, 6]}
c1 = graph_certificate(g1)
c2 = graph_certificate(g2)
assert c1 != c2
g1a = randomize_graph(8, g1)
c1a = graph_certificate(g1a)
assert c1 == c1a
g1 = {0: [8, 1, 9, 7], 1: [0, 9, 3, 4], 2: [3, 4, 6, 7], 3: [1, 2, 5, 6], 4: [8, 1, 2, 5], 5: [9, 3, 4, 7], 6: [8, 2, 3, 7], 7: [0, 2, 5, 6], 8: [0, 9, 4, 6], 9: [8, 0, 5, 1]}
g2 = {0: [1, 2, 5, 6], 1: [0, 9, 5, 7], 2: [0, 4, 6, 7], 3: [8, 9, 6, 7], 4: [8, 2, 6, 7], 5: [0, 9, 8, 1], 6: [0, 2, 3, 4], 7: [1, 2, 3, 4], 8: [9, 3, 4, 5], 9: [8, 1, 3, 5]}
c1 = graph_certificate(g1)
c2 = graph_certificate(g2)
assert c1 != c2
g1a = randomize_graph(10, g1)
c1a = graph_certificate(g1a)
assert c1 == c1a
|
|
#!/usr/bin/env python
"""
Signal I/O classes
==================
Classes for reading and writing numpy [1]_ arrays containing sampled
or time-encoded signals from and to HDF5 files using PyTables [2]_.
- ReadArray, WriteArray - I/O classes for basic types.
- ReadSignal, WriteSignal - I/O classes for sampled signals.
- ReadTimeEncodedSignal, WriteTimeEncodedSignal - I/O classes for time-encoded signals.
.. [1] http://numpy.scipy.org/
.. [2] http://www.pytables.com/
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['ReadArray', 'WriteArray',
'ReadSignal', 'WriteSignal',
'ReadSampledSignal', 'WriteSampledSignal',
'ReadTimeEncodedSignal', 'WriteTimeEncodedSignal']
import warnings as w
import time
import tables as t
import numpy as np
# Suppress warnings provoked by the use of integers as HDF5 group names:
w.simplefilter('ignore', t.NaturalNameWarning)
class MissingDataError(AttributeError, LookupError):
"""The file does not possess any data segments."""
pass
class ReadArray:
"""
Read data array from HDF5 file.
A class for reading arrays of some elementary type saved in
an HDF5 file. More than one array may be stored in the file; the
class assumes that each array is stored as a child of a group with
an integer name.
Parameters
----------
filename : str
Name of input HDF5 file.
Methods
-------
close()
Close the opened file.
get_data_nodes()
        Retrieve the nodes of the data arrays stored in the file.
read(block_size=None, id=0)
Read a block of data of length `block_size` from data array `id`.
rewind(id=0)
Reset the data pointer for data array `id` to the first entry.
seek(offset, id=0)
Move the data pointer for data array `id` to the indicated offset.
"""
def __init__(self, filename, *args):
"""Open the specified file for reading."""
self.h5file = t.openFile(filename, 'r+')
# Retrieve the nodes corresponding to the data arrays:
self.data_node_list = self.get_data_nodes()
num_arrays = len(self.data_node_list)
if num_arrays == 0:
raise MissingDataError("file `%s` does not contain any data segments" % filename)
# Initialize read pointers:
self.pos = np.zeros(num_arrays, int)
def __del__(self):
"""Close the opened file before cleaning up."""
self.close()
def close(self):
"""Close the opened file."""
self.h5file.close()
def get_data_nodes(self):
"""Retrieve the nodes of the data arrays stored in the file."""
# Each array must be stored as
# self.h5file.root.ARRAY_NAME.data, where ARRAY_NAME is an
# integer:
data_node_list = []
for n in self.h5file.root:
try:
int(n._v_name)
except ValueError:
raise ValueError('file contains non-integer data name')
try:
node = n.data
except t.exceptions.NoSuchNodeError:
pass
else:
data_node_list.append(node)
return data_node_list
def read(self, block_size=None, id=0):
"""Read a block of data from the specified data array. If no
block size is specified, the returned block contains all data
from the current read pointer to the end of the array. If no
array identifier is specified, the data is read out of the
first array."""
if id >= len(self.data_node_list):
raise ValueError('array id out of range')
g = self.data_node_list[id]
        try:
            if block_size is None:
                block_data = g.read(self.pos[id], len(g))
            else:
                block_data = g.read(self.pos[id],
                                    self.pos[id]+block_size)
        except IndexError:
            return np.array((), g.atom.dtype)
else:
self.pos[id] += len(block_data)
return block_data
def rewind(self, id=0):
"""Reset the data pointer for the specified array to the
beginning of the array."""
self.pos[id] = 0
def seek(self, offset, id=0):
"""Move the data pointer for the specified array to a new
position."""
        if offset < 0 or offset > len(self.data_node_list[id]):
            raise ValueError('invalid offset')
        self.pos[id] = offset
class WriteArray:
"""
Write data array to HDF5 file.
A class for writing arrays of some elementary type to an HDF
file. More than one array may be stored in the file; the class
assumes that each array is stored as a child of a group with an
integer name.
Parameters
----------
filename : str
Output file name.
atom_shape : tuple
Atom shape.
atom_type : dtype
Data type to use in array (e.g., `numpy.float64`).
num_arrays : int
Number of data arrays to write to file.
complevel : int, 0..9
Compression level; 0 disables compression, 9 corresponds to
maximum compression.
complib : {'zlib', 'lzo', 'bzip2'}
Compression filter used by pytables.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
write(block_data, id=0)
Write the specified block of data to data array `id`.
Notes
-----
If the file already contains fewer data arrays than `num_arrays`,
they will be preserved and new arrays will be initialized and
added to the file.
"""
def __init__(self, filename, atom_shape=(), atom_type=np.float64,
num_arrays=1, complevel=1, complib='lzo'):
self.h5file = t.openFile(filename, 'a')
if num_arrays == 0:
raise ValueError("file must contain at least one data array")
# If the file contains fewer than the requested number of data
# arrays, then create the requisite number of new ones:
self.data_node_list = self.get_data_nodes()
if len(self.data_node_list) < num_arrays:
filters = t.Filters(complevel=complevel, complib=complib)
            for i in range(len(self.data_node_list), num_arrays):
self.__add_data(str(i), atom_shape, atom_type, filters)
def __del__(self):
"""Close the opened file before cleaning up."""
self.close()
def __add_data(self, name, atom_shape, atom_type, filters):
"""Add a new data array to the file."""
group_node = self.h5file.createGroup(self.h5file.root, name)
data_node = self.h5file.createEArray(group_node, 'data',
t.Atom.from_sctype(atom_type,
shape=atom_shape),
(0, ), filters=filters)
self.data_node_list.append(data_node)
def __del_data(self, name):
"""Delete the specified data array in the specified group (but
not the group itself) from the file."""
self.h5file.removeNode(self.h5file.root, '/' + name + '/data')
def close(self):
"""Close the opened file."""
self.h5file.close()
def get_data_nodes(self):
"""Retrieve the data array nodes stored within the file."""
# Each array must be stored as
# self.h5file.root.ARRAY_NAME.data, where ARRAY_NAME is an
# integer:
data_node_list = []
for n in self.h5file.root:
try:
int(n._v_name)
except ValueError:
raise ValueError('file contains non-integer data name')
try:
node = n.data
except t.exceptions.NoSuchNodeError:
pass
else:
data_node_list.append(node)
return data_node_list
def write(self, block_data, id=0):
"""Write the specified block of data to the specified data array."""
if id >= len(self.data_node_list):
raise ValueError('array id out of range')
        try:
            self.data_node_list[id].append(block_data)
        except Exception:
            raise IOError('error writing data')
        try:
            self.data_node_list[id].flush()
        except Exception:
            raise IOError('error flushing data')
class MissingDescriptorError(AttributeError, LookupError):
"""The saved signal file does not possess a descriptor."""
pass
class WrongDescriptorError(AttributeError, LookupError):
"""The saved signal file contains an incorrect descriptor."""
pass
class DescriptorDataMismatchError(AttributeError, LookupError):
"""The number of descriptors in the saved signal file differs from
the number of data arrays."""
pass
class SignalDescriptor(t.IsDescription):
"""Descriptor of saved signal."""
comment = t.StringCol(64, pos=1) # description of signal
def get_desc_defaults(desc):
"""Extract the default column values from a descriptor class.
Parameters
----------
desc : subclass of `tables.IsDescription`
Descriptor class.
Returns
-------
vals : list
List of default column values.
See Also
--------
tables.IsDescription
"""
if not issubclass(desc, t.IsDescription):
raise ValueError("argument is not a descriptor class")
vals = []
for key in desc.columns.keys():
vals.append(desc.columns[key].dflt)
return vals
def get_desc_types(desc):
"""Extract the dtypes of the columns of a descriptor class.
Parameters
----------
desc : subclass of `tables.IsDescription`
Descriptor class.
Returns
-------
types : list
List of column dtypes.
See Also
--------
tables.IsDescription
"""
if not issubclass(desc, t.IsDescription):
raise ValueError("argument is not a descriptor class")
types = []
for key in desc.columns.keys():
types.append(desc.columns[key].dtype)
return types
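# For illustration: with the SignalDescriptor class defined above,
# get_desc_defaults(SignalDescriptor) returns the default value of its
# single `comment` column (an empty string), and
# get_desc_types(SignalDescriptor) returns the matching dtype list,
# e.g. [dtype('S64')].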
class ReadSignal(ReadArray):
"""
Read signal from HDF5 file.
A class for reading signals stored in an HDF5 file. A single
file may contain multiple signals. Each signal contains a data
array and a descriptor.
Parameters
----------
filename : str
Input file name.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
read(block_size=None, id=0)
Read a block of data of length `block_size` from data array `id`.
read_desc(id=0)
Return the data in the descriptor of data array `id`.
rewind(id=0)
Reset the data pointer for data array `id` to the first entry.
seek(offset, id=0)
Move the data pointer for data array `id` to the indicated offset.
"""
def __init__(self, filename):
ReadArray.__init__(self, filename)
# Retrieve the data descriptors:
self.desc_node_list = self.get_desc_nodes()
if len(self.data_node_list) != len(self.desc_node_list):
            raise DescriptorDataMismatchError("file `%s` contains differing "
                "numbers of descriptors and data arrays" % filename)
# Validate the descriptors:
self.__validate_descs()
def __validate_descs(self):
"""Validate the signal descriptors in the file. This method
may be implemented in subclasses as necessary."""
pass
def read_desc(self, id=0):
"""Return the data in the specified data descriptor as a list
of values."""
return self.desc_node_list[id].read()[0]
def get_desc_nodes(self):
"""Retrieve the signal descriptors stored within the file."""
# Each descriptor must be stored as
# self.h5file.root.ARRAY_NAME.descriptor, where ARRAY_NAME is an
# integer:
desc_node_list = []
for n in self.h5file.root:
try:
int(n._v_name)
except ValueError:
raise ValueError('file contains non-integer data name')
try:
node = n.descriptor
except t.exceptions.NoSuchNodeError:
pass
else:
desc_node_list.append(node)
return desc_node_list
class WriteSignal(WriteArray):
"""
Write signal to HDF5 file.
A class for writing signals to an HDF5 file. A single file may
contain multiple signals. Each array is stored as a child of a
group with an integer name.
Parameters
----------
filename : str
Output file name.
desc_vals : list of lists
Default descriptor values. Each descriptor's default values
must be specified as a separate list.
desc_defs : list of descriptor classes
Descriptor classes. Each class must be a child of
`tables.IsDescription`.
atom_type : dtype
Data type to use in array (e.g., `numpy.float64`).
complevel : int, 0..9
Compression level; 0 disables compression, 9 corresponds to
maximum compression.
complib : {'zlib', 'lzo', 'bzip2'}
Compression filter used by pytables.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
write(block_data, id=0)
Write the specified block of data to data array `id`.
    Notes
    -----
    If the file already contains fewer data arrays than entries in
    `desc_vals`, they will be preserved and new arrays will be
    initialized and added to the file.
"""
def __init__(self, filename,
desc_vals=[get_desc_defaults(SignalDescriptor)],
desc_defs=[SignalDescriptor],
atom_type=np.float64,
complevel=1, complib='lzo'):
"""Open the specified file for writing. If the file already
contains data arrays, new arrays are added to bring the total
number up to the number of specified signal descriptors."""
# Make sure each data segment has a descriptor:
if len(desc_vals) != len(desc_defs):
raise ValueError('number of descriptor definitions does ' +
'not equal the number of descriptor data tuples')
# Validate the descriptors:
self.__validate_descs(desc_vals, desc_defs)
# Create the data arrays:
WriteArray.__init__(self, filename, (), atom_type,
len(desc_vals),
complevel, complib)
        # When the number of specified descriptors exceeds the number
        # actually in the file, remove any existing descriptors so
        # that they can be replaced by the specified descriptors:
        self.desc_node_list = self.get_desc_nodes()
        if len(self.desc_node_list) < len(desc_vals):
            for i in range(len(self.desc_node_list)):
                self.h5file.removeNode(self.h5file.root,
                                       '/' + str(i) + '/descriptor')
        # Create descriptors for the data segments:
        for i in range(len(desc_vals)):
            self.__add_desc(str(i), desc_defs[i], desc_vals[i])
def __validate_descs(self, desc_vals, desc_defs):
"""Validate the specified signal descriptors. This method
may be implemented in subclasses as necessary."""
pass
def __add_desc(self, name, desc_def, desc_val):
"""Add a new descriptor to the array in the specified group."""
desc_node = \
self.h5file.createTable(self.h5file.root.__getattr__(name),
'descriptor', desc_def, 'descriptor')
        # Verify that the specified descriptor can accommodate the
        # number of values that are to be stored in it:
if len(desc_def.columns) != len(desc_val):
raise ValueError("list of descriptor field values " +
"is of incorrect length")
else:
desc_node.append([tuple(desc_val)])
desc_node.flush()
self.desc_node_list.append(desc_node)
def get_desc_nodes(self):
"""Retrieve the signal descriptors stored within the file."""
# Each descriptor must be stored as
# self.h5file.root.ARRAY_NAME.descriptor, where ARRAY_NAME is an
# integer:
desc_node_list = []
for n in self.h5file.root:
try:
int(n._v_name)
except ValueError:
raise ValueError('file contains non-integer data name')
try:
node = n.descriptor
except t.exceptions.NoSuchNodeError:
pass
else:
desc_node_list.append(node)
return desc_node_list
class SampledSignalDescriptor(t.IsDescription):
"""Descriptor of saved sampled signal."""
comment = t.StringCol(64, pos=1) # description of signal
bw = t.FloatCol(pos=2, dflt=1.0) # bandwidth (rad/s)
dt = t.FloatCol(pos=3, dflt=1.0) # interval between samples (s)
class TimeEncodedSignalDescriptor(t.IsDescription):
"""Descriptor of saved time-encoded signal."""
comment = t.StringCol(64, pos=1) # description of signal
bw = t.FloatCol(pos=2, dflt=1.0) # bandwidth (rad/s)
dt = t.FloatCol(pos=3, dflt=1.0) # interval between samples (s)
b = t.FloatCol(pos=4, dflt=1.0) # bias
d = t.FloatCol(pos=5, dflt=1.0) # threshold
k = t.FloatCol(pos=6, dflt=1.0) # integration constant
class ReadSampledSignal(ReadSignal):
"""
Read a sampled signal from an HDF5 file.
A class for reading sampled signals stored in an HDF5 file. A
single file may contain multiple signals. Each signal contains a
data array and a descriptor.
Parameters
----------
filename : str
Input file name.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
read(block_size=None, id=0)
Read a block of data of length `block_size` from data array `id`.
read_desc(id=0)
Return the data in the descriptor of data array `id`.
rewind(id=0)
Reset the data pointer for data array `id` to the first entry.
seek(offset, id=0)
Move the data pointer for data array `id` to the indicated offset.
"""
def __validate_descs(self):
"""Validate the descriptors in the file by making sure that
they possess the same columns as the SampledSignalDescriptor
class."""
        for desc_node in self.desc_node_list:
            if set(desc_node.colnames) != \
               set(SampledSignalDescriptor.columns.keys()):
                raise WrongDescriptorError("file `%s` contains an "
                    "unrecognized descriptor" % self.h5file.filename)
class WriteSampledSignal(WriteSignal):
"""
Write a sampled signal to an HDF5 file.
A class for writing sampled signals to an HDF5 file. A single
file may contain multiple signals. Each signal is stored as a
child of a group with an integer name.
Parameters
----------
filename : str
Output file name.
desc_vals : list of lists
Default descriptor values. Each descriptor's default values
must be specified as a separate list.
atom_type : dtype
Data type to use in array (e.g., `numpy.float64`).
complevel : int, 0..9
Compression level; 0 disables compression, 9 corresponds to
maximum compression.
complib : {'zlib', 'lzo', 'bzip2'}
Compression filter used by pytables.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
write(block_data, id=0)
Write the specified block of data to data array `id`.
    Notes
    -----
    If the file already contains fewer data arrays than entries in
    `desc_vals`, they will be preserved and new arrays will be
    initialized and added to the file.
"""
def __init__(self, filename,
desc_vals=[get_desc_defaults(SampledSignalDescriptor)],
atom_type=np.float64,
complevel=1, complib='lzo'):
"""Open the specified file for writing. If the file already
contains data arrays, new arrays are added to bring the total
number up to the number of specified signal descriptors. """
WriteSignal.__init__(self, filename, desc_vals,
[SampledSignalDescriptor]*len(desc_vals),
atom_type, complevel, complib)
def __validate_descs(self, desc_vals, desc_defs):
"""Validate the specified signal descriptors and values by
making sure that they agree."""
for (desc_val, desc_def) in zip(desc_vals, desc_defs):
            if list(map(type, desc_val)) != get_desc_types(desc_def):
                raise WrongDescriptorError("descriptor values do not match format")
class ReadTimeEncodedSignal(ReadSignal):
"""
Read a time-encoded signal from an HDF5 file.
A class for reading time-encoded signals stored in an HDF5
file. A single file may contain multiple signals. Each signal
contains a data array and a descriptor.
Parameters
----------
filename : str
Input file name.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
read(block_size=None, id=0)
Read a block of data of length `block_size` from data array `id`.
read_desc(id=0)
Return the data in the descriptor of data array `id`.
rewind(id=0)
Reset the data pointer for data array `id` to the first entry.
seek(offset, id=0)
Move the data pointer for data array `id` to the indicated offset.
"""
def __validate_descs(self):
"""Validate the descriptors in the file by making sure that
they possess the same columns as the
TimeEncodedSignalDescriptor class."""
        for desc_node in self.desc_node_list:
            if set(desc_node.colnames) != \
               set(TimeEncodedSignalDescriptor.columns.keys()):
                raise WrongDescriptorError("file `%s` contains an "
                    "unrecognized descriptor" % self.h5file.filename)
class WriteTimeEncodedSignal(WriteSignal):
"""
Write a time-encoded signal to an HDF5 file.
A class for writing time-encoded signals to HDF5 files. A
single file may contain multiple signals. Each signal is stored as
a child of a group with an integer name.
Parameters
----------
filename : str
Output file name.
desc_vals : list of lists
Default descriptor values. Each descriptor's default values
must be specified as a separate list.
atom_type : dtype
Data type to use in array (e.g., `numpy.float64`).
complevel : int, 0..9
Compression level; 0 disables compression, 9 corresponds to
maximum compression.
complib : {'zlib', 'lzo', 'bzip2'}
Compression filter used by pytables.
Methods
-------
close()
Close the opened file.
get_data_nodes()
Retrieve the nodes of the data arrays stored in the file.
get_desc_nodes()
Retrieve the descriptor nodes of the data arrays stored in
the file.
write(block_data, id=0)
Write the specified block of data to data array `id`.
    Notes
    -----
    If the file already contains fewer data arrays than entries in
    `desc_vals`, they will be preserved and new arrays will be
    initialized and added to the file.
"""
def __init__(self, filename,
desc_vals=[get_desc_defaults(TimeEncodedSignalDescriptor)],
atom_type=np.float64, complevel=1, complib='lzo'):
"""Open the specified file for writing. If the file already
contains data arrays, new arrays are added to bring the total
number up to the number of specified signal descriptors. """
WriteSignal.__init__(self, filename, desc_vals,
[TimeEncodedSignalDescriptor]*len(desc_vals),
atom_type, complevel, complib)
def __validate_descs(self, desc_vals, desc_defs):
"""Validate the specified signal descriptors and values by
making sure that they agree."""
for (desc_val, desc_def) in zip(desc_vals, desc_defs):
            if list(map(type, desc_val)) != get_desc_types(desc_def):
                raise WrongDescriptorError("descriptor values do not match format")
if __name__ == '__main__':
# Short demo of how to use the above classes:
from os import remove
from tempfile import mktemp
from atexit import register
# Write to a file:
file_name = mktemp() + '.h5'
N = 1000
x1 = np.random.rand(N)
x2 = np.random.rand(N)
    writer = WriteArray(file_name, num_arrays=2)
    writer.write(x1)
    writer.write(x2, id=1)
    writer.close()
# Clean up on exit:
register(remove, file_name)
# Read the data from the file:
r = ReadArray(file_name)
y1 = r.read()
y2 = r.read(id=1)
r.close()
assert np.all(x1 == y1)
assert np.all(x2 == y2)
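    # Round-trip sketch for the sampled-signal classes (an added
    # illustration, not part of the original demo): the default
    # SampledSignalDescriptor values are stored alongside the data.
    sig_name = mktemp() + '.h5'
    ws = WriteSampledSignal(sig_name)
    ws.write(x1)
    ws.close()
    register(remove, sig_name)
    rs = ReadSampledSignal(sig_name)
    assert np.all(x1 == rs.read())
    comment, bw, dt = rs.read_desc()  # default descriptor values
    rs.close()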
|
|
# Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in group type properties."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
import webob
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_CGSNAPSHOT_TYPE = "default_cgsnapshot_type"
def create(context,
name,
group_specs=None,
is_public=True,
projects=None,
description=None):
"""Creates group types."""
group_specs = group_specs or {}
projects = projects or []
elevated = context if context.is_admin else context.elevated()
try:
type_ref = db.group_type_create(elevated,
dict(name=name,
group_specs=group_specs,
is_public=is_public,
description=description),
projects=projects)
except db_exc.DBError:
LOG.exception('DB error:')
raise exception.GroupTypeCreateFailed(name=name,
group_specs=group_specs)
return type_ref
def update(context, id, name, description, is_public=None):
"""Update group type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidGroupType(reason=msg)
elevated = context if context.is_admin else context.elevated()
try:
db.group_type_update(elevated, id,
dict(name=name, description=description,
is_public=is_public))
except db_exc.DBError:
LOG.exception('DB error:')
raise exception.GroupTypeUpdateFailed(id=id)
def destroy(context, id):
    """Marks group types as deleted."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidGroupType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
try:
db.group_type_destroy(elevated, id)
except exception.GroupTypeInUse as e:
msg = _('Target group type is still in use. %s') % six.text_type(e)
raise webob.exc.HTTPBadRequest(explanation=msg)
def get_all_group_types(context, inactive=0, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Get all non-deleted group_types.
Pass true as argument if you want deleted group types returned also.
"""
grp_types = db.group_type_get_all(context, inactive, filters=filters,
marker=marker, limit=limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs, offset=offset,
list_result=list_result)
return grp_types
def get_group_type(ctxt, id, expected_fields=None):
"""Retrieves single group type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidGroupType(reason=msg)
if ctxt is None:
ctxt = context.get_admin_context()
return db.group_type_get(ctxt, id, expected_fields=expected_fields)
def get_group_type_by_name(context, name):
"""Retrieves single group type by name."""
if name is None:
msg = _("name cannot be None")
raise exception.InvalidGroupType(reason=msg)
return db.group_type_get_by_name(context, name)
def get_default_group_type():
"""Get the default group type."""
name = CONF.default_group_type
grp_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
grp_type = get_group_type_by_name(ctxt, name)
except exception.GroupTypeNotFoundByName:
# Couldn't find group type with the name in default_group_type
# flag, record this issue and move on
LOG.exception('Default group type is not found. '
'Please check default_group_type config.')
return grp_type
def get_default_cgsnapshot_type():
"""Get the default group type for migrating cgsnapshots.
Get the default group type for migrating consistencygroups to
groups and cgsnapshots to group_snapshots.
"""
grp_type = {}
ctxt = context.get_admin_context()
try:
grp_type = get_group_type_by_name(ctxt, DEFAULT_CGSNAPSHOT_TYPE)
except exception.GroupTypeNotFoundByName:
# Couldn't find DEFAULT_CGSNAPSHOT_TYPE group type.
# Record this issue and move on.
LOG.exception('Default cgsnapshot type %s is not found.',
DEFAULT_CGSNAPSHOT_TYPE)
return grp_type
def is_default_cgsnapshot_type(group_type_id):
    cgsnap_type = get_default_cgsnapshot_type()
    return bool(cgsnap_type) and group_type_id == cgsnap_type['id']
def get_group_type_specs(group_type_id, key=False):
group_type = get_group_type(context.get_admin_context(),
group_type_id)
group_specs = group_type['group_specs']
    if key:
        return group_specs.get(key) or False
    return group_specs
def is_public_group_type(context, group_type_id):
"""Return is_public boolean value of group type"""
group_type = db.group_type_get(context, group_type_id)
return group_type['is_public']
def add_group_type_access(context, group_type_id, project_id):
"""Add access to group type for project_id."""
if group_type_id is None:
msg = _("group_type_id cannot be None")
raise exception.InvalidGroupType(reason=msg)
elevated = context if context.is_admin else context.elevated()
if is_public_group_type(elevated, group_type_id):
msg = _("Type access modification is not applicable to public group "
"type.")
raise exception.InvalidGroupType(reason=msg)
return db.group_type_access_add(elevated, group_type_id, project_id)
def remove_group_type_access(context, group_type_id, project_id):
"""Remove access to group type for project_id."""
if group_type_id is None:
msg = _("group_type_id cannot be None")
raise exception.InvalidGroupType(reason=msg)
elevated = context if context.is_admin else context.elevated()
if is_public_group_type(elevated, group_type_id):
msg = _("Type access modification is not applicable to public group "
"type.")
raise exception.InvalidGroupType(reason=msg)
return db.group_type_access_remove(elevated, group_type_id, project_id)
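# Usage sketch (hypothetical identifiers; requires a configured Cinder
# deployment and an admin RequestContext, so it is shown as commented-out
# code rather than something this module executes on import):
#
#     ctxt = context.get_admin_context()
#     grp_type = create(ctxt, 'private_grp_type', is_public=False)
#     add_group_type_access(ctxt, grp_type['id'], 'some-project-id')
#     assert not is_public_group_type(ctxt, grp_type['id'])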
|
|
'''
The one parameter exponential family distributions used by GLM.
'''
#TODO: quasi, quasibinomial, quasipoisson
#see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import numpy as np
from scipy import special
import links as L
import varfuncs as V
class Family(object):
"""
The parent class for one-parameter exponential families.
Parameters
----------
link : a link function instance
Link is the linear transformation function.
See the individual families for available links.
variance : a variance function
Measures the variance as a function of the mean probabilities.
See the individual families for the default variance function.
"""
#TODO: change these class attributes, use valid somewhere...
valid = [-np.inf, np.inf]
tol = 1.0e-05
links = []
def _setlink(self, link):
"""
Helper method to set the link for a family.
Raises a ValueError exception if the link is not available. Note that
the error message might not be that informative because it tells you
that the link should be in the base class for the link function.
See glm.GLM for a list of appropriate links for each family but note
that not all of these are currently available.
"""
#TODO: change the links class attribute in the families to hold meaningful
# information instead of a list of links instances such as
#[<statsmodels.family.links.Log object at 0x9a4240c>,
# <statsmodels.family.links.Power object at 0x9a423ec>,
# <statsmodels.family.links.Power object at 0x9a4236c>]
# for Poisson...
        if not isinstance(link, L.Link):
            raise TypeError("The input should be a valid Link object.")
        if hasattr(self, "links"):
            validlink = any(isinstance(link, _) for _ in self.links)
            if not validlink:
                errmsg = "Invalid link for family, should be in %s. (got %s)"
                raise ValueError(errmsg % (repr(self.links), link))
        self._link = link
def _getlink(self):
"""
Helper method to get the link for a family.
"""
return self._link
#link property for each family
#pointer to link instance
link = property(_getlink, _setlink, doc="Link function for family")
def __init__(self, link, variance):
self.link = link()
self.variance = variance
def starting_mu(self, y):
"""
Starting value for mu in the IRLS algorithm.
Parameters
----------
y : array
The untransformed response variable.
Returns
-------
mu_0 : array
The first guess on the transformed response variable.
        Notes
        -----
        mu_0 = (endog + mean(endog))/2.
        Only the Binomial family takes a different initial value.
"""
return (y + y.mean())/2.
def weights(self, mu):
"""
Weights for IRLS steps
Parameters
----------
mu : array-like
The transformed mean response variable in the exponential family
Returns
-------
w : array
The weights for the IRLS steps
Notes
-----
`w` = 1 / (link'(`mu`)**2 * variance(`mu`))
"""
return 1. / (self.link.deriv(mu)**2 * self.variance(mu))
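    # For example (an illustrative note): for the Poisson family with its
    # default log link, link'(mu) = 1/mu and variance(mu) = mu, so the
    # IRLS weights reduce to mu itself; see the sanity-check sketch at the
    # end of this module.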
def deviance(self, Y, mu, scale=1.):
"""
Deviance of (Y,mu) pair.
Deviance is usually defined as twice the loglikelihood ratio.
Parameters
----------
Y : array-like
The endogenous response variable
mu : array-like
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional scale argument
Returns
-------
DEV : array
The value of deviance function defined below.
Notes
-----
        DEV = sum_i(2*loglike(Y_i,Y_i) - 2*loglike(Y_i,mu_i)) / scale
The deviance functions are analytically defined for each family.
"""
raise NotImplementedError
def resid_dev(self, Y, mu, scale=1.):
"""
The deviance residuals
Parameters
----------
Y : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
Deviance residuals.
Notes
-----
The deviance residuals are defined for each family.
"""
raise NotImplementedError
def fitted(self, eta):
"""
Fitted values based on linear predictors eta.
Parameters
-----------
eta : array
Values of the linear predictor of the model.
dot(X,beta) in a classical linear model.
Returns
--------
mu : array
The mean response variables given by the inverse of the link
function.
"""
return self.link.inverse(eta)
def predict(self, mu):
"""
Linear predictors based on given mu values.
Parameters
----------
mu : array
The mean response variables
Returns
-------
eta : array
Linear predictors based on the mean response variables. The value
of the link function at the given mu.
"""
return self.link(mu)
def loglike(self, Y, mu, scale=1.):
"""
The loglikelihood function.
Parameters
----------
`Y` : array
Usually the endogenous response variable.
`mu` : array
Usually but not always the fitted mean response variable.
Returns
-------
llf : float
The value of the loglikelihood evaluated at (Y,mu).
Notes
-----
This is defined for each family. Y and mu are not restricted to
`Y` and `mu` respectively. For instance, the deviance function calls
both loglike(Y,Y) and loglike(Y,mu) to get the likelihood ratio.
"""
raise NotImplementedError
def resid_anscombe(self, Y, mu):
"""
        The Anscombe residuals.
See also
--------
        statsmodels.genmod.families.family.Family docstring and the
        `resid_anscombe` methods of the individual families for more information.
"""
raise NotImplementedError
class Poisson(Family):
"""
Poisson exponential family.
Parameters
----------
link : a link instance, optional
The default link for the Poisson family is the log link. Available
links are log, identity, and sqrt. See statsmodels.family.links for
more information.
Attributes
----------
Poisson.link : a link instance
The link function of the Poisson instance.
Poisson.variance : varfuncs instance
`variance` is an instance of
statsmodels.genmod.families.family.varfuncs.mu
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.sqrt]
variance = V.mu
valid = [0, np.inf]
def __init__(self, link=L.log):
self.variance = Poisson.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""Poisson deviance residual
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
resid_dev = sign(Y-mu)*sqrt(2*Y*log(Y/mu)-2*(Y-mu))
"""
return np.sign(Y-mu) * np.sqrt(2*Y*np.log(Y/mu)-2*(Y-mu))/scale
def deviance(self, Y, mu, scale=1.):
'''
Poisson deviance function
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (Y,mu) as defined below.
Notes
-----
If a constant term is included it is defined as
:math:`deviance = 2*\\sum_{i}(Y*\\log(Y/\\mu))`
'''
if np.any(Y==0):
retarr = np.zeros(Y.shape)
Ymu = Y/mu
mask = Ymu != 0
YmuMasked = Ymu[mask]
Ymasked = Y[mask]
np.putmask(retarr, mask, Ymasked*np.log(YmuMasked)/scale)
return 2*np.sum(retarr)
else:
return 2*np.sum(Y*np.log(Y/mu))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Poisson exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
llf = scale * sum(-mu + Y*log(mu) - gammaln(Y+1))
where gammaln is the log gamma function
"""
return scale * np.sum(-mu + Y*np.log(mu)-special.gammaln(Y+1))
def resid_anscombe(self, Y, mu):
"""
Anscombe residuals for the Poisson exponential family distribution
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
            The Anscombe residuals for the Poisson family defined below
Notes
-----
        resid_anscombe = :math:`(3/2.)*(Y^{2/3} - \\mu^{2/3})/\\mu^{1/6}`
"""
return (3/2.)*(Y**(2/3.)-mu**(2/3.))/mu**(1/6.)
class Gaussian(Family):
"""
Gaussian exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gaussian family is the identity link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gaussian.link : a link instance
The link function of the Gaussian instance
Gaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.constant
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.constant
def __init__(self, link=L.identity):
self.variance = Gaussian.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""
Gaussian deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
--------
`resid_dev` = (`Y` - `mu`)/sqrt(variance(`mu`))
"""
return (Y - mu) / np.sqrt(self.variance(mu))/scale
def deviance(self, Y, mu, scale=1.):
"""
Gaussian deviance function
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (Y,mu) as defined below.
Notes
--------
`deviance` = sum((Y-mu)**2)
"""
return np.sum((Y-mu)**2)/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Gaussian exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
Scales the loglikelihood function. The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
If the link is the identity link function then the
loglikelihood function is the same as the classical OLS model.
llf = -(nobs/2)*(log(SSR) + (1 + log(2*pi/nobs)))
where SSR = sum((Y-link^(-1)(mu))**2)
If the links is not the identity link then the loglikelihood
function is defined as
llf = sum((`Y`*`mu`-`mu`**2/2)/`scale` - `Y`**2/(2*`scale`) - \
(1/2.)*log(2*pi*`scale`))
"""
if isinstance(self.link, L.Power) and self.link.power == 1:
# This is just the loglikelihood for classical OLS
nobs2 = Y.shape[0]/2.
            SSR = np.sum((Y - self.fitted(mu))**2)
llf = -np.log(SSR) * nobs2
llf -= (1+np.log(np.pi/nobs2))*nobs2
return llf
else:
# Return the loglikelihood for Gaussian GLM
return np.sum((Y*mu-mu**2/2)/scale-Y**2/(2*scale)-\
.5*np.log(2*np.pi*scale))
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the Gaussian exponential family distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gaussian family defined below
Notes
--------
`resid_anscombe` = `Y` - `mu`
"""
return Y-mu
class Gamma(Family):
"""
Gamma exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gamma family is the inverse link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gamma.link : a link instance
The link function of the Gamma instance
Gamma.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_squared
See also
--------
statsmodels.genmod.families.family.Family
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.mu_squared
def __init__(self, link=L.inverse_power):
self.variance = Gamma.variance
self.link = link()
#TODO: note the note
def _clean(self, x):
"""
Helper function to trim the data so that is in (0,inf)
Notes
-----
The need for this function was discovered through usage and its
possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, 1.0e-10, np.inf)
def deviance(self, Y, mu, scale=1.):
"""
Gamma deviance function
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = 2*sum((Y - mu)/mu - log(Y/mu))
"""
Y_mu = self._clean(Y/mu)
return 2 * np.sum((Y - mu)/mu - np.log(Y_mu))
def resid_dev(self, Y, mu, scale=1.):
"""
Gamma deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
`resid_dev` = sign(Y - mu) * sqrt(-2*(-(Y-mu)/mu + log(Y/mu)))
"""
Y_mu = self._clean(Y/mu)
return np.sign(Y-mu) * np.sqrt(-2*(-(Y-mu)/mu + np.log(Y_mu)))
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Gamma exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
--------
llf = -1/scale * sum(Y/mu + log(mu) + (scale-1)*log(Y) + log(scale) +\
scale*gammaln(1/scale))
where gammaln is the log gamma function.
"""
return - 1./scale * np.sum(Y/mu+np.log(mu)+(scale-1)*np.log(Y)\
+np.log(scale)+scale*special.gammaln(1./scale))
# in Stata scale is set to equal 1 for reporting llf
# in R it's the dispersion, though there is a loss of precision vs. our
# results due to an assumed difference in implementation
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for Gamma exponential family distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gamma family defined below
Notes
-----
resid_anscombe = 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
"""
return 3*(Y**(1/3.)-mu**(1/3.))/mu**(1/3.)
class Binomial(Family):
"""
Binomial exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Binomial family is the logit link.
Available links are logit, probit, cauchy, log, and cloglog.
See statsmodels.family.links for more information.
Attributes
----------
Binomial.link : a link instance
The link function of the Binomial instance
Binomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.binary
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
endog for Binomial can be specified in one of three ways.
"""
links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog]
variance = V.binary # this is not used below in an effort to include n
def __init__(self, link=L.logit): #, n=1.):
#TODO: it *should* work for a constant n>1 actually, if data_weights is
# equal to n
self.n = 1 # overwritten by initialize if needed but
# always used to initialize variance
# since Y is assumed/forced to be (0,1)
self.variance = V.Binomial(n=self.n)
self.link = link()
def starting_mu(self, y):
"""
The starting values for the IRLS algorithm for the Binomial family.
A good choice for the binomial family is
starting_mu = (y + .5)/2
"""
return (y + .5)/2
def initialize(self, Y):
'''
Initialize the response variable.
Parameters
----------
Y : array
Endogenous response variable
Returns
--------
If `Y` is binary, returns `Y`
If `Y` is a 2d array, then the input is assumed to be in the format
(successes, failures) and
successes/(success + failures) is returned. And n is set to
successes + failures.
'''
if (Y.ndim > 1 and Y.shape[1] > 1):
y = Y[:,0]
self.n = Y.sum(1) # overwrite self.n for deviance below
return y*1./self.n
else:
return Y
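    # For example (an illustrative note): Y = np.array([[3, 7], [5, 5]]) is
    # interpreted as (successes, failures) pairs, so initialize returns
    # [0.3, 0.5] and sets self.n to [10, 10].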
def deviance(self, Y, mu, scale=1.):
'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
Y : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
--------
deviance : float
The deviance function as defined below
Notes
-----
        If the endogenous variable is binary:
        `deviance` = -2*sum(I_one * log(mu) + I_zero * log(1-mu))
        where I_one is an indicator function that evaluates to 1 if Y_i == 1
        and I_zero is an indicator function that evaluates to 1 if Y_i == 0.
        If the model is binomial:
        `deviance` = 2*sum(n*(Y*log(Y/mu) + (1-Y)*log((1-Y)/(1-mu))))
        where Y and n are as defined in Binomial.initialize.
'''
if np.shape(self.n) == () and self.n == 1:
one = np.equal(Y,1)
return -2 * np.sum(one * np.log(mu+1e-200) + (1-one) * np.log(1-mu+1e-200))
else:
return 2*np.sum(self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*np.log((1-Y)/(1-mu)+1e-200)))
def resid_dev(self, Y, mu, scale=1.):
"""
Binomial deviance residuals
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
If `Y` is binary:
resid_dev = sign(Y-mu)*sqrt(-2*log(I_one*mu + I_zero*(1-mu)))
where I_one is an indicator function that evaluates as 1 if Y == 1
and I_zero is an indicator function that evaluates as 1 if Y == 0.
If `Y` is binomial:
resid_dev = sign(Y-mu)*sqrt(2*n*(Y*log(Y/mu)+(1-Y)*log((1-Y)/(1-mu))))
where Y and n are as defined in Binomial.initialize.
"""
mu = self.link._clean(mu)
if np.shape(self.n) == () and self.n == 1:
one = np.equal(Y,1)
return np.sign(Y-mu)*np.sqrt(-2*np.log(one*mu+(1-one)*(1-mu)))\
/scale
else:
return np.sign(Y-mu) * np.sqrt(2*self.n*(Y*np.log(Y/mu+1e-200)+(1-Y)*\
np.log((1-Y)/(1-mu)+1e-200)))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for Binomial exponential family distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
--------
If `Y` is binary:
`llf` = scale*sum(Y*log(mu/(1-mu))+log(1-mu))
If `Y` is binomial:
        `llf` = scale*sum(gammaln(n+1) - gammaln(y+1) - gammaln(n-y+1) +
                y*log(mu/(1-mu)) + n*log(1-mu))
where gammaln is the log gamma function and y = Y*n with Y and n
as defined in Binomial initialize. This simply makes y the original
number of successes.
"""
if np.shape(self.n) == () and self.n == 1:
return scale*np.sum(Y*np.log(mu/(1-mu)+1e-200)+np.log(1-mu))
else:
y=Y*self.n #convert back to successes
return scale * np.sum(special.gammaln(self.n+1)-\
special.gammaln(y+1)-special.gammaln(self.n-y+1)\
+y*np.log(mu/(1-mu))+self.n*np.log(1-mu))
def resid_anscombe(self, Y, mu):
'''
The Anscombe residuals
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
sqrt(n)*(cox_snell(Y)-cox_snell(mu))/(mu**(1/6.)*(1-mu)**(1/6.))
where cox_snell is defined as
        cox_snell(x) = betainc(2/3., 2/3., x)*beta(2/3., 2/3.)
where betainc is the incomplete beta function
The name 'cox_snell' is idiosyncratic and is simply used for
convenience following the approach suggested in Cox and Snell (1968).
Further note that
cox_snell(x) = x**(2/3.)/(2/3.)*hyp2f1(2/3.,1/3.,5/3.,x)
where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
residuals are sometimes defined in the literature using the
hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.
References
----------
Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
paper." Journal of the Royal Statistical Society B. 15, 229-30.
Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
Journal of the Royal Statistical Society B. 30, 248-75.
'''
cox_snell = lambda x: special.betainc(2/3., 2/3., x)\
*special.beta(2/3.,2/3.)
return np.sqrt(self.n)*(cox_snell(Y)-cox_snell(mu))/\
(mu**(1/6.)*(1-mu)**(1/6.))
class InverseGaussian(Family):
"""
InverseGaussian exponential family.
Parameters
----------
link : a link instance, optional
The default link for the inverse Gaussian family is the
inverse squared link.
Available links are inverse_squared, inverse, log, and identity.
See statsmodels.family.links for more information.
Attributes
----------
InverseGaussian.link : a link instance
The link function of the inverse Gaussian instance
InverseGaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_cubed
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
    The inverse Gaussian distribution is sometimes referred to in the
    literature as the Wald distribution.
"""
links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
variance = V.mu_cubed
def __init__(self, link=L.inverse_squared):
self.variance = InverseGaussian.variance
self.link = link()
def resid_dev(self, Y, mu, scale=1.):
"""
Returns the deviance residuals for the inverse Gaussian family.
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
        `resid_dev` = sign(Y-mu)*sqrt((Y-mu)**2/(Y*mu**2))
"""
return np.sign(Y-mu) * np.sqrt((Y-mu)**2/(Y*mu**2))/scale
def deviance(self, Y, mu, scale=1.):
"""
Inverse Gaussian deviance function
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
        `deviance` = sum((Y - mu)**2/(Y*mu**2))/scale
"""
return np.sum((Y-mu)**2/(Y*mu**2))/scale
def loglike(self, Y, mu, scale=1.):
"""
Loglikelihood function for inverse Gaussian distribution.
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
`llf` = -(1/2.)*sum((Y-mu)**2/(Y*mu**2*scale) + log(scale*Y**3)\
+ log(2*pi))
"""
return -.5 * np.sum((Y-mu)**2/(Y*mu**2*scale)\
+ np.log(scale*Y**3) + np.log(2*np.pi))
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the inverse Gaussian distribution
Parameters
----------
Y : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the inverse Gaussian distribution as
defined below
Notes
-----
`resid_anscombe` = log(Y/mu)/sqrt(mu)
"""
return np.log(Y/mu)/np.sqrt(mu)
class NegativeBinomial(Family):
"""
Negative Binomial exponential family.
Parameters
----------
link : a link instance, optional
The default link for the negative binomial family is the log link.
Available links are log, cloglog, identity, nbinom and power.
See statsmodels.family.links for more information.
alpha : float, optional
The ancillary parameter for the negative binomial distribution.
For now `alpha` is assumed to be nonstochastic. The default value
is 1. Permissible values are usually assumed to be between .01 and 2.
Attributes
----------
NegativeBinomial.link : a link instance
The link function of the negative binomial instance
NegativeBinomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.nbinom
See also
--------
statsmodels.genmod.families.family.Family
Notes
-----
    Power link functions are not yet supported.
"""
links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
#TODO: add the ability to use the power links with an if test
# similar to below
variance = V.nbinom
def __init__(self, link=L.log, alpha=1.):
self.alpha = alpha
self.variance = V.NegativeBinomial(alpha=self.alpha)
if isinstance(link, L.NegativeBinomial):
self.link = link(alpha=self.alpha)
else:
self.link = link()
def _clean(self, x):
"""
Helper function to trim the data so that is in (0,inf)
Notes
-----
The need for this function was discovered through usage and its
possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, 1.0e-10, np.inf)
def deviance(self, Y, mu, scale=1.):
"""
Returns the value of the deviance function.
Parameters
-----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = sum(piecewise)
where piecewise is defined as
if :math:`Y_{i} == 0:`
piecewise_i = :math:`2\\log\\left(1+\\alpha*\\mu\\right)/\\alpha`
if :math:`Y_{i} > 0`:
piecewise_i = :math:`2 Y \\log(Y/\\mu)-2/\\alpha(1+\\alpha Y)*\\log((1+\\alpha Y)/(1+\\alpha\\mu))`
"""
        iszero = np.equal(Y, 0)
        notzero = 1 - iszero
        Y_mu = self._clean(Y/mu)
        tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
        tmp += notzero*(2*Y*np.log(Y_mu)-2/self.alpha*(1+self.alpha*Y)*
                np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
        return np.sum(tmp)/scale
def resid_dev(self, Y, mu, scale=1.):
'''
Negative Binomial Deviance Residual
Parameters
----------
Y : array-like
`Y` is the response variable
mu : array-like
`mu` is the fitted value of the model
scale : float, optional
An optional argument to divide the residuals by scale
Returns
--------
resid_dev : array
The array of deviance residuals
Notes
-----
`resid_dev` = sign(Y-mu) * sqrt(piecewise)
where piecewise is defined as
if :math:`Y_i = 0`:
:math:`piecewise_i = 2*log(1+alpha*mu)/alpha`
if :math:`Y_i > 0`:
:math:`piecewise_i = 2*Y*log(Y/\\mu)-2/\\alpha*(1+\\alpha*Y)*log((1+\\alpha*Y)/(1+\\alpha*\\mu))`
'''
        iszero = np.equal(Y, 0)
        notzero = 1 - iszero
        Y_mu = self._clean(Y/mu)
        tmp = iszero*2*np.log(1+self.alpha*mu)/self.alpha
        tmp += notzero*(2*Y*np.log(Y_mu)-2/self.alpha*(1+self.alpha*Y)*
                np.log((1+self.alpha*Y)/(1+self.alpha*mu)))
        return np.sign(Y-mu)*np.sqrt(tmp)/scale
def loglike(self, Y, fittedvalues=None):
"""
The loglikelihood function for the negative binomial family.
Parameters
----------
Y : array-like
Endogenous response variable
fittedvalues : array-like
The linear fitted values of the model. This is dot(exog,params).
Returns
-------
llf : float
The value of the loglikelihood function evaluated at (Y,mu,scale)
as defined below.
Notes
-----
sum(Y*log(alpha*exp(fittedvalues)/(1+alpha*exp(fittedvalues))) -\
log(1+alpha*exp(fittedvalues))/alpha + constant)
where constant is defined as
constant = gammaln(Y + 1/alpha) - gammaln(Y + 1) - gammaln(1/alpha)
"""
        # Note: mu is not needed here; the linear fitted values are used
        # directly.
        if fittedvalues is None:
            raise AttributeError('The loglikelihood for the negative '
                                 'binomial requires that the fitted values '
                                 'be provided via the `fittedvalues` '
                                 'keyword argument.')
constant = special.gammaln(Y + 1/self.alpha) - special.gammaln(Y+1)\
-special.gammaln(1/self.alpha)
return np.sum(Y*np.log(self.alpha*np.exp(fittedvalues)/\
(1 + self.alpha*np.exp(fittedvalues))) - \
np.log(1+self.alpha*np.exp(fittedvalues))/self.alpha\
+ constant)
def resid_anscombe(self, Y, mu):
"""
The Anscombe residuals for the negative binomial family
Parameters
----------
Y : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
`resid_anscombe` = (hyp2f1(-alpha*Y)-hyp2f1(-alpha*mu)+\
1.5*(Y**(2/3.)-mu**(2/3.)))/(mu+alpha*mu**2)**(1/6.)
where hyp2f1 is the hypergeometric 2f1 function parameterized as
hyp2f1(x) = hyp2f1(2/3.,1/3.,5/3.,x)
"""
hyp2f1 = lambda x : special.hyp2f1(2/3.,1/3.,5/3.,x)
return (hyp2f1(-self.alpha*Y)-hyp2f1(-self.alpha*mu)+1.5*(Y**(2/3.)-\
mu**(2/3.)))/(mu+self.alpha*mu**2)**(1/6.)
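if __name__ == '__main__':
    # Minimal sanity-check sketch (an added illustration; it assumes the
    # sibling `links` and `varfuncs` modules imported above are available
    # and that the log link's deriv is 1/mu).
    Y = np.array([1., 2., 3., 4.])
    mu = np.array([1.5, 1.5, 3.5, 3.5])
    fam = Poisson()
    # IRLS weights are 1/(link'(mu)**2 * variance(mu)); for the Poisson
    # family with the log link this reduces to mu itself.
    assert np.allclose(fam.weights(mu), mu)
    # With sum(Y - mu) == 0 the deviance equals the sum of the squared
    # deviance residuals.
    assert np.allclose(fam.deviance(Y, mu), np.sum(fam.resid_dev(Y, mu)**2))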
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
Mikolov et al., "Efficient Estimation of Word Representations in Vector Space,"
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "File consisting of analogies of four tokens. "
    "embedding 2 - embedding 1 + embedding 3 should be close "
    "to embedding 4. "
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
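# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: Word2Vec.nce_loss()
# below scores each true (example, label) pair with target 1 and the k
# sampled noise labels with target 0 through a sigmoid cross entropy, then
# averages the summed losses over the batch. The same arithmetic in plain
# numpy, with made-up logits:
def _nce_loss_sketch():
    import numpy as np
    true_logits = np.array([2.1, -0.3])              # hypothetical, [batch]
    sampled_logits = np.array([[0.2, -1.5, 0.7],
                               [1.1, 0.4, -2.0]])    # hypothetical, [batch, k]
    # Numerically stable softplus: log(1 + exp(x)).
    softplus = lambda x: np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))
    true_xent = softplus(-true_logits)       # -log(sigmoid(x)), target 1
    sampled_xent = softplus(sampled_logits)  # -log(1 - sigmoid(x)), target 0
    return (true_xent.sum() + sampled_xent.sum()) / len(true_logits)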
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [vocab_size].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size]
true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(true_logits), logits=true_logits)
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.summary.scalar("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
# Keep the word as text; calling .encode() here would write b'...' reprs
# into the text file under Python 3.
vocab_word = tf.compat.as_text(opts.vocab_words[i])
f.write("%s %d\n" % (vocab_word, opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval)  # Reports our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
print(c)
return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
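# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: the analogy eval in
# Word2Vec.build_eval_graph() reduces to plain linear algebra -- normalize
# the embedding table, form target = c + (b - a), score every vocabulary
# word by cosine similarity (one matmul), and keep the top 4 candidates.
# The same computation in numpy on a hypothetical random table:
def _analogy_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    emb = rng.randn(1000, 32)                # hypothetical [vocab, dim] table
    nemb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
    a, b, c = nemb[3], nemb[7], nemb[11]     # arbitrary word ids
    target = c + (b - a)
    dist = nemb.dot(target)                  # cosine scores, shape [vocab]
    return np.argsort(-dist)[:4]             # top-4 candidate ids for "d"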
|
|
#!/usr/bin/env python3
#
# Copyright 2018 Red Hat, Inc.
#
# Authors:
# Paolo Bonzini <pbonzini@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from collections import OrderedDict
import json
from django.contrib.auth.models import User
from api.models import Message
from api.rest import AddressSerializer
from .patchewtest import PatchewTestCase, main
try:
import coreapi
except ImportError:
coreapi = None
class RestTest(PatchewTestCase):
def setUp(self):
self.create_superuser()
self.p = self.add_project("QEMU", "qemu-devel@nongnu.org")
self.PROJECT_BASE = "%sprojects/%d/" % (self.REST_BASE, self.p.id)
self.sp = self.add_project("QEMU Block Layer", "qemu-block@nongnu.org")
self.sp.parent_project = self.p
self.sp.prefix_tags = "block"
self.sp.save()
self.SUBPROJECT_BASE = "%sprojects/%d/" % (self.REST_BASE, self.sp.id)
self.p2 = self.add_project("EDK 2", "edk2-devel@lists.01.org")
self.PROJECT_BASE_2 = "%sprojects/%d/" % (self.REST_BASE, self.p2.id)
self.admin = User.objects.get(username="admin")
self.USER_BASE = "%susers/%d/" % (self.REST_BASE, self.admin.id)
def test_root(self):
resp = self.api_client.get(self.REST_BASE)
self.assertEquals(resp.data["users"], self.REST_BASE + "users/")
self.assertEquals(resp.data["projects"], self.REST_BASE + "projects/")
self.assertEquals(resp.data["series"], self.REST_BASE + "series/")
resp = self.api_client.get(self.REST_BASE, HTTP_HOST="patchew.org")
self.assertEquals(resp.data["users"], "http://patchew.org/api/v1/users/")
self.assertEquals(resp.data["projects"], "http://patchew.org/api/v1/projects/")
self.assertEquals(resp.data["series"], "http://patchew.org/api/v1/series/")
def test_users(self):
resp = self.api_client.get(self.REST_BASE + "users/")
self.assertEquals(resp.data["count"], 1)
self.assertEquals(resp.data["results"][0]["resource_uri"], self.USER_BASE)
self.assertEquals(resp.data["results"][0]["username"], self.admin.username)
def test_user(self):
resp = self.api_client.get(self.USER_BASE)
self.assertEquals(resp.data["resource_uri"], self.USER_BASE)
self.assertEquals(resp.data["username"], self.admin.username)
def test_projects(self):
resp = self.api_client.get(self.REST_BASE + "projects/")
self.assertEquals(resp.data["count"], 3)
self.assertEquals(resp.data["results"][0]["resource_uri"], self.PROJECT_BASE)
self.assertEquals(resp.data["results"][0]["name"], "QEMU")
self.assertEquals(
resp.data["results"][0]["mailing_list"], "qemu-devel@nongnu.org"
)
self.assertEquals(resp.data["results"][1]["resource_uri"], self.SUBPROJECT_BASE)
self.assertEquals(resp.data["results"][1]["name"], "QEMU Block Layer")
self.assertEquals(
resp.data["results"][1]["mailing_list"], "qemu-block@nongnu.org"
)
self.assertEquals(resp.data["results"][1]["parent_project"], self.PROJECT_BASE)
def test_project(self):
resp = self.api_client.get(self.PROJECT_BASE)
self.assertEquals(resp.data["resource_uri"], self.PROJECT_BASE)
self.assertEquals(resp.data["name"], "QEMU")
self.assertEquals(resp.data["mailing_list"], "qemu-devel@nongnu.org")
resp = self.api_client.get(self.SUBPROJECT_BASE)
self.assertEquals(resp.data["resource_uri"], self.SUBPROJECT_BASE)
self.assertEquals(resp.data["name"], "QEMU Block Layer")
self.assertEquals(resp.data["mailing_list"], "qemu-block@nongnu.org")
self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
def test_project_by_name(self):
resp = self.api_client.get(self.REST_BASE + "projects/by-name/QEMU/")
self.assertEquals(resp.status_code, 307)
resp = self.api_client.get(resp["Location"])
self.assertEquals(resp.data["resource_uri"], self.PROJECT_BASE)
self.assertEquals(resp.data["name"], "QEMU")
self.assertEquals(resp.data["mailing_list"], "qemu-devel@nongnu.org")
resp = self.api_client.get(
self.REST_BASE + "projects/by-name/QEMU/?some=thing&foo=bar"
)
self.assertEquals(resp.status_code, 307)
self.assertIn("some=thing", resp["Location"])
self.assertIn("foo=bar", resp["Location"])
def test_project_config_get(self):
self.p.config = {"git": {"push_to": "/tmp/aaa"}}
self.p.save()
resp = self.api_client.get(self.PROJECT_BASE + "config/")
self.assertEquals(resp.status_code, 401)
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.get(self.PROJECT_BASE + "config/")
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data["git"]["push_to"], "/tmp/aaa")
def test_project_config_put(self):
new_config = {"git": {"push_to": "/tmp/bbb"}}
resp = self.api_client.put(
self.PROJECT_BASE + "config/", new_config, format="json"
)
self.assertEquals(resp.status_code, 401)
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.put(
self.PROJECT_BASE + "config/", new_config, format="json"
)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data["git"]["push_to"], "/tmp/bbb")
resp = self.api_client.get(self.PROJECT_BASE + "config/")
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data["git"]["push_to"], "/tmp/bbb")
def test_update_project_head(self):
resp = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
self.api_client.login(username=self.user, password=self.password)
resp_before = self.api_client.get(
self.PROJECT_BASE + "series/" + "20160628014747.20971-1-famz@redhat.com/"
)
data = {
"message_ids": ["20160628014747.20971-1-famz@redhat.com"],
"old_head": "None",
"new_head": "000000",
}
resp = self.api_client.post(
self.PROJECT_BASE + "update_project_head/",
data=json.dumps(data),
content_type="application/json",
)
resp_after = self.api_client.get(
self.PROJECT_BASE + "series/" + "20160628014747.20971-1-famz@redhat.com/"
)
self.assertEquals(resp_before.data["is_merged"], False)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data["count"], 1)
self.assertEquals(resp.data["new_head"], "000000")
self.assertEquals(resp_after.data["is_merged"], True)
def test_project_post_no_login(self):
data = {"name": "keycodemapdb"}
resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
self.assertEquals(resp.status_code, 401)
def test_project_post_minimal(self):
data = {"name": "keycodemapdb"}
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
self.assertEquals(resp.status_code, 201)
self.assertEquals(
resp.data["resource_uri"].startswith(self.REST_BASE + "projects/"), True
)
self.assertEquals(resp.data["name"], data["name"])
resp = self.api_client.get(resp.data["resource_uri"])
self.assertEquals(resp.data["name"], data["name"])
def test_project_post(self):
self.api_client.login(username=self.user, password=self.password)
data = {
"name": "keycodemapdb",
"mailing_list": "qemu-devel@nongnu.org",
"prefix_tags": "keycodemapdb",
"url": "https://gitlab.com/keycodemap/keycodemapdb/",
"git": "https://gitlab.com/keycodemap/keycodemapdb/",
"description": "keycodemapdb generates code to translate key codes",
"display_order": 4321,
"parent_project": self.PROJECT_BASE,
}
resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
self.assertEquals(resp.status_code, 201)
self.assertEquals(
resp.data["resource_uri"].startswith(self.REST_BASE + "projects/"), True
)
self.assertEquals(resp.data["name"], data["name"])
self.assertEquals(resp.data["mailing_list"], data["mailing_list"])
self.assertEquals(resp.data["prefix_tags"], data["prefix_tags"])
self.assertEquals(resp.data["url"], data["url"])
self.assertEquals(resp.data["git"], data["git"])
self.assertEquals(resp.data["description"], data["description"])
self.assertEquals(resp.data["display_order"], data["display_order"])
self.assertEquals(resp.data["logo"], None)
self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
resp = self.api_client.get(resp.data["resource_uri"])
self.assertEquals(resp.data["name"], data["name"])
self.assertEquals(resp.data["mailing_list"], data["mailing_list"])
self.assertEquals(resp.data["prefix_tags"], data["prefix_tags"])
self.assertEquals(resp.data["url"], data["url"])
self.assertEquals(resp.data["git"], data["git"])
self.assertEquals(resp.data["description"], data["description"])
self.assertEquals(resp.data["display_order"], data["display_order"])
self.assertEquals(resp.data["logo"], None)
self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
def test_project_results_list(self):
resp1 = self.api_client.get(self.PROJECT_BASE)
resp = self.api_client.get(resp1.data["results"])
self.assertEqual(resp.data["count"], len(resp.data["results"]))
def test_series_single(self):
resp = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH] quorum: Only compile when supported",
)
self.assertEqual(
resp.data["stripped_subject"], "quorum: Only compile when supported"
)
self.assertEqual(resp.data["is_complete"], True)
self.assertEqual(resp.data["total_patches"], 1)
self.assertEqual(len(resp.data["replies"]), 0)
self.assertEqual(len(resp.data["patches"]), 1)
self.assertEqual(resp.data["patches"][0]["subject"], resp.data["subject"])
self.assertEqual(
resp.data["patches"][0]["stripped_subject"], resp.data["stripped_subject"]
)
def test_series_multiple(self):
resp = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["stripped_subject"],
"Report format specific info for LUKS block driver",
)
self.assertEqual(resp.data["is_complete"], True)
self.assertEqual(resp.data["total_patches"], 2)
self.assertEqual(len(resp.data["replies"]), 2)
self.assertEqual(len(resp.data["patches"]), 2)
self.assertEqual(
resp.data["replies"][0]["resource_uri"],
self.PROJECT_BASE + "messages/5792265A.5070507@redhat.com/",
)
self.assertEqual(
resp.data["replies"][0]["in_reply_to"],
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["replies"][0]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["replies"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/e0858c00-ccb6-e533-ee3e-9ba84ca45a7b@redhat.com/",
)
self.assertEqual(
resp.data["replies"][1]["in_reply_to"],
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["replies"][1]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["patches"][0]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-2-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["patches"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][0]["stripped_subject"],
"crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-3-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["patches"][1]["subject"],
"[Qemu-devel] [PATCH v4 2/2] block: export LUKS specific data to qemu-img info",
)
self.assertEqual(
resp.data["patches"][1]["stripped_subject"],
"block: export LUKS specific data to qemu-img info",
)
def test_series_incomplete(self):
resp = self.apply_and_retrieve(
"0012-incomplete-series.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["stripped_subject"],
"Report format specific info for LUKS block driver",
)
self.assertEqual(resp.data["is_complete"], False)
self.assertEqual(resp.data["total_patches"], 2)
self.assertEqual(len(resp.data["replies"]), 2)
self.assertEqual(len(resp.data["patches"]), 1)
self.assertEqual(
resp.data["patches"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][0]["stripped_subject"],
"crypto: add support for querying parameters for block encryption",
)
def test_series_list(self):
self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(self.REST_BASE + "series/")
self.assertEqual(resp.data["count"], 2)
resp = self.api_client.get(self.PROJECT_BASE + "series/")
self.assertEqual(resp.data["count"], 2)
resp = self.api_client.get(self.REST_BASE + "projects/12345/series/")
self.assertEqual(resp.status_code, 404)
def test_series_results_list(self):
resp1 = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(resp1.data["results"])
self.assertEqual(resp.data["count"], len(resp.data["results"]))
def test_series_search(self):
resp1 = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
resp2 = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(self.REST_BASE + "series/?q=quorum")
self.assertEqual(resp.data["count"], 1)
self.assertEqual(
resp.data["results"][0]["resource_uri"], resp2.data["resource_uri"]
)
self.assertEqual(resp.data["results"][0]["subject"], resp2.data["subject"])
self.assertEqual("replies" in resp.data["results"][0], False)
self.assertEqual("patches" in resp.data["results"][0], False)
resp = self.api_client.get(self.REST_BASE + "series/?q=project:QEMU")
self.assertEqual(resp.data["count"], 2)
self.assertEqual(
resp.data["results"][0]["resource_uri"], resp1.data["resource_uri"]
)
self.assertEqual(resp.data["results"][0]["subject"], resp1.data["subject"])
self.assertEqual("replies" in resp.data["results"][0], False)
self.assertEqual("patches" in resp.data["results"][0], False)
self.assertEqual(
resp.data["results"][1]["resource_uri"], resp2.data["resource_uri"]
)
self.assertEqual(resp.data["results"][1]["subject"], resp2.data["subject"])
self.assertEqual("replies" in resp.data["results"][1], False)
self.assertEqual("patches" in resp.data["results"][1], False)
resp = self.api_client.get(self.REST_BASE + "projects/12345/series/?q=quorum")
self.assertEqual(resp.status_code, 404)
resp = self.api_client.get(
self.REST_BASE + "projects/12345/series/?q=project:QEMU"
)
self.assertEqual(resp.status_code, 404)
def test_series_delete(self):
test_message_id = "1469192015-16487-1-git-send-email-berrange@redhat.com"
series = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz", self.p.id, test_message_id
)
message = series.data["message"]
resp_before = self.api_client.get(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
resp_reply_before = self.api_client.get(message + "replies/")
resp_without_login = self.api_client.delete(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.delete(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
self.api_client.logout()
resp_after = self.api_client.get(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
resp_reply_after = self.api_client.get(message + "replies/")
self.assertEqual(resp_before.status_code, 200)
self.assertEqual(resp_reply_before.status_code, 200)
self.assertEqual(resp_without_login.status_code, 401)
self.assertEqual(resp.status_code, 204)
self.assertEqual(resp_after.status_code, 404)
self.assertEqual(resp_reply_after.status_code, 404)
def test_create_message(self):
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v2 10/27] imx_fec: Reserve full 4K "
"page for the register file",
)
def test_patch_message(self):
the_tags = ["Reviewed-by: Paolo Bonzini <pbonzini@redhat.com"]
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(resp_get.data["tags"], [])
resp = self.api_client.patch(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/",
{"tags": the_tags},
)
self.assertEqual(resp.status_code, 200)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(resp_get.data["tags"], the_tags)
def test_create_text_message(self):
dp = self.get_data_path("0004-multiple-patch-reviewed.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/1469192015-16487-1-git-send-email-berrange@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
def test_patch_series(self):
dp = self.get_data_path("0001-simple-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
resp = self.api_client.patch(
self.PROJECT_BASE + "series/20160628014747.20971-1-famz@redhat.com/",
{"is_tested": True},
)
self.assertEqual(resp.status_code, 200)
resp_get = self.api_client.get(
self.PROJECT_BASE + "series/20160628014747.20971-1-famz@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertTrue(resp_get.data["is_tested"])
def test_create_message_without_project_pk(self):
dp = self.get_data_path("0024-multiple-project-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_create_text_message_without_project_pk(self):
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_without_login_create_message(self):
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 401)
def test_non_maintainer_create_message(self):
self.create_user(username="test", password="userpass")
self.api_client.login(username="test", password="userpass")
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 0)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 404)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 404)
def test_maintainer_create_message(self):
test = self.create_user(username="test", password="userpass")
self.api_client.login(username="test", password="userpass")
self.p.maintainers = (test,)
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 1)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 404)
def test_importer_create_message(self):
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.create_user(username="test", password="userpass", groups=["importers"])
self.api_client.login(username="test", password="userpass")
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_message(self):
series = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
message = series.data["patches"][0]["resource_uri"]
resp = self.api_client.get(message)
self.assertEqual(resp.data["mbox"], Message.objects.all()[0].get_mbox())
def test_message_mbox(self):
series = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
message = series.data["patches"][0]["resource_uri"]
resp = self.client.get(message + "mbox/")
self.assertEqual(resp.data, Message.objects.all()[0].get_mbox())
def test_address_serializer(self):
data1 = {"name": "Shubham", "address": "shubhamjain7495@gmail.com"}
serializer1 = AddressSerializer(data=data1)
valid1 = serializer1.is_valid()
valid_data1 = serializer1.validated_data
data2 = {"name": 123, "address": "shubhamjain7495@gmail.com"}
serializer2 = AddressSerializer(data=data2)
valid2 = serializer2.is_valid()
valid_data2 = serializer2.validated_data
self.assertEqual(valid1, True)
self.assertEqual(
valid_data1,
OrderedDict(
[("name", "Shubham"), ("address", "shubhamjain7495@gmail.com")]
),
)
self.assertEqual(valid2, True)
self.assertEqual(
valid_data2,
OrderedDict([("name", "123"), ("address", "shubhamjain7495@gmail.com")]),
)
def test_message_replies(self):
series = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
message = series.data["message"]
resp = self.api_client.get(message + "replies/")
self.assertEqual(resp.data["count"], 4)
self.assertEqual(
resp.data["results"][0]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-2-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["results"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["results"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-3-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["results"][1]["subject"],
"[Qemu-devel] [PATCH v4 2/2] block: export LUKS specific data to qemu-img info",
)
self.assertEqual(
resp.data["results"][2]["resource_uri"],
self.PROJECT_BASE + "messages/5792265A.5070507@redhat.com/",
)
self.assertEqual(
resp.data["results"][2]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["results"][3]["resource_uri"],
self.PROJECT_BASE
+ "messages/e0858c00-ccb6-e533-ee3e-9ba84ca45a7b@redhat.com/",
)
self.assertEqual(
resp.data["results"][3]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
def test_schema(self):
resp = self.api_client.get(self.REST_BASE + "schema/")
self.assertEqual(resp.status_code, 200)
if __name__ == "__main__":
main()
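# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original tests: test_address_serializer
# above relies on DRF's CharField coercion -- a non-string name such as 123
# validates and is returned as the string "123". The field types here are
# assumptions (a hypothetical stand-in, not the actual api.rest
# AddressSerializer):
def _address_serializer_sketch():
    from rest_framework import serializers

    class AddressSketch(serializers.Serializer):
        name = serializers.CharField()
        address = serializers.EmailField()

    s = AddressSketch(data={"name": 123, "address": "someone@example.com"})
    assert s.is_valid()
    return s.validated_data["name"]  # "123", coerced by CharField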
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import testtools
from glanceclient import exc
from glanceclient.tests import utils
from glanceclient.v2 import images
_CHKSUM = '93264c3edf5972c9f1cb309543d38a5c'
_CHKSUM1 = '54264c3edf5972c9f1cb309453d38a46'
_TAG1 = 'power'
_TAG2 = '64bit'
_BOGUS_ID = '63e7f218-29de-4477-abdc-8db7c9533188'
_EVERYTHING_ID = '802cbbb7-0379-4c38-853f-37302b5e3d29'
_OWNED_IMAGE_ID = 'a4963502-acc7-42ba-ad60-5aa0962b7faf'
_OWNER_ID = '6bd473f0-79ae-40ad-a927-e07ec37b642f'
_PRIVATE_ID = 'e33560a7-3964-4de5-8339-5a24559f99ab'
_PUBLIC_ID = '857806e7-05b6-48e0-9d40-cb0e6fb727b9'
_SHARED_ID = '331ac905-2a38-44c5-a83d-653db8f08313'
_STATUS_REJECTED_ID = 'f3ea56ff-d7e4-4451-998c-1e3d33539c8e'
data_fixtures = {
'/v2/schemas/image': {
'GET': (
{},
{
'name': 'image',
'properties': {
'id': {},
'name': {},
'locations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'metadata': {'type': 'object'},
'url': {'type': 'string'},
},
'required': ['url', 'metadata'],
},
},
'color': {'type': 'string', 'is_base': False},
},
'additionalProperties': {'type': 'string'},
},
),
},
'/v2/images?limit=%d' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?limit=2': {
'GET': (
{},
{
'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
],
'next': ('/v2/images?limit=2&'
'marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'),
},
),
},
'/v2/images?limit=1': {
'GET': (
{},
{
'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
],
'next': ('/v2/images?limit=1&'
'marker=3a4560a1-e585-443e-9b39-553b46ec92d1'),
},
),
},
('/v2/images?limit=1&marker=3a4560a1-e585-443e-9b39-553b46ec92d1'): {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
('/v2/images?limit=1&marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'): {
'GET': (
{},
{'images': [
{
'id': '3f99bf80-2ee6-47cf-acfe-1f1fabb7e811',
'name': 'image-3',
},
]},
),
},
'/v2/images/3a4560a1-e585-443e-9b39-553b46ec92d1': {
'GET': (
{},
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
),
'PATCH': (
{},
'',
),
},
'/v2/images/e7e59ff6-fa2e-4075-87d3-1a1398a07dc3': {
'GET': (
{},
{
'id': 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3',
'name': 'image-3',
'barney': 'rubble',
'george': 'jetson',
'color': 'red',
},
),
'PATCH': (
{},
'',
),
},
'/v2/images': {
'POST': (
{},
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
),
},
'/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a': {
'DELETE': (
{},
{
'id': '87b634c1-f893-33c9-28a9-e5673c99239a',
},
),
},
'/v2/images/606b0e88-7c5a-4d54-b5bb-046105d4de6f/file': {
'PUT': (
{},
'',
),
},
'/v2/images/5cc4bebc-db27-11e1-a1eb-080027cbe205/file': {
'GET': (
{},
'A',
),
},
'/v2/images/66fb18d6-db27-11e1-a1eb-080027cbe205/file': {
'GET': (
{
'content-md5': 'wrong'
},
'BB',
),
},
'/v2/images/1b1c6366-dd57-11e1-af0f-02163e68b1d8/file': {
'GET': (
{
'content-md5': 'defb99e69a9f1f6e06f15006b1f166ae'
},
'CCC',
),
},
'/v2/images?limit=%d&visibility=public' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _PUBLIC_ID,
'harvey': 'lipshitz',
},
]},
),
},
'/v2/images?limit=%d&visibility=private' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _PRIVATE_ID,
},
]},
),
},
'/v2/images?limit=%d&visibility=shared' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _SHARED_ID,
},
]},
),
},
'/v2/images?limit=%d&member_status=rejected' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _STATUS_REJECTED_ID,
},
]},
),
},
'/v2/images?limit=%d&member_status=pending' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
'/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _OWNER_ID): {
'GET': (
{},
{'images': [
{
'id': _OWNED_IMAGE_ID,
},
]},
),
},
'/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): {
'GET': (
{},
{'images': []},
),
},
'/v2/images?limit=%d&member_status=pending&owner=%s&visibility=shared'
% (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): {
'GET': (
{},
{'images': [
{
'id': _EVERYTHING_ID,
},
]},
),
},
'/v2/images?checksum=%s&limit=%d' % (_CHKSUM, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
}
]},
),
},
'/v2/images?checksum=%s&limit=%d' % (_CHKSUM1, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?checksum=wrong&limit=%d' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
'/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG1): {
'GET': (
{},
{'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
}
]},
),
},
'/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG2): {
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?limit=%d&tag=%s&tag=%s' % (images.DEFAULT_PAGE_SIZE,
_TAG1, _TAG2):
{
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
}
]},
),
},
'/v2/images?limit=%d&tag=fake' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
'/v2/images/a2b83adc-888e-11e3-8872-78acc0b951d8': {
'GET': (
{},
{
'id': 'a2b83adc-888e-11e3-8872-78acc0b951d8',
'name': 'image-location-tests',
'locations': [{u'url': u'http://foo.com/',
u'metadata': {u'foo': u'foometa'}},
{u'url': u'http://bar.com/',
u'metadata': {u'bar': u'barmeta'}}],
},
),
'PATCH': (
{},
'',
)
},
'/v2/images?limit=%d&os_distro=NixOS' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '8b052954-c76c-4e02-8e90-be89a70183a8',
'name': 'image-5',
'os_distro': 'NixOS',
},
]},
),
},
'/v2/images?limit=%d&my_little_property=cant_be_this_cute' %
images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
'/v2/images?limit=%d&sort_key=name' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?limit=%d&sort_key=name&sort_key=id'
% images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image',
},
]},
),
},
'/v2/images?limit=%d&sort_dir=desc&sort_key=id'
% images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
]},
),
},
'/v2/images?limit=%d&sort_dir=desc&sort_key=name&sort_key=id'
% images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
]},
),
},
'/v2/images?limit=%d&sort_dir=desc&sort_dir=asc&sort_key=name&sort_key=id'
% images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
]},
),
},
'/v2/images?limit=%d&sort=name%%3Adesc%%2Csize%%3Aasc'
% images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
]},
),
},
}
schema_fixtures = {
'image': {
'GET': (
{},
{
'name': 'image',
'properties': {
'id': {},
'name': {},
'locations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'metadata': {'type': 'object'},
'url': {'type': 'string'},
},
'required': ['url', 'metadata'],
}
},
'color': {'type': 'string', 'is_base': False},
'tags': {'type': 'array'},
},
'additionalProperties': {'type': 'string'},
}
)
}
}
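# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original tests: each fixture above
# maps a request path to {HTTP method: (response headers, response body)},
# so a fake transport can answer a call with a dictionary lookup instead of
# an HTTP round trip. A minimal hypothetical lookup (utils.FakeAPI's real
# implementation may differ):
def _fixture_lookup_sketch(fixtures, method, path):
    headers, body = fixtures[path][method]
    return headers, body
# e.g. _fixture_lookup_sketch(data_fixtures, 'GET', '/v2/images?limit=2')
# returns ({}, {'images': [...], 'next': '...'}).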
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(data_fixtures)
self.schema_api = utils.FakeSchemaAPI(schema_fixtures)
self.controller = images.Controller(self.api, self.schema_api)
def test_list_images(self):
# NOTE(bcwaldon): cast to list since the controller returns a generator
images = list(self.controller.list())
self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id)
self.assertEqual('image-1', images[0].name)
self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id)
self.assertEqual('image-2', images[1].name)
def test_list_images_paginated(self):
# NOTE(bcwaldon): cast to list since the controller returns a generator
images = list(self.controller.list(page_size=1))
self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id)
self.assertEqual('image-1', images[0].name)
self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id)
self.assertEqual('image-2', images[1].name)
def test_list_images_paginated_with_limit(self):
# NOTE(bcwaldon): cast to list since the controller returns a generator
images = list(self.controller.list(limit=3, page_size=2))
self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id)
self.assertEqual('image-1', images[0].name)
self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id)
self.assertEqual('image-2', images[1].name)
self.assertEqual('3f99bf80-2ee6-47cf-acfe-1f1fabb7e811', images[2].id)
self.assertEqual('image-3', images[2].name)
self.assertEqual(3, len(images))
def test_list_images_with_marker(self):
images = list(self.controller.list(limit=1,
marker='3a4560a1-e585-443e-9b39-553b46ec92d1'))
self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[0].id)
self.assertEqual('image-2', images[0].name)
def test_list_images_visibility_public(self):
filters = {'filters': {'visibility': 'public'}}
images = list(self.controller.list(**filters))
self.assertEqual(_PUBLIC_ID, images[0].id)
def test_list_images_visibility_private(self):
filters = {'filters': {'visibility': 'private'}}
images = list(self.controller.list(**filters))
self.assertEqual(_PRIVATE_ID, images[0].id)
def test_list_images_visibility_shared(self):
filters = {'filters': {'visibility': 'shared'}}
images = list(self.controller.list(**filters))
self.assertEqual(_SHARED_ID, images[0].id)
def test_list_images_member_status_rejected(self):
filters = {'filters': {'member_status': 'rejected'}}
images = list(self.controller.list(**filters))
self.assertEqual(_STATUS_REJECTED_ID, images[0].id)
def test_list_images_for_owner(self):
filters = {'filters': {'owner': _OWNER_ID}}
images = list(self.controller.list(**filters))
self.assertEqual(_OWNED_IMAGE_ID, images[0].id)
def test_list_images_for_checksum_single_image(self):
fake_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
filters = {'filters': {'checksum': _CHKSUM}}
images = list(self.controller.list(**filters))
self.assertEqual(1, len(images))
self.assertEqual('%s' % fake_id, images[0].id)
def test_list_images_for_checksum_multiple_images(self):
fake_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
fake_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'
filters = {'filters': {'checksum': _CHKSUM1}}
images = list(self.controller.list(**filters))
self.assertEqual(2, len(images))
self.assertEqual('%s' % fake_id1, images[0].id)
self.assertEqual('%s' % fake_id2, images[1].id)
def test_list_images_for_wrong_checksum(self):
filters = {'filters': {'checksum': 'wrong'}}
images = list(self.controller.list(**filters))
self.assertEqual(0, len(images))
def test_list_images_for_bogus_owner(self):
filters = {'filters': {'owner': _BOGUS_ID}}
images = list(self.controller.list(**filters))
self.assertEqual([], images)
def test_list_images_for_bunch_of_filters(self):
filters = {'filters': {'owner': _BOGUS_ID,
'visibility': 'shared',
'member_status': 'pending'}}
images = list(self.controller.list(**filters))
self.assertEqual(_EVERYTHING_ID, images[0].id)
def test_list_images_filters_encoding(self):
filters = {"owner": u"ni\xf1o"}
try:
list(self.controller.list(filters=filters))
except KeyError:
# NOTE(flaper87): It raises KeyError because there's
# no fixture supporting this query:
# /v2/images?owner=ni%C3%B1o&limit=20
# We just want to make sure filters are correctly encoded.
pass
self.assertEqual(b"ni\xc3\xb1o", filters["owner"])
def test_list_images_for_tag_single_image(self):
img_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
filters = {'filters': {'tag': [_TAG1]}}
images = list(self.controller.list(**filters))
self.assertEqual(1, len(images))
self.assertEqual('%s' % img_id, images[0].id)
def test_list_images_for_tag_multiple_images(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
img_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'
filters = {'filters': {'tag': [_TAG2]}}
images = list(self.controller.list(**filters))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[0].id)
self.assertEqual('%s' % img_id2, images[1].id)
def test_list_images_for_multi_tags(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
filters = {'filters': {'tag': [_TAG1, _TAG2]}}
images = list(self.controller.list(**filters))
self.assertEqual(1, len(images))
self.assertEqual('%s' % img_id1, images[0].id)
def test_list_images_for_non_existent_tag(self):
filters = {'filters': {'tag': ['fake']}}
images = list(self.controller.list(**filters))
self.assertEqual(0, len(images))
def test_list_images_for_invalid_tag(self):
filters = {'filters': {'tag': [[]]}}
self.assertRaises(exc.HTTPBadRequest,
list,
self.controller.list(**filters))
def test_list_images_with_single_sort_key(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort_key = 'name'
images = list(self.controller.list(sort_key=sort_key))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[0].id)
def test_list_with_multiple_sort_keys(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort_key = ['name', 'id']
images = list(self.controller.list(sort_key=sort_key))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[0].id)
def test_list_images_with_desc_sort_dir(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort_key = 'id'
sort_dir = 'desc'
images = list(self.controller.list(sort_key=sort_key,
sort_dir=sort_dir))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[1].id)
def test_list_images_with_multiple_sort_keys_and_one_sort_dir(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort_key = ['name', 'id']
sort_dir = 'desc'
images = list(self.controller.list(sort_key=sort_key,
sort_dir=sort_dir))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[1].id)
def test_list_images_with_multiple_sort_dirs(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort_key = ['name', 'id']
sort_dir = ['desc', 'asc']
images = list(self.controller.list(sort_key=sort_key,
sort_dir=sort_dir))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[1].id)
def test_list_images_with_new_sorting_syntax(self):
img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
sort = 'name:desc,size:asc'
images = list(self.controller.list(sort=sort))
self.assertEqual(2, len(images))
self.assertEqual('%s' % img_id1, images[1].id)
def test_list_images_sort_dirs_fewer_than_keys(self):
sort_key = ['name', 'id', 'created_at']
sort_dir = ['desc', 'asc']
self.assertRaises(exc.HTTPBadRequest,
list,
self.controller.list(
sort_key=sort_key,
sort_dir=sort_dir))
def test_list_images_combined_syntax(self):
sort_key = ['name', 'id']
sort_dir = ['desc', 'asc']
sort = 'name:asc'
self.assertRaises(exc.HTTPBadRequest,
list,
self.controller.list(
sort=sort,
sort_key=sort_key,
sort_dir=sort_dir))
def test_list_images_new_sorting_syntax_invalid_key(self):
sort = 'INVALID:asc'
self.assertRaises(exc.HTTPBadRequest,
list,
self.controller.list(
sort=sort))
def test_list_images_new_sorting_syntax_invalid_direction(self):
sort = 'name:INVALID'
self.assertRaises(exc.HTTPBadRequest,
list,
self.controller.list(
sort=sort))
def test_list_images_for_property(self):
filters = {'filters': dict([('os_distro', 'NixOS')])}
images = list(self.controller.list(**filters))
self.assertEqual(1, len(images))
def test_list_images_for_non_existent_property(self):
filters = {'filters': dict([('my_little_property',
'cant_be_this_cute')])}
images = list(self.controller.list(**filters))
self.assertEqual(0, len(images))
def test_get_image(self):
image = self.controller.get('3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id)
self.assertEqual('image-1', image.name)
def test_create_image(self):
properties = {
'name': 'image-1'
}
image = self.controller.create(**properties)
self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id)
self.assertEqual('image-1', image.name)
def test_create_bad_additionalProperty_type(self):
properties = {
'name': 'image-1',
'bad_prop': True,
}
with testtools.ExpectedException(TypeError):
self.controller.create(**properties)
def test_delete_image(self):
self.controller.delete('87b634c1-f893-33c9-28a9-e5673c99239a')
expect = [
('DELETE',
'/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a',
{},
None)]
self.assertEqual(expect, self.api.calls)
def test_data_upload(self):
image_data = 'CCC'
image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f'
self.controller.upload(image_id, image_data)
expect = [('PUT', '/v2/images/%s/file' % image_id,
{'Content-Type': 'application/octet-stream'},
image_data)]
self.assertEqual(expect, self.api.calls)
def test_data_upload_w_size(self):
image_data = 'CCC'
image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f'
self.controller.upload(image_id, image_data, image_size=3)
body = {'image_data': image_data,
'image_size': 3}
expect = [('PUT', '/v2/images/%s/file' % image_id,
{'Content-Type': 'application/octet-stream'},
sorted(body.items()))]
self.assertEqual(expect, self.api.calls)
def test_data_without_checksum(self):
body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual('A', body)
body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205')
body = ''.join([b for b in body])
self.assertEqual('A', body)
def test_data_with_wrong_checksum(self):
body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual('BB', body)
body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205')
try:
body = ''.join([b for b in body])
self.fail('data did not raise an error.')
except IOError as e:
self.assertEqual(errno.EPIPE, e.errno)
msg = 'was 9d3d9048db16a7eee539e93e3618cbe7 expected wrong'
self.assertIn(msg, str(e))
def test_data_with_checksum(self):
body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual('CCC', body)
body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8')
body = ''.join([b for b in body])
self.assertEqual('CCC', body)
def test_update_replace_prop(self):
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
params = {'name': 'pong'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'replace'), ('path', '/name'),
('value', 'pong')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
        # NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual('image-1', image.name)
def test_update_add_prop(self):
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
params = {'finn': 'human'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
        # NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual('image-1', image.name)
def test_update_remove_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
remove_props = ['barney']
image = self.controller.update(image_id, remove_props)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'remove'), ('path', '/barney')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
        # NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual('image-3', image.name)
def test_update_replace_remove_same_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
# Updating a property takes precedence over removing a property
params = {'barney': 'miller'}
remove_props = ['barney']
image = self.controller.update(image_id, remove_props, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = ([[('op', 'replace'), ('path', '/barney'),
('value', 'miller')]])
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
        # NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual('image-3', image.name)
def test_update_add_remove_same_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
# Adding a property takes precedence over removing a property
params = {'finn': 'human'}
remove_props = ['finn']
image = self.controller.update(image_id, remove_props, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
        # NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual('image-3', image.name)
def test_update_bad_additionalProperty_type(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
params = {'name': 'pong', 'bad_prop': False}
with testtools.ExpectedException(TypeError):
self.controller.update(image_id, **params)
def test_update_add_custom_property(self):
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
params = {'color': 'red'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'add'), ('path', '/color'), ('value', 'red')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
def test_update_replace_custom_property(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
params = {'color': 'blue'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.1-json-patch',
}
expect_body = [[('op', 'replace'), ('path', '/color'),
('value', 'blue')]]
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(image_id, image.id)
def test_location_ops_when_server_disabled_location_ops(self):
# Location operations should not be allowed if server has not
# enabled location related operations
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
estr = 'The administrator has disabled API access to image locations'
url = 'http://bar.com/'
meta = {'bar': 'barmeta'}
e = self.assertRaises(exc.HTTPBadRequest,
self.controller.add_location,
image_id, url, meta)
self.assertIn(estr, str(e))
e = self.assertRaises(exc.HTTPBadRequest,
self.controller.delete_locations,
image_id, set([url]))
self.assertIn(estr, str(e))
e = self.assertRaises(exc.HTTPBadRequest,
self.controller.update_location,
image_id, url, meta)
self.assertIn(estr, str(e))
def _empty_get(self, image_id):
return ('GET', '/v2/images/%s' % image_id, {}, None)
def _patch_req(self, image_id, patch_body):
c_type = 'application/openstack-images-v2.1-json-patch'
data = [sorted(d.items()) for d in patch_body]
return ('PATCH',
'/v2/images/%s' % image_id,
{'Content-Type': c_type},
data)
def test_add_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}}
add_patch = {'path': '/locations/-', 'value': new_loc, 'op': 'add'}
self.controller.add_location(image_id, **new_loc)
self.assertEqual(self.api.calls, [
self._empty_get(image_id),
self._patch_req(image_id, [add_patch]),
self._empty_get(image_id)
])
def test_add_duplicate_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
new_loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'newfoo'}}
err_str = 'A location entry at %s already exists' % new_loc['url']
err = self.assertRaises(exc.HTTPConflict,
self.controller.add_location,
image_id, **new_loc)
self.assertIn(err_str, str(err))
def test_remove_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
url_set = set(['http://foo.com/', 'http://bar.com/'])
del_patches = [{'path': '/locations/1', 'op': 'remove'},
{'path': '/locations/0', 'op': 'remove'}]
self.controller.delete_locations(image_id, url_set)
self.assertEqual(self.api.calls, [
self._empty_get(image_id),
self._patch_req(image_id, del_patches)
])
def test_remove_missing_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
url_set = set(['http://spam.ham/'])
err_str = 'Unknown URL(s): %s' % list(url_set)
err = self.assertRaises(exc.HTTPNotFound,
self.controller.delete_locations,
image_id, url_set)
self.assertIn(err_str, str(err))
def test_update_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
new_loc = {'url': 'http://foo.com/', 'metadata': {'spam': 'ham'}}
fixture_idx = '/v2/images/%s' % (image_id)
orig_locations = data_fixtures[fixture_idx]['GET'][1]['locations']
loc_map = dict([(l['url'], l) for l in orig_locations])
loc_map[new_loc['url']] = new_loc
mod_patch = [{'path': '/locations', 'op': 'replace',
'value': []},
{'path': '/locations', 'op': 'replace',
'value': list(loc_map.values())}]
self.controller.update_location(image_id, **new_loc)
self.assertEqual(self.api.calls, [
self._empty_get(image_id),
self._patch_req(image_id, mod_patch),
self._empty_get(image_id)
])
def test_update_tags(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
tag_map = {'tags': ['tag01', 'tag02', 'tag03']}
image = self.controller.update(image_id, **tag_map)
expected_body = [{'path': '/tags', 'op': 'replace',
'value': tag_map['tags']}]
expected = [
self._empty_get(image_id),
self._patch_req(image_id, expected_body),
self._empty_get(image_id)
]
self.assertEqual(expected, self.api.calls)
self.assertEqual(image_id, image.id)
def test_update_missing_location(self):
image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8'
new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}}
err_str = 'Unknown URL: %s' % new_loc['url']
err = self.assertRaises(exc.HTTPNotFound,
self.controller.update_location,
image_id, **new_loc)
self.assertIn(err_str, str(err))
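# NOTE: a hedged illustration, not part of the original test module. The fake
# API used above records PATCH bodies as sorted (op, path, value) tuples; over
# the wire, the same update would be a JSON Patch request such as:
#
#   PATCH /v2/images/3a4560a1-e585-443e-9b39-553b46ec92d1
#   Content-Type: application/openstack-images-v2.1-json-patch
#
#   [{"op": "replace", "path": "/name", "value": "pong"}]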
|
|
"""
Django settings for test_project project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from __future__ import unicode_literals
from logging.handlers import SysLogHandler
import os
from modoboa.test_settings import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w537@nm@5n)=+e%-7*z-jxf21a#0k%uv^rbu**+cj4=_u57e(8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = 'DEBUG' in os.environ
TEMPLATE_DEBUG = DEBUG
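# Note: the check above tests only for the variable's *presence*, so any value
# enables debug mode, e.g. (illustrative):
#
#   DEBUG=1 python manage.py runserver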
ALLOWED_HOSTS = [
'127.0.0.1',
'localhost',
]
SITE_ID = 1
# Security settings
X_FRAME_OPTIONS = "SAMEORIGIN"
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'reversion',
'ckeditor',
'ckeditor_uploader',
'rest_framework',
'rest_framework.authtoken',
'django_otp',
'django_otp.plugins.otp_totp',
'django_otp.plugins.otp_static',
)
# A dedicated place to register Modoboa applications
# Do not delete it.
# Do not change the order.
MODOBOA_APPS = (
'modoboa',
'modoboa.core',
'modoboa.lib',
'modoboa.admin',
'modoboa.transport',
'modoboa.relaydomains',
'modoboa.limits',
'modoboa.parameters',
# Modoboa extensions here.
'modoboa_dmarc',
)
INSTALLED_APPS += MODOBOA_APPS
AUTH_USER_MODEL = 'core.User'
MIDDLEWARE = (
'x_forwarded_for.middleware.XForwardedForMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django_otp.middleware.OTPMiddleware',
'modoboa.core.middleware.TwoFAMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'modoboa.core.middleware.LocalConfigMiddleware',
'modoboa.lib.middleware.AjaxLoginRedirect',
'modoboa.lib.middleware.CommonExceptionCatcher',
'modoboa.lib.middleware.RequestCatcherMiddleware',
)
AUTHENTICATION_BACKENDS = (
# 'modoboa.lib.authbackends.LDAPBackend',
# 'modoboa.lib.authbackends.SMTPBackend',
'django.contrib.auth.backends.ModelBackend',
)
# SMTP authentication
# AUTH_SMTP_SERVER_ADDRESS = 'localhost'
# AUTH_SMTP_SERVER_PORT = 25
# AUTH_SMTP_SECURED_MODE = None # 'ssl' or 'starttls' are accepted
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'modoboa.core.context_processors.top_notifications',
],
'debug': TEMPLATE_DEBUG,
},
},
]
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/sitestatic/'
STATIC_ROOT = os.path.join(BASE_DIR, 'sitestatic')
STATICFILES_DIRS = (
# os.path.join(BASE_DIR, '..', 'modoboa', 'bower_components'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Rest framework settings
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
}
# Modoboa settings
# MODOBOA_CUSTOM_LOGO = os.path.join(MEDIA_URL, "custom_logo.png")
# DOVECOT_LOOKUP_PATH = ('/path/to/dovecot', )
MODOBOA_API_URL = 'https://api.modoboa.org/1/'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
{
'NAME': 'modoboa.core.password_validation.ComplexityValidator',
'OPTIONS': {
'upper': 1,
'lower': 1,
'digits': 1,
'specials': 0
}
},
]
# CKeditor
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_RESTRICT_BY_USER = True
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_ALLOW_NONIMAGE_FILES = False
CKEDITOR_CONFIGS = {
'default': {
'allowedContent': True,
'toolbar': 'Modoboa',
'width': None,
'toolbar_Modoboa': [
['Bold', 'Italic', 'Underline'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['BidiLtr', 'BidiRtl', 'Language'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent'],
['Undo', 'Redo'],
['Link', 'Unlink', 'Anchor', '-', 'Smiley'],
['TextColor', 'BGColor', '-', 'Source'],
['Font', 'FontSize'],
['Image', ],
['SpellChecker']
],
},
}
# Logging configuration
LOGGING = {
'version': 1,
'formatters': {
'syslog': {
'format': '%(name)s: %(levelname)s %(message)s'
},
},
'handlers': {
'syslog-auth': {
'class': 'logging.handlers.SysLogHandler',
'facility': SysLogHandler.LOG_AUTH,
'formatter': 'syslog'
},
'modoboa': {
'class': 'modoboa.core.loggers.SQLHandler',
}
},
'loggers': {
'modoboa.auth': {
'handlers': ['syslog-auth', 'modoboa'],
'level': 'INFO',
'propagate': False
},
'modoboa.admin': {
'handlers': ['modoboa'],
'level': 'INFO',
'propagate': False
}
}
}
# Load settings from extensions
|
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from neutron_lib import constants as n_const
from oslo_db import exception as db_exc
from oslo_utils import versionutils
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
from neutron.common import exceptions
from neutron.db.models import l3
from neutron.db import models_v2
from neutron.db.qos import models as qos_db_model
from neutron.db import rbac_db_models
from neutron.objects import base as base_db
from neutron.objects import common_types
from neutron.objects.db import api as obj_db_api
from neutron.objects.qos import binding
from neutron.objects.qos import rule as rule_obj_impl
from neutron.objects import rbac
from neutron.objects import rbac_db
@base_db.NeutronObjectRegistry.register
class QosPolicyRBAC(rbac.RBACBaseObject):
# Version 1.0: Initial version
# Version 1.1: Inherit from rbac_db.RBACBaseObject; added 'id' and
# 'project_id'; changed 'object_id' from StringField to
# UUIDField
VERSION = '1.1'
db_model = rbac_db_models.QosPolicyRBAC
def obj_make_compatible(self, primitive, target_version):
_target_version = versionutils.convert_version_to_tuple(target_version)
if _target_version < (1, 1):
standard_fields = ['id', 'project_id']
for f in standard_fields:
primitive.pop(f)
@base_db.NeutronObjectRegistry.register
class QosPolicy(rbac_db.NeutronRbacObject):
# Version 1.0: Initial version
# Version 1.1: QosDscpMarkingRule introduced
# Version 1.2: Added QosMinimumBandwidthRule
# Version 1.3: Added standard attributes (created_at, revision, etc)
# Version 1.4: Changed tenant_id to project_id
# Version 1.5: Direction for bandwidth limit rule added
# Version 1.6: Added "is_default" field
# Version 1.7: Added floating IP bindings
# Version 1.8: Added router gateway QoS policy bindings
VERSION = '1.8'
# required by RbacNeutronMetaclass
rbac_db_cls = QosPolicyRBAC
db_model = qos_db_model.QosPolicy
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'name': obj_fields.StringField(),
'shared': obj_fields.BooleanField(default=False),
'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
'is_default': obj_fields.BooleanField(default=False),
}
fields_no_update = ['id', 'project_id']
synthetic_fields = ['rules', 'is_default']
extra_filter_names = {'is_default'}
binding_models = {'port': binding.QosPolicyPortBinding,
'network': binding.QosPolicyNetworkBinding,
'fip': binding.QosPolicyFloatingIPBinding,
'router': binding.QosPolicyRouterGatewayIPBinding}
def obj_load_attr(self, attrname):
if attrname == 'rules':
return self._reload_rules()
elif attrname == 'is_default':
return self._reload_is_default()
return super(QosPolicy, self).obj_load_attr(attrname)
def _reload_rules(self):
rules = rule_obj_impl.get_rules(self, self.obj_context, self.id)
setattr(self, 'rules', rules)
self.obj_reset_changes(['rules'])
def _reload_is_default(self):
if self.get_default() == self.id:
setattr(self, 'is_default', True)
else:
setattr(self, 'is_default', False)
self.obj_reset_changes(['is_default'])
def get_rule_by_id(self, rule_id):
"""Return rule specified by rule_id.
@raise QosRuleNotFound: if there is no such rule in the policy.
"""
for rule in self.rules:
if rule_id == rule.id:
return rule
raise exceptions.QosRuleNotFound(policy_id=self.id,
rule_id=rule_id)
# TODO(hichihara): For tag mechanism. This will be removed in bug/1704137
def to_dict(self):
_dict = super(QosPolicy, self).to_dict()
try:
_dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags]
except AttributeError:
            # AttributeError can be raised when accessing self.db_obj
# or self.db_obj.standard_attr
pass
return _dict
@classmethod
def get_policy_obj(cls, context, policy_id):
"""Fetch a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to fetch
:type policy_id: str uuid
:returns: a QosPolicy object
:raises: n_exc.QosPolicyNotFound
"""
obj = cls.get_object(context, id=policy_id)
if obj is None:
raise exceptions.QosPolicyNotFound(policy_id=policy_id)
return obj
@classmethod
def get_object(cls, context, **kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with cls.db_context_reader(admin_context):
policy_obj = super(QosPolicy, cls).get_object(admin_context,
**kwargs)
if (not policy_obj or
not cls.is_accessible(context, policy_obj)):
return
policy_obj.obj_load_attr('rules')
policy_obj.obj_load_attr('is_default')
return policy_obj
@classmethod
def get_objects(cls, context, _pager=None, validate_filters=True,
**kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with cls.db_context_reader(admin_context):
objs = super(QosPolicy, cls).get_objects(admin_context, _pager,
validate_filters,
**kwargs)
result = []
for obj in objs:
if not cls.is_accessible(context, obj):
continue
obj.obj_load_attr('rules')
obj.obj_load_attr('is_default')
result.append(obj)
return result
@classmethod
def _get_object_policy(cls, context, binding_cls, **kwargs):
with cls.db_context_reader(context):
binding_db_obj = obj_db_api.get_object(binding_cls, context,
**kwargs)
if binding_db_obj:
return cls.get_object(context, id=binding_db_obj['policy_id'])
@classmethod
def get_network_policy(cls, context, network_id):
return cls._get_object_policy(context, binding.QosPolicyNetworkBinding,
network_id=network_id)
@classmethod
def get_port_policy(cls, context, port_id):
return cls._get_object_policy(context, binding.QosPolicyPortBinding,
port_id=port_id)
@classmethod
def get_fip_policy(cls, context, fip_id):
return cls._get_object_policy(
context, binding.QosPolicyFloatingIPBinding, fip_id=fip_id)
@classmethod
def get_router_policy(cls, context, router_id):
return cls._get_object_policy(
context, binding.QosPolicyRouterGatewayIPBinding,
router_id=router_id)
# TODO(QoS): Consider extending base to trigger registered methods for us
def create(self):
with self.db_context_writer(self.obj_context):
super(QosPolicy, self).create()
if self.is_default:
self.set_default()
self.obj_load_attr('rules')
def update(self):
with self.db_context_writer(self.obj_context):
if 'is_default' in self.obj_what_changed():
if self.is_default:
self.set_default()
else:
self.unset_default()
super(QosPolicy, self).update()
def delete(self):
with self.db_context_writer(self.obj_context):
for object_type, obj_class in self.binding_models.items():
pager = base_db.Pager(limit=1)
binding_obj = obj_class.get_objects(self.obj_context,
policy_id=self.id,
_pager=pager)
if binding_obj:
raise exceptions.QosPolicyInUse(
policy_id=self.id,
object_type=object_type,
object_id=binding_obj[0]['%s_id' % object_type])
super(QosPolicy, self).delete()
def attach_network(self, network_id):
network_binding = {'policy_id': self.id,
'network_id': network_id}
network_binding_obj = binding.QosPolicyNetworkBinding(
self.obj_context, **network_binding)
try:
network_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.NetworkQosBindingError(policy_id=self.id,
net_id=network_id,
db_error=e)
def attach_port(self, port_id):
port_binding_obj = binding.QosPolicyPortBinding(
self.obj_context, policy_id=self.id, port_id=port_id)
try:
port_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.PortQosBindingError(policy_id=self.id,
port_id=port_id,
db_error=e)
def attach_floatingip(self, fip_id):
fip_binding_obj = binding.QosPolicyFloatingIPBinding(
self.obj_context, policy_id=self.id, fip_id=fip_id)
try:
fip_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.FloatingIPQosBindingError(policy_id=self.id,
fip_id=fip_id,
db_error=e)
def attach_router(self, router_id):
router_binding_obj = binding.QosPolicyRouterGatewayIPBinding(
self.obj_context, policy_id=self.id, router_id=router_id)
try:
router_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.RouterQosBindingError(policy_id=self.id,
router_id=router_id,
db_error=e)
def detach_network(self, network_id):
deleted = binding.QosPolicyNetworkBinding.delete_objects(
self.obj_context, network_id=network_id)
if not deleted:
raise exceptions.NetworkQosBindingNotFound(net_id=network_id,
policy_id=self.id)
def detach_port(self, port_id):
deleted = binding.QosPolicyPortBinding.delete_objects(self.obj_context,
port_id=port_id)
if not deleted:
raise exceptions.PortQosBindingNotFound(port_id=port_id,
policy_id=self.id)
def detach_floatingip(self, fip_id):
deleted = binding.QosPolicyFloatingIPBinding.delete_objects(
self.obj_context, fip_id=fip_id)
if not deleted:
raise exceptions.FloatingIPQosBindingNotFound(fip_id=fip_id,
policy_id=self.id)
def detach_router(self, router_id):
deleted = binding.QosPolicyRouterGatewayIPBinding.delete_objects(
self.obj_context, router_id=router_id)
if not deleted:
raise exceptions.RouterQosBindingNotFound(router_id=router_id,
policy_id=self.id)
def set_default(self):
if not self.get_default():
qos_default_policy = QosPolicyDefault(self.obj_context,
qos_policy_id=self.id,
project_id=self.project_id)
qos_default_policy.create()
elif self.get_default() != self.id:
raise exceptions.QoSPolicyDefaultAlreadyExists(
project_id=self.project_id)
def unset_default(self):
if self.get_default() == self.id:
qos_default_policy = QosPolicyDefault.get_object(
self.obj_context, project_id=self.project_id)
qos_default_policy.delete()
def get_default(self):
qos_default_policy = QosPolicyDefault.get_object(
self.obj_context, project_id=self.project_id)
if qos_default_policy:
return qos_default_policy.qos_policy_id
def get_bound_networks(self):
return [
nb.network_id
for nb in binding.QosPolicyNetworkBinding.get_objects(
self.obj_context, policy_id=self.id)
]
def get_bound_ports(self):
return [
pb.port_id
for pb in binding.QosPolicyPortBinding.get_objects(
self.obj_context, policy_id=self.id)
]
def get_bound_floatingips(self):
return [
fb.fip_id
for fb in binding.QosPolicyFloatingIPBinding.get_objects(
self.obj_context, policy_id=self.id)
]
def get_bound_routers(self):
return [
rb.router_id
for rb in binding.QosPolicyRouterGatewayIPBinding.get_objects(
self.obj_context, policy_id=self.id)
]
@classmethod
def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
binding_db_id_column, policy_id):
return list(itertools.chain.from_iterable(
session.query(bound_db.tenant_id).join(
binding_db, bound_db.id == binding_db_id_column).filter(
binding_db.policy_id == policy_id).all()))
@classmethod
def get_bound_tenant_ids(cls, context, policy_id):
"""Implements RbacNeutronObject.get_bound_tenant_ids.
:returns: set -- a set of tenants' ids dependent on QosPolicy.
"""
net = models_v2.Network
qosnet = qos_db_model.QosNetworkPolicyBinding
port = models_v2.Port
qosport = qos_db_model.QosPortPolicyBinding
fip = l3.FloatingIP
qosfip = qos_db_model.QosFIPPolicyBinding
router = l3.Router
qosrouter = qos_db_model.QosRouterGatewayIPPolicyBinding
bound_tenants = []
with cls.db_context_reader(context):
bound_tenants.extend(cls._get_bound_tenant_ids(
context.session, qosnet, net, qosnet.network_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosport, port,
qosport.port_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosfip, fip,
qosfip.fip_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosrouter, router,
qosrouter.router_id, policy_id))
return set(bound_tenants)
def obj_make_compatible(self, primitive, target_version):
def filter_rules(obj_names, rules):
return [rule for rule in rules if
rule['versioned_object.name'] in obj_names]
def filter_ingress_bandwidth_limit_rules(rules):
bwlimit_obj_name = rule_obj_impl.QosBandwidthLimitRule.obj_name()
filtered_rules = []
for rule in rules:
if rule['versioned_object.name'] == bwlimit_obj_name:
direction = rule['versioned_object.data'].get("direction")
if direction == n_const.EGRESS_DIRECTION:
rule['versioned_object.data'].pop('direction')
filtered_rules.append(rule)
else:
filtered_rules.append(rule)
return filtered_rules
_target_version = versionutils.convert_version_to_tuple(target_version)
names = []
if _target_version >= (1, 0):
names.append(rule_obj_impl.QosBandwidthLimitRule.obj_name())
if _target_version >= (1, 1):
names.append(rule_obj_impl.QosDscpMarkingRule.obj_name())
if _target_version >= (1, 2):
names.append(rule_obj_impl.QosMinimumBandwidthRule.obj_name())
if 'rules' in primitive and names:
primitive['rules'] = filter_rules(names, primitive['rules'])
if _target_version < (1, 3):
standard_fields = ['revision_number', 'created_at', 'updated_at']
for f in standard_fields:
primitive.pop(f)
if primitive['description'] is None:
# description was not nullable before
raise exception.IncompatibleObjectVersion(
objver=target_version, objname='QoSPolicy')
if _target_version < (1, 4):
primitive['tenant_id'] = primitive.pop('project_id')
if _target_version < (1, 5):
if 'rules' in primitive:
primitive['rules'] = filter_ingress_bandwidth_limit_rules(
primitive['rules'])
if _target_version < (1, 6):
primitive.pop('is_default', None)
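# Illustrative downgrade sketch (not part of the original module; derived from
# obj_make_compatible above): serializing a 1.8 policy for a 1.2 consumer keeps
# all three rule types, drops the standard attributes, renames project_id,
# strips 'direction' from egress bandwidth-limit rules (dropping ingress ones)
# and removes is_default:
#
#   primitive = policy.obj_to_primitive()['versioned_object.data']
#   policy.obj_make_compatible(primitive, '1.2')
#   # primitive now has 'tenant_id' instead of 'project_id' and no
#   # 'revision_number'/'created_at'/'updated_at'/'is_default' keys.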
@base_db.NeutronObjectRegistry.register
class QosPolicyDefault(base_db.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = qos_db_model.QosPolicyDefault
fields = {
'qos_policy_id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
}
primary_keys = ['project_id']
|
|
"""
Support for HydroQuebec.
Get data from 'My Consumption Profile' page:
https://www.hydroquebec.com/portail/en/group/clientele/portrait-de-consommation
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hydroquebec/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD,
CONF_NAME, CONF_MONITORED_VARIABLES)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['beautifulsoup4==4.5.3']
_LOGGER = logging.getLogger(__name__)
KILOWATT_HOUR = "kWh" # type: str
PRICE = "CAD" # type: str
DAYS = "days" # type: str
DEFAULT_NAME = "HydroQuebec"
REQUESTS_TIMEOUT = 15
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
SENSOR_TYPES = {
'period_total_bill': ['Current period bill',
PRICE, 'mdi:square-inc-cash'],
'period_length': ['Current period length',
DAYS, 'mdi:calendar-today'],
'period_total_days': ['Total number of days in this period',
DAYS, 'mdi:calendar-today'],
'period_mean_daily_bill': ['Period daily average bill',
PRICE, 'mdi:square-inc-cash'],
'period_mean_daily_consumption': ['Period daily average consumption',
KILOWATT_HOUR, 'mdi:flash'],
'period_total_consumption': ['Total Consumption',
KILOWATT_HOUR, 'mdi:flash'],
'period_lower_price_consumption': ['Period Lower price consumption',
KILOWATT_HOUR, 'mdi:flash'],
'period_higher_price_consumption': ['Period Higher price consumption',
KILOWATT_HOUR, 'mdi:flash'],
'yesterday_total_consumption': ['Yesterday total consumption',
KILOWATT_HOUR, 'mdi:flash'],
'yesterday_lower_price_consumption': ['Yesterday lower price consumption',
KILOWATT_HOUR, 'mdi:flash'],
'yesterday_higher_price_consumption':
['Yesterday higher price consumption', KILOWATT_HOUR, 'mdi:flash'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_VARIABLES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
HOST = "https://www.hydroquebec.com"
HOME_URL = "{}/portail/web/clientele/authentification".format(HOST)
PROFILE_URL = ("{}/portail/fr/group/clientele/"
"portrait-de-consommation".format(HOST))
MONTHLY_MAP = (('period_total_bill', 'montantFacturePeriode'),
('period_length', 'nbJourLecturePeriode'),
('period_total_days', 'nbJourPrevuPeriode'),
('period_mean_daily_bill', 'moyenneDollarsJourPeriode'),
('period_mean_daily_consumption', 'moyenneKwhJourPeriode'),
('period_total_consumption', 'consoTotalPeriode'),
('period_lower_price_consumption', 'consoRegPeriode'),
('period_higher_price_consumption', 'consoHautPeriode'))
DAILY_MAP = (('yesterday_total_consumption', 'consoTotalQuot'),
('yesterday_lower_price_consumption', 'consoRegQuot'),
('yesterday_higher_price_consumption', 'consoHautQuot'))
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HydroQuebec sensor."""
# Create a data fetcher to support all of the configured sensors. Then make
# the first call to init the data.
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
hydroquebec_data = HydroquebecData(username, password)
hydroquebec_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error(error)
return False
name = config.get(CONF_NAME)
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(HydroQuebecSensor(hydroquebec_data, variable, name))
add_devices(sensors)
class HydroQuebecSensor(Entity):
"""Implementation of a HydroQuebec sensor."""
def __init__(self, hydroquebec_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self.entity_id = "sensor.{}_{}".format(name, sensor_type)
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.hydroquebec_data = hydroquebec_data
self._state = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data from Hydroquebec and update the state."""
self.hydroquebec_data.update()
self._state = round(self.hydroquebec_data.data[self.type], 2)
class HydroquebecData(object):
"""Get data from HydroQuebec."""
def __init__(self, username, password):
"""Initialize the data object."""
self.username = username
self.password = password
self.data = None
self.cookies = None
def _get_login_page(self):
"""Go to the login page."""
from bs4 import BeautifulSoup
try:
raw_res = requests.get(HOME_URL, timeout=REQUESTS_TIMEOUT)
except OSError:
_LOGGER.error("Can not connect to login page")
return False
# Get cookies
self.cookies = raw_res.cookies
# Get login url
soup = BeautifulSoup(raw_res.content, 'html.parser')
form_node = soup.find('form', {'name': 'fm'})
if form_node is None:
_LOGGER.error("No login form find")
return False
login_url = form_node.attrs.get('action')
if login_url is None:
_LOGGER.error("Can not found login url")
return False
return login_url
def _post_login_page(self, login_url):
"""Login to HydroQuebec website."""
data = {"login": self.username,
"_58_password": self.password}
try:
raw_res = requests.post(login_url,
data=data,
cookies=self.cookies,
allow_redirects=False,
timeout=REQUESTS_TIMEOUT)
except OSError:
_LOGGER.error("Can not submit login form")
return False
if raw_res.status_code != 302:
_LOGGER.error("Bad HTTP status code")
return False
# Update cookies
self.cookies.update(raw_res.cookies)
return True
def _get_p_p_id(self):
"""Get id of consumption profile."""
from bs4 import BeautifulSoup
try:
raw_res = requests.get(PROFILE_URL,
cookies=self.cookies,
timeout=REQUESTS_TIMEOUT)
except OSError:
_LOGGER.error("Can not get profile page")
return False
# Update cookies
self.cookies.update(raw_res.cookies)
# Looking for p_p_id
soup = BeautifulSoup(raw_res.content, 'html.parser')
p_p_id = None
for node in soup.find_all('span'):
node_id = node.attrs.get('id', "")
if node_id.startswith("p_portraitConsommation_WAR"):
p_p_id = node_id[2:]
break
if p_p_id is None:
_LOGGER.error("Could not get p_p_id")
return False
return p_p_id
def _get_monthly_data(self, p_p_id):
"""Get monthly data."""
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_resource_id": ("resourceObtenirDonnees"
"PeriodesConsommation")}
try:
raw_res = requests.get(PROFILE_URL,
params=params,
cookies=self.cookies,
timeout=REQUESTS_TIMEOUT)
        except OSError:
            _LOGGER.error("Cannot get monthly data")
            return False
        try:
            json_output = raw_res.json()
        except ValueError:
            # raw_res.json() raises ValueError on invalid JSON, not OSError
            _LOGGER.error("Could not parse monthly data")
            return False
if not json_output.get('success'):
_LOGGER.error("Could not get monthly data")
return False
return json_output.get('results')
def _get_daily_data(self, p_p_id, start_date, end_date):
"""Get daily data."""
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_resource_id":
"resourceObtenirDonneesQuotidiennesConsommation",
"dateDebutPeriode": start_date,
"dateFinPeriode": end_date}
try:
raw_res = requests.get(PROFILE_URL,
params=params,
cookies=self.cookies,
timeout=REQUESTS_TIMEOUT)
        except OSError:
            _LOGGER.error("Cannot get daily data")
            return False
        try:
            json_output = raw_res.json()
        except ValueError:
            # raw_res.json() raises ValueError on invalid JSON, not OSError
            _LOGGER.error("Could not parse daily data")
            return False
if not json_output.get('success'):
_LOGGER.error("Could not get daily data")
return False
return json_output.get('results')
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from HydroQuebec."""
# Get login page
login_url = self._get_login_page()
if not login_url:
return
# Post login page
if not self._post_login_page(login_url):
return
# Get p_p_id
p_p_id = self._get_p_p_id()
if not p_p_id:
return
# Get Monthly data
        monthly_data = self._get_monthly_data(p_p_id)
        if not monthly_data:
            return
        # Only index into the results once we know the fetch succeeded;
        # indexing a False return value would raise TypeError.
        monthly_data = monthly_data[0]
# Get daily data
start_date = monthly_data.get('dateDebutPeriode')
end_date = monthly_data.get('dateFinPeriode')
daily_data = self._get_daily_data(p_p_id, start_date, end_date)
if not daily_data:
return
daily_data = daily_data[0]['courant']
# format data
self.data = {}
for key1, key2 in MONTHLY_MAP:
self.data[key1] = monthly_data[key2]
for key1, key2 in DAILY_MAP:
self.data[key1] = daily_data[key2]
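# Rough standalone usage sketch (hypothetical credentials; inside Home
# Assistant the platform is wired up via setup_platform instead):
#
#   data = HydroquebecData('user@example.com', 'secret')
#   data.update()                                  # login -> p_p_id -> fetch
#   print(data.data['period_total_consumption'])   # kWh for current period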
|
|
import random
import tdl
import dungeongenerator
import game
import instances
import level
import twitchchatmanager
from scenes import scene
class StartIntermissionEvent(object):
def __init__(self):
self.type = 'StartIntermission'
class LevelScene(scene.Scene):
instance = None
def __init__(self, x=0, y=0, width=54, height=30):
super().__init__(x, y, width, height)
self.level = None
self.player_spawn_area = []
self.tick_count = 0
self.change_level_requested = False
self._change_level_on_tick = 0
self.info = {'level': 0}
if not LevelScene.instance:
LevelScene.instance = self
instances.register('scene_root', self)
def update(self, time):
super().update(time)
self.update_fov()
def tick(self, tick):
super().tick(tick)
self.tick_count = tick
if self.change_level_requested:
instances.console.print('{} turns left.'.format(self._change_level_on_tick - tick))
if self._change_level_on_tick - tick <= 0 or self.active_player_count() == 0:
self.next_level()
def next_level(self):
instances.console.print('NEXT LEVEL!')
self.info['level'] += 1
tdl.event.push(StartIntermissionEvent())
self.init_scene()
def handle_events(self, event):
super().handle_events(event)
if game.Game.args.debug and event.type == 'KEYDOWN' and event.char.upper() == 'G':
self.next_level()
def draw(self, console):
self.console.clear()
# Draw items
for e in [n for n in self.children if not n.isinstance('Player') and not n.isinstance('Creature')]:
e.draw(self.console)
# Draw creatures
for e in [n for n in self.children if not n.isinstance('Player') and n.isinstance('Creature')]:
e.draw(self.console)
# Draw players
for e in [n for n in self.children if n.isinstance('Player')]:
e.draw(self.console)
console.blit(self.console, self.x, self.y, self.width, self.height)
def init_scene(self):
# Reset level change info
self.change_level_requested = False
self._change_level_on_tick = 0
# Persist players in level
self._children = [p for p in self.players if not p.idle]
self.level, new_entities, self.player_spawn_area = dungeongenerator.generate_level(29, 22, len(self._children), self.info)
self.append(self.level)
# Add generated entities to scene
for en in new_entities:
self.append(en)
health_bonus = len([p for p in self.players if p.state == 'PlayerExitedState'])
# Place players near stair
for p in self.players:
if p.state == 'PlayerExitedState':
# Overheal exited players up to 2x max health
p.current_health = min(p.current_health + health_bonus, p.max_health * 2)
p.brain.reset()
p.visible_tiles = set()
p.cheer_counter = 0
p.position = self.get_location_near_stairs()
self.children.append(twitchchatmanager.TwitchChatManager())
def check_collision(self, x, y):
"""Returns True if player can move into the given world coords
x: The x-coordinate in world space
y: The y-coordinate in world space
"""
if (x, y) not in self.level.data:
return False
char, fg, bg = self.level.get_char(x, y)
return char == ord(' ') or char == ord('.')
def is_visibility_blocked(self, x, y):
"""Returns True if visibility is blocked at the given coord."""
blockers = [c for c in self.children if c.blocks_visibility and c.position == (x, y)]
if blockers:
return True
return not self.check_collision(x, y)
def is_solid(self, x, y):
return not self.check_collision(x, y)
def get_level_entity_at(self, x, y):
return level.LevelEntity((x, y), self.level)
def get_entity_at(self, x, y):
result = []
for e in self.children:
if hasattr(e, 'position') and e.position == (x, y):
result.append(e)
if not result:
if self.is_solid(x, y):
result.append(level.LevelEntity((x, y), self.level))
def sort_ents(ent):
if ent.isinstance('Creature') and not ent.isinstance('Player'):
return 3
elif ent.isinstance('Player'):
return 2
elif ent.isinstance('Corpse'):
return 0.5
elif ent.isinstance('Item'):
return 1
return 0
result.sort(key=sort_ents, reverse=True)
return result
def get_entities(self, coords):
g = self.get_entity_at
return [entity for entity_sublist in [g(*pos) for pos in coords] for entity in entity_sublist if entity]
def get_entities_along_path(self, x1, y1, x2, y2):
coords = tdl.map.bresenham(x1, y1, x2, y2)
return self.get_entities(coords)
def check_visibility(self, x, y):
"""Returns true if given coordinate is in the player visible tiles"""
return (x, y) in self.level.visible_tiles
@property
def players(self):
return [p for p in self.children if p.isinstance('Player')]
def active_player_count(self):
return len([p for p in self.players if p.state != 'PlayerExitedState'])
@property
def downward_stair(self):
return [e for e in self.children if e.isinstance('Stairs') and e.name == 'Down'][0]
def change_level(self):
if not self.change_level_requested:
self.change_level_requested = True
self._change_level_on_tick = self.tick_count + 30
def get_location_near_stairs(self):
# Find open areas around stairs
rect = self.player_spawn_area
filled_location = [e.position for e in self.children if hasattr(e, 'position')]
possible_locations = []
for point in rect:
if point not in self.level.data:
continue
ch, fg, bg = self.level.data.get_char(*point)
if ch == ord('.'):
possible_locations.append(point)
possible_locations = list(set(possible_locations).difference(set(filled_location)))
if not possible_locations:
raise RuntimeError('Unable to find empty space around stairs up')
pos = possible_locations[random.randint(0, len(possible_locations) - 1)]
return pos
def update_fov(self):
self.level.visible_tiles = set()
for p in self.players:
self.level.visible_tiles = self.level.visible_tiles.union(p.visible_tiles)
self.level.seen_tiles = self.level.seen_tiles.union(p.visible_tiles)
if game.Game.args.debug:
for e in [c for c in self.children if c.isinstance('Creature')]:
self.level.visible_tiles = self.level.visible_tiles.union(e.visible_tiles)
self.level.seen_tiles = self.level.seen_tiles.union(e.visible_tiles)
if game.Game.args.no_fog:
self.level.visible_tiles = self.level.visible_tiles.union([(v[0], v[1]) for v in self.level.data])
self.level.seen_tiles = self.level.seen_tiles.union([(s[0], s[1]) for s in self.level.data])
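# Rough usage sketch (assumes the game loop and `instances` wiring that the
# rest of the project provides):
#
#   scene = LevelScene()
#   scene.init_scene()                  # generates a dungeon, spawns players
#   scene.change_level()                # schedules next_level() 30 ticks out
#   scene.tick(scene.tick_count + 30)   # countdown elapses -> next_level()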
|
|
import pandas as pd
import glob
import os
import argparse
import math
import datetime as dt
import numpy as np
import shutil
from string import Template
def concatenate_csvs(path):
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f) for f in all_files) # generators
concatenated_df = pd.concat(df_from_each_file, ignore_index=True)
del concatenated_df['Unnamed: 0'] # delete the blank column that gets added
concatenated_df['start_time'] = pd.to_datetime(concatenated_df['start_time'])
concatenated_df['end_time'] = pd.to_datetime(concatenated_df['end_time'])
concatenated_df = concatenated_df.sort_values(by="start_time").reset_index(drop=True)
return concatenated_df
# Calculate bearing
# See: https://gis.stackexchange.com/questions/29239/calculate-bearing-between-two-decimal-gps-coordinates/48911
def calc_bearing_between_points(startLat, startLong, endLat, endLong):
startLat = math.radians(startLat)
startLong = math.radians(startLong)
endLat = math.radians(endLat)
endLong = math.radians(endLong)
dLong = endLong - startLong
dPhi = math.log(math.tan(endLat/2.0+math.pi/4.0)/math.tan(startLat/2.0+math.pi/4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
    bearing = (math.degrees(math.atan2(dLong, dPhi)) + 360.0) % 360.0
return bearing
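# Worked example (hypothetical coordinates): moving one hundredth of a degree
# north-east from (45.50, -73.60) to (45.51, -73.59) yields a rhumb-line
# bearing of roughly 35 degrees at this latitude:
#
#   >>> round(calc_bearing_between_points(45.50, -73.60, 45.51, -73.59))
#   35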
# Stacked bar chart functions
def count_vehicles_on_screen(concatenated_df, date, frames):
number_of_vehicles = []
number_of_buses = []
number_of_trams = []
number_of_cablecars = []
number_of_metros = []
number_of_trains = []
number_of_ferries = []
day = dt.datetime.strptime(date, "%Y-%m-%d")
thisday = dt.datetime.strftime(day, "%Y-%m-%d")
chunks = float(frames) / (60*24)
increment = float(60.0 / chunks)
the_day = [pd.to_datetime(thisday) + dt.timedelta(seconds = i*increment) for i in range(int(60 * 24 * chunks))]
count = 0
for increment in the_day:
vehicles_on_the_road = concatenated_df[(concatenated_df['end_time'] > increment) & (concatenated_df['start_time'] <= increment)]
number_vehicles_on_the_road = len(vehicles_on_the_road)
number_of_vehicles.append(number_vehicles_on_the_road)
for route_type in ['bus', 'tram', 'cablecar', 'metro', 'rail', 'ferry']:
just_this_mode = vehicles_on_the_road[vehicles_on_the_road['route_type'] == route_type]
number_of_this_mode = len(just_this_mode)
if route_type == 'bus':
number_of_buses.append(number_of_this_mode)
elif route_type == 'tram':
number_of_trams.append(number_of_this_mode)
elif route_type == 'cablecar':
number_of_cablecars.append(number_of_this_mode)
elif route_type == 'metro':
number_of_metros.append(number_of_this_mode)
elif route_type == 'rail':
number_of_trains.append(number_of_this_mode)
elif route_type == 'ferry':
number_of_ferries.append(number_of_this_mode)
        if count % (60*chunks) == 0:
            print(increment)
        count += 1
    vehicles = pd.DataFrame(list(zip(the_day, number_of_vehicles)))
    print(len(vehicles.index), "= length of vehicles index")
    buses = pd.DataFrame(list(zip(the_day, number_of_buses)))
    trams = pd.DataFrame(list(zip(the_day, number_of_trams)))
    cablecars = pd.DataFrame(list(zip(the_day, number_of_cablecars)))
    metros = pd.DataFrame(list(zip(the_day, number_of_metros)))
    trains = pd.DataFrame(list(zip(the_day, number_of_trains)))
    ferries = pd.DataFrame(list(zip(the_day, number_of_ferries)))
for df in [vehicles, buses, trams, metros, cablecars, trains, ferries]:
df.columns = ['time', 'count']
return vehicles, buses, trams, metros, cablecars, trains, ferries
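# Worked example of the interval arithmetic above (with the default
# frames=3600): chunks = 3600 / 1440 = 2.5 samples per minute, so increment =
# 60 / 2.5 = 24 seconds, and the_day holds int(1440 * 2.5) = 3600 timestamps
# spaced 24 s apart across the day.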
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--date", help="Animation day")
parser.add_argument("--apikey", help="Mapzen API Key")
parser.add_argument(
"--name",
help="Output directory name",
default="output"
)
parser.add_argument(
"--bbox",
help="Bounding box"
)
parser.add_argument(
"--clip_to_bbox",
help="Clip trips to bounding box",
action="store_true"
)
parser.add_argument(
"--frames",
help="Number of frames in animation. 3600 frames = 60 second animation.",
default=3600
)
parser.add_argument(
"--animate",
help="Generate processing sketch file.",
action="store_true"
)
parser.add_argument(
"--recording",
help="Records sketch to mp4",
action="store_true"
)
parser.add_argument(
"--skip_bearings",
help="Skip the calculate bearings between points step when concatenating csvs.",
action="store_true"
)
args = parser.parse_args()
if not args.date:
raise Exception('date required')
MAPZEN_APIKEY = args.apikey
OUTPUT_NAME = args.name
DATE = args.date
west, south, east, north = 0, 0, 0, 0 #null island!
FRAMES = args.frames
RECORDING = args.recording
#print ""
print("INPUTS:")
print("date: ", DATE)
print("name: ", OUTPUT_NAME)
print("API key: ", MAPZEN_APIKEY)
if args.bbox:
west, south, east, north = args.bbox.split(",")
df = concatenate_csvs("sketches/{}/{}/data/indiv_operators".format(OUTPUT_NAME, DATE))
if not args.skip_bearings:
print("Calculating trip segment bearings.")
df['bearing'] = df.apply(lambda row: calc_bearing_between_points(row['start_lat'], row['start_lon'], row['end_lat'], row['end_lon']), axis=1)
if args.bbox and args.clip_to_bbox:
df = df[
((df['start_lat'] >= float(south)) & (df['start_lat'] <= float(north)) & (df['start_lon'] >= float(west)) & (df['start_lon'] <= float(east))) &
((df['end_lat'] >= float(south)) & (df['end_lat'] <= float(north)) & (df['end_lon'] >= float(west)) & (df['end_lon'] <= float(east)))
]
# Save to csv.
df.to_csv("sketches/{}/{}/data/output.csv".format(OUTPUT_NAME, DATE))
print("Total rows: ", df.shape[0])
print("Counting number of vehicles in transit.")
vehicles, buses, trams, metros, cablecars, trains, ferries = count_vehicles_on_screen(df, DATE, FRAMES)
print("Frames: ", FRAMES)
# ### Save vehicle counts to csv (3600 frame version)
# Our Processing sketch has 3,600 frames (at 60 frames per second makes
# a one minute video). One day has 5,760 15-second intervals. So to make
# things easy we will select the vehicle counts at 3,600 of the 15-second
# intervals throughout the day. We will select them randomly, but will
# maintain chronological order by sorting and also consistency between
    # vehicle types by using a consistent set of random indices to select
# counts for different vehicle types.
random_indices = np.sort(np.random.choice(vehicles.index, int(FRAMES), replace=False))
vehicles_counts_output = vehicles.loc[random_indices].reset_index(drop=True)
vehicles_counts_output['frame'] = vehicles_counts_output.index
buses_counts_output = buses.loc[random_indices].reset_index(drop=True)
buses_counts_output['frame'] = buses_counts_output.index
trams_counts_output = trams.loc[random_indices].reset_index(drop=True)
trams_counts_output['frame'] = trams_counts_output.index
metros_counts_output = metros.loc[random_indices].reset_index(drop=True)
metros_counts_output['frame'] = metros_counts_output.index
cablecars_counts_output = cablecars.loc[random_indices].reset_index(drop=True)
cablecars_counts_output['frame'] = cablecars_counts_output.index
trains_counts_output = trains.loc[random_indices].reset_index(drop=True)
trains_counts_output['frame'] = trains_counts_output.index
ferries_counts_output = ferries.loc[random_indices].reset_index(drop=True)
ferries_counts_output['frame'] = ferries_counts_output.index
# Save these vehicle count stats to csv's.
if not os.path.exists("sketches/{}/{}/data/vehicle_counts".format(OUTPUT_NAME, DATE)):
os.makedirs("sketches/{}/{}/data/vehicle_counts".format(OUTPUT_NAME, DATE))
vehicles_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/vehicles_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
buses_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/buses_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
trams_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/trams_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
metros_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/metros_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
cablecars_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/cablecars_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
trains_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/trains_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
ferries_counts_output.to_csv("sketches/{}/{}/data/vehicle_counts/ferries_{}.csv".format(OUTPUT_NAME, DATE, FRAMES))
# Hacky way to center the sketch
if not args.bbox:
south, west, north, east = df['start_lat'][0], df['start_lon'][0], df['start_lat'][1], df['start_lon'][1]
## Use processing sketch template to create processing sketch file
if args.animate:
        module_path = os.path.dirname(__file__)
template_path = os.path.join(module_path, 'templates', 'template.pde')
with open(template_path) as f:
data = f.read()
s = Template(data)
if not os.path.exists("sketches/{}/{}/sketch".format(OUTPUT_NAME, DATE)):
os.makedirs("sketches/{}/{}/sketch".format(OUTPUT_NAME, DATE))
for asset in ['calendar_icon.png', 'clock_icon.png']:
shutil.copyfile(
os.path.join(module_path, 'assets', asset),
os.path.join('sketches', OUTPUT_NAME, DATE, "sketch", asset)
)
with open("sketches/{}/{}/sketch/sketch.pde".format(OUTPUT_NAME, DATE), "w") as f:
f.write(
s.substitute(
DIRECTORY_NAME=OUTPUT_NAME,
DATE=DATE,
TOTAL_FRAMES=FRAMES,
RECORDING=str(RECORDING).lower(),
AVG_LAT=(float(south) + float(north))/2.0,
AVG_LON=(float(west) + float(east))/2.0
)
)
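# A minimal sketch of the Template substitution used above; the placeholder
# line here is an assumption for illustration, not the real contents of
# template.pde:
#   from string import Template
#   s = Template('int totalFrames = $TOTAL_FRAMES; boolean recording = $RECORDING;')
#   s.substitute(TOTAL_FRAMES=3600, RECORDING='false')
#   # -> 'int totalFrames = 3600; boolean recording = false;'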
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import uuid
import simplejson as json
import six
import prettytable
from troveclient.openstack.common.apiclient import exceptions
from troveclient.openstack.common import strutils
def arg(*args, **kwargs):
"""Decorator for CLI args."""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
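# Illustrative usage of @arg (the command function and flag are hypothetical):
#   @arg('--size', metavar='<size>', type=int, help='Size of the instance.')
#   def do_create(cs, args):
#       ...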
def env(*vars, **kwargs):
"""
returns the first environment variable set
if none are non-empty, defaults to '' or keyword arg default
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
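# Illustrative usage of env(); the variable names are examples only:
#   username = env('OS_USERNAME', 'TROVE_USERNAME', default='admin')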
def add_arg(f, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(f, 'arguments'):
f.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in f.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
f.arguments.insert(0, (args, kwargs))
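# Why insert(0): decorators apply bottom-up, so in
#   @arg('--first')
#   @arg('--second')
#   def do_thing(cs, args): ...
# '--second' is bound first; prepending keeps f.arguments in the written
# order ('--first' before '--second').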
def unauthenticated(f):
"""
Adds 'unauthenticated' attribute to decorated function.
Usage:
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks to see if the function is marked as not requiring authentication
with the @unauthenticated decorator. Returns True if decorator is
set to True, False otherwise.
"""
return getattr(f, 'unauthenticated', False)
def service_type(stype):
"""
Adds 'service_type' attribute to decorated function.
Usage:
@service_type('database')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""
Retrieves service type from function
"""
return getattr(f, 'service_type', None)
def translate_keys(collection, convert):
    """Expose values under new attribute names on each item in collection.

    ``convert`` is an iterable of (from_key, to_key) pairs; for each item,
    the value stored in item._info under from_key is set as attribute
    to_key when the item does not already have that attribute.
    """
for item in collection:
keys = list(item.__dict__.keys())
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
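# Illustrative call (the key names are hypothetical):
#   translate_keys(instances, [('tenant_id', 'project_id')])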
def _output_override(objs, print_as):
"""
If an output override global flag is set, print with override
raise BaseException if no printing was overridden.
"""
if 'json_output' in globals() and json_output:
if print_as == 'list':
new_objs = []
for o in objs:
new_objs.append(o._info)
elif print_as == 'dict':
new_objs = objs
# pretty print the json
print(json.dumps(new_objs, indent=' '))
else:
raise BaseException('No valid output override')
def _print(pt, order):
if sys.version_info >= (3, 0):
print(pt.get_string(sortby=order))
else:
print(strutils.safe_encode(pt.get_string(sortby=order)))
def print_list(objs, fields, formatters={}, order_by=None, obj_is_dict=False):
try:
_output_override(objs, 'list')
return
except BaseException:
pass
mixed_case_fields = []
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not obj_is_dict:
data = getattr(o, field_name, '')
else:
data = o.get(field_name, '')
row.append(data)
pt.add_row(row)
if order_by is None:
order_by = fields[0]
_print(pt, order_by)
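# Illustrative call, assuming objects that expose .id and .name attributes:
#   print_list(instances, ['ID', 'Name'], order_by='Name')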
def print_dict(d, property="Property"):
try:
_output_override(d, 'dict')
return
except BaseException:
pass
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
pt.aligns = ['l', 'l']
[pt.add_row(list(r)) for r in six.iteritems(d)]
_print(pt, property)
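# Illustrative call:
#   print_dict({'name': 'db1', 'status': 'ACTIVE'})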
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
if sys.version_info <= (3, 0):
name_or_id = strutils.safe_decode(name_or_id)
# now try to get entity as uuid
try:
uuid.UUID(name_or_id)
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
try:
try:
return manager.find(human_id=name_or_id)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
try:
return manager.find(display_name=name_or_id)
except (UnicodeDecodeError, exceptions.NotFound):
try:
                    # Instances do not have a name, only a display_name
return manager.find(display_name=name_or_id)
except exceptions.NotFound:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exceptions.CommandError(msg)
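# Illustrative call (hypothetical manager); lookups are tried as integer id,
# UUID, human id, name, then display name:
#   instance = find_resource(cs.instances, 'my-database')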
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
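# Illustrative usage (the hook type and function are hypothetical):
#   class MyClient(HookableMixin):
#       pass
#   MyClient.add_hook('pre_request', lambda *a, **kw: print('about to send'))
#   MyClient.run_hooks('pre_request')
# Note that _hooks_map lives on HookableMixin itself, so hooks are shared
# across all subclasses unless a subclass overrides _hooks_map.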
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
Make use strutils.to_slug from openstack common
"""
return strutils.to_slug(value, incoming=None, errors="strict")
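# Illustrative result (assuming strutils.to_slug's usual normalization):
#   slugify('Hello World') -> 'hello-world'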
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
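# Illustrative results:
#   is_uuid_like(str(uuid.uuid4()))  -> True
#   is_uuid_like('not-a-uuid')       -> False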
|
|
import prettytable
import discord
import os
import re
import typing
from urllib.parse import urlparse
from PythonGists import PythonGists
from discord.ext import commands
from cogs.utils.checks import embed_perms, cmd_prefix_len
'''Module for server commands.'''
class Server:
def __init__(self, bot):
self.bot = bot
self.invites = ['discord.gg/', 'discordapp.com/invite/']
self.invite_domains = ['discord.gg', 'discordapp.com']
def find_server(self, msg):
server = None
if msg:
try:
float(msg)
server = self.bot.get_guild(int(msg))
if not server:
return self.bot.bot_prefix + 'Server not found.', False
except:
for i in self.bot.guilds:
if i.name.lower() == msg.lower().strip():
server = i
break
if not server:
return self.bot.bot_prefix + 'Could not find server. Note: You must be a member of the server you are trying to search.', False
return server, True
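    # Illustrative call from a command body: on failure the first element is
    # the error string and the second is False.
    #   server, found = self.find_server('123456789012345678')
    #   if not found:
    #       return await ctx.send(server)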
# Stats about server
@commands.group(aliases=['server', 'sinfo', 'si'], pass_context=True, invoke_without_command=True)
async def serverinfo(self, ctx, *, msg=""):
"""Various info about the server. >help server for more info."""
if ctx.invoked_subcommand is None:
if msg:
server = None
try:
float(msg)
server = self.bot.get_guild(int(msg))
if not server:
return await ctx.send(
self.bot.bot_prefix + 'Server not found.')
except:
for i in self.bot.guilds:
if i.name.lower() == msg.lower():
server = i
break
if not server:
return await ctx.send(self.bot.bot_prefix + 'Could not find server. Note: You must be a member of the server you are trying to search.')
else:
server = ctx.message.guild
online = 0
for i in server.members:
if str(i.status) == 'online' or str(i.status) == 'idle' or str(i.status) == 'dnd':
online += 1
all_users = []
for user in server.members:
all_users.append('{}#{}'.format(user.name, user.discriminator))
all_users.sort()
            all_users_text = '\n'.join(all_users)  # avoid shadowing the built-in all()
            channel_count = len([x for x in server.channels if isinstance(x, discord.TextChannel)])
role_count = len(server.roles)
emoji_count = len(server.emojis)
if embed_perms(ctx.message):
em = discord.Embed(color=0xea7938)
em.add_field(name='Name', value=server.name)
em.add_field(name='Owner', value=server.owner, inline=False)
em.add_field(name='Members', value=server.member_count)
em.add_field(name='Currently Online', value=online)
em.add_field(name='Text Channels', value=str(channel_count))
em.add_field(name='Region', value=server.region)
em.add_field(name='Verification Level', value=str(server.verification_level))
em.add_field(name='Highest role', value=server.role_hierarchy[0])
em.add_field(name='Number of roles', value=str(role_count))
em.add_field(name='Number of emotes', value=str(emoji_count))
                url = PythonGists.Gist(description='All Users in: %s' % server.name, content=all_users_text, name='server.txt')
gist_of_users = '[List of all {} users in this server]({})'.format(server.member_count, url)
em.add_field(name='Users', value=gist_of_users)
em.add_field(name='Created At', value=server.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'))
em.set_thumbnail(url=server.icon_url)
em.set_author(name='Server Info', icon_url='https://i.imgur.com/RHagTDg.png')
em.set_footer(text='Server ID: %s' % server.id)
await ctx.send(embed=em)
else:
                msg = '**Server Info:** ```Name: %s\nOwner: %s\nMembers: %s\nCurrently Online: %s\nRegion: %s\nVerification Level: %s\nHighest Role: %s\nDefault Channel: %s\nCreated At: %s\nServer avatar: %s```' % (
                    server.name, server.owner, server.member_count, online, server.region, str(server.verification_level), server.role_hierarchy[0], server.default_channel, server.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'), server.icon_url)
await ctx.send(self.bot.bot_prefix + msg)
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def emojis(self, ctx, msg: str = None):
"""List all emojis in this server. Ex: >server emojis"""
if msg:
server, found = self.find_server(msg)
if not found:
return await ctx.send(server)
else:
server = ctx.message.guild
emojis = [str(x) for x in server.emojis]
await ctx.send("".join(emojis))
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def avi(self, ctx, msg: str = None):
"""Get server avatar image link."""
if msg:
server, found = self.find_server(msg)
if not found:
return await ctx.send(server)
else:
server = ctx.message.guild
if embed_perms(ctx.message):
em = discord.Embed()
em.set_image(url=server.icon_url)
await ctx.send(embed=em)
else:
await ctx.send(self.bot.bot_prefix + server.icon_url)
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def role(self, ctx, *, msg):
"""Get more info about a specific role. Ex: >server role Admins"""
for role in ctx.message.guild.roles:
            if msg.lower() == role.name.lower() or msg == str(role.id):
all_users = [str(x) for x in role.members]
all_users.sort()
all_users = ', '.join(all_users)
em = discord.Embed(title='Role Info', color=role.color)
em.add_field(name='Name', value=role.name)
em.add_field(name='ID', value=role.id, inline=False)
em.add_field(name='Users in this role', value=str(len(role.members)))
em.add_field(name='Role color hex value', value=str(role.color))
em.add_field(name='Role color RGB value', value=role.color.to_rgb())
em.add_field(name='Mentionable', value=role.mentionable)
if len(role.members) > 10:
all_users = all_users.replace(', ', '\n')
url = PythonGists.Gist(description='Users in role: {} for server: {}'.format(role.name, ctx.message.guild.name), content=str(all_users), name='role.txt')
em.add_field(name='All users', value='{} users. [List of users posted to Gist.]({})'.format(len(role.members), url), inline=False)
else:
em.add_field(name='All users', value=all_users, inline=False)
em.add_field(name='Created at', value=role.created_at.__format__('%x at %X'))
em.set_thumbnail(url='http://www.colorhexa.com/%s.png' % str(role.color).strip("#"))
await ctx.message.delete()
return await ctx.send(content=None, embed=em)
await ctx.message.delete()
await ctx.send(self.bot.bot_prefix + 'Could not find role ``%s``' % msg)
@commands.command(aliases=['channel', 'cinfo', 'ci'], pass_context=True, no_pm=True)
    # typing.Union converters require discord.py 1.3+
    async def channelinfo(self, ctx, *, channel: typing.Union[discord.TextChannel, discord.VoiceChannel] = None):
        """Shows channel information."""
if not channel:
channel = ctx.message.channel
# else:
# channel = ctx.message.guild.get_channel(int(chan))
# if not channel: channel = self.bot.get_channel(int(chan))
data = discord.Embed()
content = None
        if hasattr(channel, 'mention'):
            content = self.bot.bot_prefix + "**Information about Channel:** " + channel.mention
if hasattr(channel, 'changed_roles'):
if len(channel.changed_roles) > 0:
data.color = discord.Colour.green() if channel.changed_roles[0].permissions.read_messages else discord.Colour.red()
        if isinstance(channel, discord.TextChannel):
            _type = "Text"
        elif isinstance(channel, discord.VoiceChannel):
            _type = "Voice"
        else:
            _type = "Unknown"
data.add_field(name="Type", value=_type)
data.add_field(name="ID", value=channel.id, inline=False)
if hasattr(channel, 'position'):
data.add_field(name="Position", value=channel.position)
        if isinstance(channel, discord.VoiceChannel):
            if channel.user_limit != 0:
                data.add_field(name="User Number", value="{}/{}".format(len(channel.members), channel.user_limit))
            else:
                data.add_field(name="User Number", value="{}".format(len(channel.members)))
userlist = [r.display_name for r in channel.members]
if not userlist:
userlist = "None"
else:
userlist = "\n".join(userlist)
data.add_field(name="Users", value=userlist)
data.add_field(name="Bitrate", value=channel.bitrate)
elif isinstance(channel, discord.TextChannel):
if channel.members:
data.add_field(name="Members", value="%s"%len(channel.members))
if channel.topic:
data.add_field(name="Topic", value=channel.topic, inline=False)
        _hidden = []
        _allowed = []
        for role in channel.changed_roles:
            if role.permissions.read_messages:
                _allowed.append(role.mention)
            else:
                _hidden.append(role.mention)
        if len(_allowed) > 0:
            data.add_field(name='Allowed Roles (%s)' % len(_allowed), value=', '.join(_allowed), inline=False)
        if len(_hidden) > 0:
            data.add_field(name='Restricted Roles (%s)' % len(_hidden), value=', '.join(_hidden), inline=False)
if channel.created_at:
data.set_footer(text=("Created on {} ({} days ago)".format(channel.created_at.strftime("%d %b %Y %H:%M"), (ctx.message.created_at - channel.created_at).days)))
        try:
            await ctx.send(content if content else None, embed=data)
        except:
            await ctx.send(self.bot.bot_prefix + "I need the `Embed links` permission to send this")
@commands.command(aliases=['invitei', 'ii'], pass_context=True)
async def inviteinfo(self, ctx, *, invite: str = None):
"""Shows invite informations."""
        if invite:
            for url in re.findall(r'(https?://\S+)', invite):
                invite = await self.bot.get_invite(urlparse(url).path.strip('/').split('/')[-1].strip('<>'))
                break
else:
async for msg in ctx.message.channel.history():
if any(x in msg.content for x in self.invites):
for url in re.findall(r'(https?://\S+)', msg.content):
                        parsed = urlparse(url)
                        if parsed.netloc in self.invite_domains:
                            # Take the last path segment so both discord.gg/<code>
                            # and discordapp.com/invite/<code> resolve correctly.
                            code = parsed.path.strip('/').split('/')[-1].strip("<>')")
                            invite = await self.bot.get_invite(code)
                            break
        if not invite:
            return await ctx.send(content="Could not find any invite in the last 100 messages. Please specify invite manually.")
data = discord.Embed()
content = None
if invite.id is not None:
content = self.bot.bot_prefix + "**Informations about Invite:** %s" % invite.id
if invite.revoked is not None:
data.colour = discord.Colour.red() if invite.revoked else discord.Colour.green()
if invite.created_at is not None:
            data.set_footer(text="Created on {} ({} days ago)".format(invite.created_at.strftime("%d %b %Y %H:%M"), (ctx.message.created_at - invite.created_at).days))
if invite.max_age is not None:
if invite.max_age > 0:
                expires = '%s seconds' % invite.max_age
else:
expires = "Never"
data.add_field(name="Expires in", value=expires)
if invite.temporary is not None:
data.add_field(name="Temp membership", value="Yes" if invite.temporary else "No")
if invite.uses is not None:
data.add_field(name="Uses", value="%s / %s" % (invite.uses, invite.max_uses))
if invite.inviter.name is not None:
data.set_author(name=invite.inviter.name + '#' + invite.inviter.discriminator + " (%s)" % invite.inviter.id, icon_url=invite.inviter.avatar_url)
if invite.guild.name is not None:
data.add_field(name="Guild", value="Name: " + invite.guild.name + "\nID: %s" % invite.guild.id, inline=False)
if invite.guild.icon_url is not None:
data.set_thumbnail(url=invite.guild.icon_url)
if invite.channel.name is not None:
channel = "%s\n#%s" % (invite.channel.mention, invite.channel.name) if isinstance(invite.channel, discord.TextChannel) else invite.channel.name
data.add_field(name="Channel", value="Name: " + channel + "\nID: %s" % invite.channel.id, inline=False)
try:
await ctx.send(content=content, embed=data)
except:
await ctx.send(content="I need the `Embed links` permission to send this")
def setup(bot):
bot.add_cog(Server(bot))
|