code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODULE
and default settings.py files.
"""
from __future__ import unicode_literals
import os
import re
import shutil
import socket
import subprocess
import sys
import codecs
from django import conf, bin, get_version
from django.conf import settings
from django.db import connection
from django.test.simple import DjangoTestSuiteRunner
from django.utils import unittest
from django.utils.encoding import force_str, force_text
from django.utils._os import upath
from django.test import LiveServerTestCase
test_dir = os.path.dirname(os.path.dirname(upath(__file__)))
class AdminScriptTestCase(unittest.TestCase):
    """
    Base class for the admin-script tests.

    Provides helpers to write and remove throwaway settings modules, to run
    django-admin.py / manage.py in a subprocess with a controlled environment,
    and stream-content assertions for the subprocess output.
    """
    def write_settings(self, filename, apps=None, is_dir=False, sdict=None):
        """
        Write a temporary settings module named ``filename`` into the test
        directory.

        ``apps`` overrides the default INSTALLED_APPS list; ``is_dir`` writes
        the settings as a package (``<filename>/__init__.py``); ``sdict`` maps
        extra setting names to literal source fragments to append.
        """
        test_dir = os.path.dirname(os.path.dirname(upath(__file__)))
        if is_dir:
            settings_dir = os.path.join(test_dir, filename)
            os.mkdir(settings_dir)
            settings_file_path = os.path.join(settings_dir, '__init__.py')
        else:
            settings_file_path = os.path.join(test_dir, filename)
        with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# Settings file automatically generated by regressiontests.admin_scripts test case\n')
            # Copy a few settings from the live test settings so the
            # subprocess talks to the same database and URLconf.
            exports = [
                'DATABASES',
                'ROOT_URLCONF',
                'SECRET_KEY',
            ]
            for s in exports:
                if hasattr(settings, s):
                    o = getattr(settings, s)
                    if not isinstance(o, dict):
                        o = "'%s'" % o
                    settings_file.write("%s = %s\n" % (s, o))
            if apps is None:
                apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'regressiontests.admin_scripts']
            settings_file.write("INSTALLED_APPS = %s\n" % apps)
            if sdict:
                for k, v in sdict.items():
                    settings_file.write("%s = %s\n" % (k, v))

    def remove_settings(self, filename, is_dir=False):
        """
        Remove a settings file/package written by write_settings(), together
        with any compiled bytecode that could confuse later tests.
        """
        full_name = os.path.join(test_dir, filename)
        if is_dir:
            shutil.rmtree(full_name)
        else:
            os.remove(full_name)
        # Also try to remove the compiled file; if it exists, it could
        # mess up later tests that depend upon the .py file not existing
        try:
            if sys.platform.startswith('java'):
                # Jython produces module$py.class files
                os.remove(re.sub(r'\.py$', '$py.class', full_name))
            else:
                # CPython produces module.pyc files
                os.remove(full_name + 'c')
        except OSError:
            pass
        # Also remove a __pycache__ directory, if it exists
        cache_name = os.path.join(test_dir, '__pycache__')
        if os.path.isdir(cache_name):
            shutil.rmtree(cache_name)

    def _ext_backend_paths(self):
        """
        Returns the paths for any external backend packages.
        """
        paths = []
        first_package_re = re.compile(r'(^[^\.]+)\.')
        for backend in settings.DATABASES.values():
            result = first_package_re.findall(backend['ENGINE'])
            # findall() returns a list; compare its first element (the
            # top-level package name) against 'django' — comparing the list
            # itself to a string is always True and would wrongly treat
            # django's own backends as external.
            if result and result[0] != 'django':
                backend_pkg = __import__(result[0])
                backend_dir = os.path.dirname(backend_pkg.__file__)
                paths.append(os.path.dirname(backend_dir))
        return paths

    def run_test(self, script, args, settings_file=None, apps=None):
        """
        Run ``script`` with ``args`` in a subprocess from the test directory,
        with DJANGO_SETTINGS_MODULE and the Python path set up for the test.

        Returns the (stdout, stderr) text of the subprocess. ``apps`` is
        accepted for interface compatibility but unused here. The previous
        environment and working directory are always restored, even when
        launching the subprocess fails.
        """
        test_dir = os.path.dirname(os.path.dirname(upath(__file__)))
        project_dir = os.path.dirname(test_dir)
        base_dir = os.path.dirname(project_dir)
        ext_backend_base_dirs = self._ext_backend_paths()

        # Remember the old environment
        old_django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', None)
        if sys.platform.startswith('java'):
            python_path_var_name = 'JYTHONPATH'
        else:
            python_path_var_name = 'PYTHONPATH'
        old_python_path = os.environ.get(python_path_var_name, None)
        old_cwd = os.getcwd()

        try:
            # Set the test environment
            if settings_file:
                os.environ['DJANGO_SETTINGS_MODULE'] = settings_file
            elif 'DJANGO_SETTINGS_MODULE' in os.environ:
                del os.environ['DJANGO_SETTINGS_MODULE']
            python_path = [project_dir, base_dir]
            python_path.extend(ext_backend_base_dirs)
            os.environ[python_path_var_name] = os.pathsep.join(python_path)

            # Move to the test directory and run
            os.chdir(test_dir)
            out, err = subprocess.Popen([sys.executable, script] + args,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                universal_newlines=True).communicate()
        finally:
            # Restore the old environment. Variables that were unset before
            # are removed again so state doesn't leak between tests.
            if old_django_settings_module is None:
                os.environ.pop('DJANGO_SETTINGS_MODULE', None)
            else:
                os.environ['DJANGO_SETTINGS_MODULE'] = old_django_settings_module
            if old_python_path is None:
                os.environ.pop(python_path_var_name, None)
            else:
                os.environ[python_path_var_name] = old_python_path
            # Move back to the old working directory
            os.chdir(old_cwd)
        return out, err

    def run_django_admin(self, args, settings_file=None):
        """Run the installed django-admin.py script with ``args``."""
        bin_dir = os.path.abspath(os.path.dirname(upath(bin.__file__)))
        return self.run_test(os.path.join(bin_dir, 'django-admin.py'), args, settings_file)

    def run_manage(self, args, settings_file=None):
        """
        Render the project-template manage.py into the test directory (with
        'regressiontests' as the project name), run it with ``args``, and
        schedule its removal via addCleanup().
        """
        def safe_remove(path):
            try:
                os.remove(path)
            except OSError:
                pass

        conf_dir = os.path.dirname(upath(conf.__file__))
        template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
        test_manage_py = os.path.join(test_dir, 'manage.py')
        shutil.copyfile(template_manage_py, test_manage_py)

        with open(test_manage_py, 'r') as fp:
            manage_py_contents = fp.read()
        manage_py_contents = manage_py_contents.replace(
            "{{ project_name }}", "regressiontests")
        with open(test_manage_py, 'w') as fp:
            fp.write(manage_py_contents)
        self.addCleanup(safe_remove, test_manage_py)

        return self.run_test('./manage.py', args, settings_file)

    def assertNoOutput(self, stream):
        "Utility assertion: assert that the given stream is empty"
        self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)

    def assertOutput(self, stream, msg):
        "Utility assertion: assert that the given message exists in the output"
        stream = force_text(stream)
        self.assertTrue(msg in stream, "'%s' does not match actual output text '%s'" % (msg, stream))

    def assertNotInOutput(self, stream, msg):
        "Utility assertion: assert that the given message doesn't exist in the output"
        stream = force_text(stream)
        self.assertFalse(msg in stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
    "A series of tests for django-admin.py when there is no settings.py file."

    def test_builtin_command(self):
        "no settings: django-admin builtin commands fail with an error when no settings provided"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_bad_settings(self):
        "no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
class DjangoAdminDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application.
    """

    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "default: django-admin builtin commands fail with an error when no settings provided"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "default: django-admin builtin commands succeed if settings are provided as argument"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=regressiontests.settings', 'admin_scripts'])
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "default: django-admin builtin commands succeed if settings are provided in the environment"
        out, err = self.run_django_admin(
            ['sqlall', 'admin_scripts'], 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        "default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "default: django-admin can't execute user commands if it isn't provided settings"
        out, err = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "default: django-admin can execute user commands if settings are provided as argument"
        out, err = self.run_django_admin(
            ['noargs_command', '--settings=regressiontests.settings'])
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "default: django-admin can execute user commands if settings are provided in environment"
        out, err = self.run_django_admin(
            ['noargs_command'], 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application specified using a full path.
    """

    def setUp(self):
        self.write_settings('settings.py',
            apps=['django.contrib.auth', 'django.contrib.contenttypes',
                  'regressiontests.admin_scripts'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "fulldefault: django-admin builtin commands fail with an error when no settings provided"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "fulldefault: django-admin builtin commands succeed if a settings file is provided"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=regressiontests.settings', 'admin_scripts'])
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "fulldefault: django-admin builtin commands succeed if the environment contains settings"
        out, err = self.run_django_admin(
            ['sqlall', 'admin_scripts'], 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        "fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "fulldefault: django-admin can't execute user commands unless settings are provided"
        out, err = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "fulldefault: django-admin can execute user commands if settings are provided as argument"
        out, err = self.run_django_admin(
            ['noargs_command', '--settings=regressiontests.settings'])
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "fulldefault: django-admin can execute user commands if settings are provided in environment"
        out, err = self.run_django_admin(
            ['noargs_command'], 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    doesn't contain the test application.
    """

    def setUp(self):
        self.write_settings('settings.py',
            apps=['django.contrib.auth', 'django.contrib.contenttypes'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "minimal: django-admin builtin commands fail with an error when no settings provided"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "minimal: django-admin builtin commands fail if settings are provided as argument"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=regressiontests.settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')

    def test_builtin_with_environment(self):
        "minimal: django-admin builtin commands fail if settings are provided in the environment"
        out, err = self.run_django_admin(
            ['sqlall', 'admin_scripts'], 'regressiontests.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')

    def test_builtin_with_bad_settings(self):
        "minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "minimal: django-admin can't execute user commands unless settings are provided"
        out, err = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "minimal: django-admin can't execute user commands, even if settings are provided as argument"
        out, err = self.run_django_admin(
            ['noargs_command', '--settings=regressiontests.settings'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_environment(self):
        "minimal: django-admin can't execute user commands, even if settings are provided in environment"
        out, err = self.run_django_admin(
            ['noargs_command'], 'regressiontests.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings file
    with a name other than 'settings.py'.
    """

    def setUp(self):
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "alternate: django-admin builtin commands fail with an error when no settings provided"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "alternate: django-admin builtin commands succeed if settings are provided as argument"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=regressiontests.alternate_settings', 'admin_scripts'])
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "alternate: django-admin builtin commands succeed if settings are provided in the environment"
        out, err = self.run_django_admin(
            ['sqlall', 'admin_scripts'], 'regressiontests.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_django_admin(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_django_admin(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "alternate: django-admin can't execute user commands unless settings are provided"
        out, err = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "alternate: django-admin can execute user commands if settings are provided as argument"
        out, err = self.run_django_admin(
            ['noargs_command', '--settings=regressiontests.alternate_settings'])
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "alternate: django-admin can execute user commands if settings are provided in environment"
        out, err = self.run_django_admin(
            ['noargs_command'], 'regressiontests.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "multiple: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "multiple: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=regressiontests.alternate_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "multiple: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'regressiontests.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        "multiple: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Check stdout is empty too, for consistency with the sibling suites;
        # previously only stderr was inspected here.
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "multiple: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "multiple: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "multiple: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=regressiontests.alternate_settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "multiple: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'regressiontests.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
    """
    A series of tests for django-admin.py when the settings file is in a
    directory. (see #9751).
    """
    def setUp(self):
        # Settings are written as a package: settings/__init__.py
        self.write_settings('settings', is_dir=True)

    def tearDown(self):
        self.remove_settings('settings', is_dir=True)

    def test_setup_environ(self):
        "directory: startapp creates the correct directory"
        args = ['startapp', 'settings_test']
        app_path = os.path.join(test_dir, 'settings_test')
        out, err = self.run_django_admin(args, 'regressiontests.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))

    def test_setup_environ_custom_template(self):
        "directory: startapp creates the correct directory with a custom template"
        template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'app_template')
        args = ['startapp', '--template', template_path, 'custom_settings_test']
        app_path = os.path.join(test_dir, 'custom_settings_test')
        out, err = self.run_django_admin(args, 'regressiontests.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
        # api.py only exists in the custom template, proving it was used.
        self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))

    def test_builtin_command(self):
        "directory: django-admin builtin commands fail with an error when no settings provided"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_bad_settings(self):
        "directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Check stdout is empty too, for consistency with the sibling suites;
        # previously only stderr was inspected here.
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "directory: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_builtin_with_settings(self):
        "directory: django-admin builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=regressiontests.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "directory: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_django_admin(args, 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
    "A series of tests for manage.py when there is no settings.py file."

    def test_builtin_command(self):
        "no settings: manage.py builtin commands fail with an error when no settings provided"
        out, err = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'regressiontests.settings'")

    def test_builtin_with_bad_settings(self):
        "no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_manage(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")
class ManageDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application.
    """
    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "default: manage.py builtin commands succeed when default settings are appropriate"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_settings(self):
        "default: manage.py builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=regressiontests.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "default: manage.py builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        # Docstring corrected: the assertions below expect failure, not success.
        "default: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "default: manage.py can execute user commands when default settings are appropriate"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_settings(self):
        "default: manage.py can execute user commands when settings are provided as argument"
        args = ['noargs_command', '--settings=regressiontests.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "default: manage.py can execute user commands when settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    def setUp(self):
        self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'regressiontests.admin_scripts'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "fulldefault: manage.py builtin commands succeed when default settings are appropriate"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_settings(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided as argument"
        args = ['sqlall', '--settings=regressiontests.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        # Docstring corrected: the assertions below expect failure, not success.
        "fulldefault: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['sqlall', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['sqlall', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "fulldefault: manage.py can execute user commands when default settings are appropriate"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_settings(self):
        "fulldefault: manage.py can execute user commands when settings are provided as argument"
        args = ['noargs_command', '--settings=regressiontests.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "fulldefault: manage.py can execute user commands when settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'regressiontests.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageMinimalSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    doesn't contain the test application.
    """

    def setUp(self):
        self.write_settings('settings.py',
            apps=['django.contrib.auth', 'django.contrib.contenttypes'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "minimal: manage.py builtin commands fail with an error when no settings provided"
        out, err = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')

    def test_builtin_with_settings(self):
        "minimal: manage.py builtin commands fail if settings are provided as argument"
        out, err = self.run_manage(
            ['sqlall', '--settings=regressiontests.settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')

    def test_builtin_with_environment(self):
        "minimal: manage.py builtin commands fail if settings are provided in the environment"
        out, err = self.run_manage(
            ['sqlall', 'admin_scripts'], 'regressiontests.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, 'App with label admin_scripts could not be found')

    def test_builtin_with_bad_settings(self):
        "minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        out, err = self.run_manage(
            ['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "minimal: manage.py can't execute user commands without appropriate settings"
        out, err = self.run_manage(['noargs_command'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "minimal: manage.py can't execute user commands, even if settings are provided as argument"
        out, err = self.run_manage(
            ['noargs_command', '--settings=regressiontests.settings'])
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_environment(self):
        "minimal: manage.py can't execute user commands, even if settings are provided in environment"
        out, err = self.run_manage(
            ['noargs_command'], 'regressiontests.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings file
    with a name other than 'settings.py'.
    """
    def setUp(self):
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "alternate: manage.py builtin commands fail with an error when no default settings provided"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'regressiontests.settings'")

    def test_builtin_with_settings(self):
        "alternate: manage.py builtin commands work with settings provided as argument"
        stdout, stderr = self.run_manage(
            ['sqlall', '--settings=alternate_settings', 'admin_scripts'])
        # Table names may be quoted differently per backend, so compare
        # case-insensitively against the backend's quoting.
        table_sql = 'create table %s' % connection.ops.quote_name('admin_scripts_article')
        self.assertTrue(table_sql.lower() in stdout.lower())
        self.assertNoOutput(stderr)

    def test_builtin_with_environment(self):
        "alternate: manage.py builtin commands work if settings are provided in the environment"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'alternate_settings')
        table_sql = 'create table %s' % connection.ops.quote_name('admin_scripts_article')
        self.assertTrue(table_sql.lower() in stdout.lower())
        self.assertNoOutput(stderr)

    def test_builtin_with_bad_settings(self):
        "alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "alternate: manage.py can't execute user commands without settings"
        stdout, stderr = self.run_manage(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'regressiontests.settings'")

    def test_custom_command_with_settings(self):
        "alternate: manage.py can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_manage(['noargs_command', '--settings=alternate_settings'])
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
        self.assertNoOutput(stderr)

    def test_custom_command_with_environment(self):
        "alternate: manage.py can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_manage(['noargs_command'], 'alternate_settings')
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
        self.assertNoOutput(stderr)
class ManageMultipleSettings(AdminScriptTestCase):
    """A series of tests for manage.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    def setUp(self):
        # Default settings without the test app, plus a full alternate file.
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "multiple: manage.py builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'App with label admin_scripts could not be found.')

    def test_builtin_with_settings(self):
        "multiple: manage.py builtin commands succeed if settings are provided as argument"
        stdout, stderr = self.run_manage(
            ['sqlall', '--settings=alternate_settings', 'admin_scripts'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, 'CREATE TABLE')

    def test_builtin_with_environment(self):
        "multiple: manage.py can execute builtin commands if settings are provided in the environment"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, 'CREATE TABLE')

    def test_builtin_with_bad_settings(self):
        "multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")

    def test_builtin_with_bad_environment(self):
        "multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Could not import settings 'bad_settings'")

    def test_custom_command(self):
        "multiple: manage.py can't execute user commands using default settings"
        stdout, stderr = self.run_manage(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "multiple: manage.py can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_manage(['noargs_command', '--settings=alternate_settings'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand")

    def test_custom_command_with_environment(self):
        "multiple: manage.py can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_manage(['noargs_command'], 'alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE:NoArgsCommand")
class ManageSettingsWithImportError(AdminScriptTestCase):
    """Tests for manage.py when using the default settings.py file
    with an import error. Ticket #14130.
    """
    def tearDown(self):
        self.remove_settings('settings.py')

    def write_settings_with_import_error(self, filename, apps=None, is_dir=False, sdict=None):
        # Write a settings module (or package __init__) whose import fails
        # because it imports a module that does not exist.
        if is_dir:
            package_dir = os.path.join(test_dir, filename)
            os.mkdir(package_dir)
            target_path = os.path.join(package_dir, '__init__.py')
        else:
            target_path = os.path.join(test_dir, filename)
        with open(target_path, 'w') as fh:
            fh.write('# Settings file automatically generated by regressiontests.admin_scripts test case\n')
            fh.write('# The next line will cause an import error:\n')
            fh.write('import foo42bar\n')

    def test_builtin_command(self):
        """
        import error: manage.py builtin commands shows useful diagnostic info
        when settings with import errors is provided
        """
        self.write_settings_with_import_error('settings.py')
        stdout, stderr = self.run_manage(['sqlall', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named")
        self.assertOutput(stderr, "foo42bar")

    def test_builtin_command_with_attribute_error(self):
        """
        manage.py builtin commands does not swallow attribute errors from bad settings (#18845)
        """
        self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
        stdout, stderr = self.run_manage(['collectstatic', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "AttributeError: 'list' object has no attribute 'crash'")
class ManageValidate(AdminScriptTestCase):
    # Each test writes its own settings.py variant; only cleanup is shared.
    def tearDown(self):
        self.remove_settings('settings.py')

    def test_nonexistent_app(self):
        "manage.py validate reports an error on a non-existent app in INSTALLED_APPS"
        self.write_settings('settings.py',
                            apps=['admin_scriptz.broken_app'],
                            sdict={'USE_I18N': False})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'No module named')
        self.assertOutput(stderr, 'admin_scriptz')

    def test_broken_app(self):
        "manage.py validate reports an ImportError if an app's models.py raises one on import"
        self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'ImportError')

    def test_complex_app(self):
        "manage.py validate does not raise an ImportError validating a complex app with nested calls to load_app"
        self.write_settings('settings.py',
                            apps=['admin_scripts.complex_app', 'admin_scripts.simple_app'],
                            sdict={'DEBUG': True})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, '0 errors found')

    def test_app_with_import(self):
        "manage.py validate does not raise errors when an app imports a base class that itself has an abstract base"
        self.write_settings('settings.py',
                            apps=['admin_scripts.app_with_import',
                                  'django.contrib.comments',
                                  'django.contrib.auth',
                                  'django.contrib.contenttypes',
                                  'django.contrib.sites'],
                            sdict={'DEBUG': True})
        stdout, stderr = self.run_manage(['validate'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, '0 errors found')
class CustomTestRunner(DjangoTestSuiteRunner):
    # Referenced by dotted path from ManageTestCommand.test_liveserver to
    # verify that the test command does not forward the 'liveserver' option
    # to the test runner it instantiates.
    def __init__(self, *args, **kwargs):
        # Fail loudly if the test command leaked the option through.
        assert 'liveserver' not in kwargs
        super(CustomTestRunner, self).__init__(*args, **kwargs)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        # Intentionally a no-op: only option/environment handling is under
        # test, so no test suite should actually run.
        pass
class ManageTestCommand(AdminScriptTestCase):
    def setUp(self):
        from django.core.management.commands.test import Command as TestCommand
        self.cmd = TestCommand()

    def test_liveserver(self):
        """
        Ensure that the --liveserver option sets the environment variable
        correctly.

        Refs #2879.
        """
        env_key = 'DJANGO_LIVE_TEST_SERVER_ADDRESS'
        # Remember the pre-test state so it can be restored afterwards.
        address_predefined = env_key in os.environ
        old_address = os.environ.get(env_key)

        # Without --liveserver, the environment must be left untouched.
        self.cmd.handle(verbosity=0, testrunner='regressiontests.admin_scripts.tests.CustomTestRunner')
        self.assertEqual(env_key in os.environ, address_predefined)
        self.assertEqual(os.environ.get(env_key), old_address)

        # With --liveserver, the variable must be set to the given value.
        self.cmd.handle(verbosity=0, testrunner='regressiontests.admin_scripts.tests.CustomTestRunner',
                        liveserver='blah')
        self.assertEqual(os.environ[env_key], 'blah')

        # Restore the pre-test state.
        if address_predefined:
            os.environ[env_key] = old_address
        else:
            del os.environ[env_key]
class ManageRunserver(AdminScriptTestCase):
    def setUp(self):
        from django.core.management.commands.runserver import Command

        # Stub out Command.run so handle() performs its address/port parsing
        # but never actually binds a socket or serves requests.
        def monkey_run(*args, **options):
            return
        self.cmd = Command()
        self.cmd.run = monkey_run

    def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False):
        # Verify the state the (stubbed) runserver command derived from the
        # addrport argument it was handled with.
        self.assertEqual(self.cmd.addr, addr)
        self.assertEqual(self.cmd.port, port)
        self.assertEqual(self.cmd.use_ipv6, ipv6)
        self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)

    def test_runserver_addrport(self):
        # Default, explicit addr:port, and port-only forms.
        self.cmd.handle()
        self.assertServerSettings('127.0.0.1', '8000')
        self.cmd.handle(addrport="1.2.3.4:8000")
        self.assertServerSettings('1.2.3.4', '8000')
        self.cmd.handle(addrport="7000")
        self.assertServerSettings('127.0.0.1', '7000')

    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_addrport_ipv6(self):
        self.cmd.handle(addrport="", use_ipv6=True)
        self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)
        self.cmd.handle(addrport="7000", use_ipv6=True)
        self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)
        # A bracketed IPv6 literal implies IPv6 even without use_ipv6.
        self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000")
        self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)

    def test_runner_hostname(self):
        self.cmd.handle(addrport="localhost:8000")
        self.assertServerSettings('localhost', '8000')
        self.cmd.handle(addrport="test.domain.local:7000")
        self.assertServerSettings('test.domain.local', '7000')

    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_hostname_ipv6(self):
        # Hostname with use_ipv6: not a raw IPv6 literal.
        self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True)
        self.assertServerSettings('test.domain.local', '7000', ipv6=True)

    def test_runner_ambiguous(self):
        # Only 4 characters, all of which could be in an ipv6 address
        self.cmd.handle(addrport="beef:7654")
        self.assertServerSettings('beef', '7654')

        # Uses only characters that could be in an ipv6 address
        self.cmd.handle(addrport="deadbeef:7654")
        self.assertServerSettings('deadbeef', '7654')
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
    "Tests for the various types of base command types that can be defined."
    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_version(self):
        "version is handled as a special case"
        args = ['version']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, get_version())

    def test_version_alternative(self):
        "--version is equivalent to version"
        args1, args2 = ['version'], ['--version']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))

    def test_help(self):
        "help is handled as a special case"
        args = ['help']
        out, err = self.run_manage(args)
        self.assertOutput(out, "Usage: manage.py subcommand [options] [args]")
        self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
        self.assertOutput(out, '[django]')
        self.assertOutput(out, 'startapp')
        self.assertOutput(out, 'startproject')

    def test_help_commands(self):
        "help --commands shows the list of all available commands"
        args = ['help', '--commands']
        out, err = self.run_manage(args)
        self.assertNotInOutput(out, 'Usage:')
        self.assertNotInOutput(out, 'Options:')
        self.assertNotInOutput(out, '[django]')
        self.assertOutput(out, 'startapp')
        self.assertOutput(out, 'startproject')
        self.assertNotInOutput(out, '\n\n')

    def test_help_alternative(self):
        "--help is equivalent to help"
        args1, args2 = ['help'], ['--help']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))

    # NOTE(review): method name has a typo ("altert" -> "alternative");
    # kept unchanged so any external references to the test name still work.
    def test_help_short_altert(self):
        "-h is handled as a short form of --help"
        args1, args2 = ['--help'], ['-h']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))

    def test_specific_help(self):
        "--help can be used on a specific command"
        args = ['sqlall', '--help']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "Prints the CREATE TABLE, custom SQL and CREATE INDEX SQL statements for the given model module name(s).")

    def test_base_command(self):
        "User BaseCommands can execute when a label is provided"
        args = ['base_command', 'testlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_base_command_no_label(self):
        "User BaseCommands can execute when no labels are provided"
        args = ['base_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=(), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_base_command_multiple_label(self):
        "User BaseCommands can execute when multiple labels are provided"
        args = ['base_command', 'testlabel', 'anotherlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel', 'anotherlabel'), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_base_command_with_option(self):
        "User BaseCommands can execute with options when a label is provided"
        args = ['base_command', 'testlabel', '--option_a=x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_base_command_with_options(self):
        "User BaseCommands can execute with multiple options when a label is provided"
        args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_noargs(self):
        "NoArg Commands can be executed"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_noargs_with_args(self):
        "NoArg Commands raise an error if an argument is provided"
        args = ['noargs_command', 'argument']
        out, err = self.run_manage(args)
        self.assertOutput(err, "Error: Command doesn't accept any arguments")

    def test_app_command(self):
        "User AppCommands can execute when a single app name is provided"
        args = ['app_command', 'auth']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
        self.assertOutput(out, os.sep.join(['django', 'contrib', 'auth', 'models.py']))
        self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_app_command_no_apps(self):
        "User AppCommands raise an error when no app name is provided"
        args = ['app_command']
        out, err = self.run_manage(args)
        self.assertOutput(err, 'Error: Enter at least one appname.')

    def test_app_command_multiple_apps(self):
        "User AppCommands can execute when multiple app names are provided"
        args = ['app_command', 'auth', 'contenttypes']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        # The command is invoked once per app, in the order given.
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
        self.assertOutput(out, os.sep.join(['django', 'contrib', 'auth', 'models.py']))
        self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
        self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.contenttypes.models'")
        self.assertOutput(out, os.sep.join(['django', 'contrib', 'contenttypes', 'models.py']))
        self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_app_command_invalid_appname(self):
        "User AppCommands raise an error when an invalid app name is provided"
        args = ['app_command', 'NOT_AN_APP']
        out, err = self.run_manage(args)
        self.assertOutput(err, "App with label NOT_AN_APP could not be found")

    def test_app_command_some_invalid_appnames(self):
        "User AppCommands raise an error when some of the provided app names are invalid"
        args = ['app_command', 'auth', 'NOT_AN_APP']
        out, err = self.run_manage(args)
        self.assertOutput(err, "App with label NOT_AN_APP could not be found")

    def test_label_command(self):
        "User LabelCommands can execute when a label is provided"
        args = ['label_command', 'testlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")

    def test_label_command_no_label(self):
        "User LabelCommands raise an error if no label is provided"
        args = ['label_command']
        out, err = self.run_manage(args)
        self.assertOutput(err, 'Enter at least one label')

    def test_label_command_multiple_label(self):
        "User LabelCommands are executed multiple times if multiple labels are provided"
        args = ['label_command', 'testlabel', 'anotherlabel']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
        self.assertOutput(out, "EXECUTE:LabelCommand label=anotherlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
class ArgumentOrder(AdminScriptTestCase):
    """Tests for 2-stage argument parsing scheme.

    django-admin command arguments are parsed in 2 parts; the core arguments
    (--settings, --traceback and --pythonpath) are parsed using a Lax parser.
    This Lax parser ignores any unknown options. Then the full settings are
    passed to the command parser, which extracts commands of interest to the
    individual command.
    """
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')

    def test_setting_then_option(self):
        "Options passed after settings are correctly handled"
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")

    def test_setting_then_short_option(self):
        "Short options passed after settings are correctly handled"
        # Bug fix: this test previously passed the long form '--option_a=x',
        # making it an exact duplicate of test_setting_then_option. Use the
        # short form '-a x' so short-option parsing after --settings is
        # actually exercised (mirroring test_short_option_then_setting).
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")

    def test_option_then_setting(self):
        "Options passed before settings are correctly handled"
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")

    def test_short_option_then_setting(self):
        "Short options passed before settings are correctly handled"
        args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")

    def test_option_then_setting_then_option(self):
        "Options are correctly handled when they are passed before and after a setting"
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
class StartProject(LiveServerTestCase, AdminScriptTestCase):
def test_wrong_args(self):
"Make sure passing the wrong kinds of arguments raises a CommandError"
out, err = self.run_django_admin(['startproject'])
self.assertNoOutput(out)
self.assertOutput(err, "you must provide a project name")
def test_simple_project(self):
"Make sure the startproject management command creates a project"
args = ['startproject', 'testproject']
testproject_dir = os.path.join(test_dir, 'testproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_invalid_project_name(self):
"Make sure the startproject management command validates a project name"
args = ['startproject', '7testproject']
testproject_dir = os.path.join(test_dir, '7testproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertOutput(err, "Error: '7testproject' is not a valid project name. Please make sure the name begins with a letter or underscore.")
self.assertFalse(os.path.exists(testproject_dir))
def test_simple_project_different_directory(self):
"Make sure the startproject management command creates a project in a specific directory"
args = ['startproject', 'testproject', 'othertestproject']
testproject_dir = os.path.join(test_dir, 'othertestproject')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_custom_project_template(self):
"Make sure the startproject management command is able to use a different project template"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_template_dir_with_trailing_slash(self):
"Ticket 17475: Template dir passed has a trailing path separator"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template' + os.sep)
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_custom_project_template_from_tarball_by_path(self):
"Make sure the startproject management command is able to use a different project template from a tarball"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject']
testproject_dir = os.path.join(test_dir, 'tarballtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_to_alternative_location(self):
"Startproject can use a project template from a tarball and create it in a specified location"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
testproject_dir = os.path.join(test_dir, 'altlocation')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_by_url(self):
"Make sure the startproject management command is able to use a different project template from a tarball via a url"
template_url = '%s/admin_scripts/custom_templates/project_template.tgz' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_project_template_tarball_url(self):
"Startproject management command handles project template tar/zip balls from non-canonical urls"
template_url = '%s/admin_scripts/custom_templates/project_template.tgz/' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_file_without_extension(self):
"Make sure the startproject management command is able to render custom files"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
base_path = os.path.join(testproject_dir, 'additional_dir')
for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
self.assertTrue(os.path.exists(os.path.join(base_path, f)))
with open(os.path.join(base_path, f)) as fh:
self.assertEqual(fh.read(),
'# some file for customtestproject test project')
def test_custom_project_template_context_variables(self):
"Make sure template context variables are rendered with proper values"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'manage.py')
with open(test_manage_py, 'r') as fp:
content = force_text(fp.read())
self.assertIn("project_name = 'another_project'", content)
self.assertIn("project_directory = '%s'" % testproject_dir, content)
def test_no_escaping_of_project_variables(self):
"Make sure template context variables are not html escaped"
# We're using a custom command so we need the alternate settings
self.write_settings('alternate_settings.py')
self.addCleanup(self.remove_settings, 'alternate_settings.py')
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_manage(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
with open(test_manage_py, 'r') as fp:
content = fp.read()
self.assertIn("<&>", content)
def test_custom_project_destination_missing(self):
"""
Make sure an exception is raised when the provided
destination directory doesn't exist
"""
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
testproject_dir = os.path.join(test_dir, 'project_dir2')
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
self.assertFalse(os.path.exists(testproject_dir))
def test_custom_project_template_with_non_ascii_templates(self):
"Ticket 18091: Make sure the startproject management command is able to render templates with non-ASCII content"
template_path = os.path.join(test_dir, 'admin_scripts', 'custom_templates', 'project_template')
args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
with codecs.open(path, 'r', 'utf-8') as f:
self.assertEqual(f.read(),
'Some non-ASCII text for testing ticket #18091:\nüäö €\n')
class DiffSettings(AdminScriptTestCase):
    """Tests for the diffsettings management command."""

    def test_basic(self):
        "Runs without error and emits settings diff."
        self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
        self.addCleanup(self.remove_settings, 'settings_to_diff.py')
        out, err = self.run_manage(['diffsettings', '--settings=settings_to_diff'])
        self.assertNoOutput(err)
        # Settings that differ from the defaults are marked with "###".
        self.assertOutput(out, "FOO = 'bar' ###")
|
blaze33/django
|
tests/regressiontests/admin_scripts/tests.py
|
Python
|
bsd-3-clause
| 77,347
|
# -*- coding: utf-8 -*-
"""Package version metadata for djconnectwise."""

#: Version tuple; the last element is the release level tag.
VERSION = (0, 3, 181, 'final')

# The 'final' tag is dropped from the dotted version string; any other
# tag (e.g. a pre-release marker) remains visible.
if VERSION[-1] == "final":
    _version_parts = VERSION[:-1]
else:  # pragma: no cover
    _version_parts = VERSION
__version__ = '.'.join(str(part) for part in _version_parts)

default_app_config = 'djconnectwise.apps.DjangoConnectwiseConfig'
|
KerkhoffTechnologies/django-connectwise
|
djconnectwise/__init__.py
|
Python
|
mit
| 295
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
class clv_document(models.Model):
    """Extend clv_document so that history tracking is active by default."""
    _inherit = 'clv_document'
    _defaults = {
        # New documents start with history tracking turned on.
        'active_history': True,
    }
|
CLVsol/odoo_clvhealth_jcafb
|
clvhealth_jcafb/history/clv_document.py
|
Python
|
agpl-3.0
| 1,571
|
"""
fstab - file ``/etc/fstab``
===========================
Parse the ``/etc/fstab`` file into a list of lines. Each line is a dictionary
of fields, named according to their definitions in ``man fstab``:
* ``fs_spec`` - the device to mount
* ``fs_file`` - the mount point
* ``fs_vfstype`` - the type of file system
* ``raw_fs_mntops`` - the mount options as a string
* ``fs_mntops`` - the mount options as a dictionary
* ``fs_freq`` - the dump frequency
* ``fs_passno`` - check the filesystem on reboot in this pass number
* ``raw`` - the RAW line which is useful to front-end
``fs_freq`` and ``fs_passno`` are recorded as integers if found, and zero if
not present.
The ``fs_mntops`` mount options are converted to a dictionary, with each
option's value set to True so it can be conveniently searched.
This data, as above, is available in the ``data`` property:
* As wrapped as an AttributeDict, each column can also be accessed as a property
with the same name.
* The mount options are also an AttributeDict object with properties
corresponding to the common mount options.
The data for each mount point is also available via the ``mounted_on``
property; the data is the same as that stored in the ``data`` list.
Typical content of the ``fstab`` looks like::
#
# /etc/fstab
# Created by anaconda on Fri May 6 19:51:54 2016
#
/dev/mapper/rhel_hadoop--test--1-root / xfs defaults 0 0
UUID=2c839365-37c7-4bd5-ac47-040fba761735 /boot xfs defaults 0 0
/dev/mapper/rhel_hadoop--test--1-home /home xfs defaults 0 0
/dev/mapper/rhel_hadoop--test--1-swap swap swap defaults 0 0
/dev/sdb1 /hdfs/data1 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
/dev/sdc1 /hdfs/data2 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
/dev/sdd1 /hdfs/data3 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0
localhost:/ /mnt/hdfs nfs rw,vers=3,proto=tcp,nolock,timeo=600 0 0
/dev/mapper/vg0-lv2 /test1 ext4 defaults,data=writeback 1 1
nfs_hostname.redhat.com:/nfs_share/data /srv/rdu/cases/000 nfs ro,defaults,hard,intr,bg,noatime,nodev,nosuid,nfsvers=3,tcp,rsize=32768,wsize=32768 0
Examples:
>>> fstab = shared[FSTab]
>>> len(fstab)
9
>>> fstab.data[0]['fs_spec'] # Note that data is a list not a dict here
'/dev/mapper/rhel_hadoop--test--1-root'
>>> fstab.data[0].fs_spec
'/dev/mapper/rhel_hadoop--test--1-root'
>>> fstab.data[0].raw
'/dev/mapper/rhel_hadoop--test--1-root / xfs defaults 0 0'
>>> fstab.data[0].fs_mntops.defaults
True
>>> 'relatime' in fstab.data[0].fs_mntops
False
>>> fstab.data[0].fs_mntops.get('relatime')
None
>>> fstab.mounted_on['/hdfs/data3'].fs_spec
'/dev/sdd1'
"""
from collections import namedtuple
from .. import Parser, parser, get_active_lines, AttributeDict
from ..parsers import optlist_to_dict, parse_delimited_table, keyword_search
from insights.specs import fstab
FS_HEADINGS = "fs_spec fs_file fs_vfstype raw_fs_mntops fs_freq fs_passno"
type_info = namedtuple('type_info', field_names=['type', 'default'])
@parser(fstab)
class FSTab(Parser):
    """
    Parse the content of ``/etc/fstab``.

    This object provides the '__len__' and '__iter__' methods to allow it
    to be used as a list to iterate over the ``data`` entries, e.g.::

        >>> if len(fstab) > 0:
        >>>     for fs in fstab:
        >>>         print fs.fs_file
        >>>         print fs.raw

    Attributes:
        data (list): a list of parsed fstab entries as AttributeDict objects.
        mounted_on (dict): a dictionary of AttributeDict objects keyed on
            mount point.
    """
    def __len__(self):
        # Number of parsed fstab entries.
        return len(self.data)
    def __iter__(self):
        # Yield parsed entries in file order.
        for row in self.data:
            yield row
    def parse_content(self, content):
        """
        Parse each line in the file ``/etc/fstab``.

        Populates ``self.data`` (list of AttributeDict rows) and
        ``self.mounted_on`` (rows keyed by mount point).
        """
        # Prepend the fixed headings line so the generic delimited-table
        # parser can name the columns of each fstab row.
        fstab_output = parse_delimited_table([FS_HEADINGS] + get_active_lines(content))
        self.data = []
        for line in fstab_output:
            # fs_freq / fs_passno default to 0 when the column is absent.
            line['fs_freq'] = int(line['fs_freq']) if 'fs_freq' in line else 0
            line['fs_passno'] = int(line['fs_passno']) if 'fs_passno' in line else 0
            # optlist_to_dict converts 'key=value' to key: value and
            # 'key' to key: True
            line['fs_mntops'] = AttributeDict(optlist_to_dict(line['raw_fs_mntops']))
            # add `raw` here for displaying convenience on front-end
            # NOTE(review): this picks the first raw line starting with
            # fs_spec; it assumes one exists (otherwise IndexError) and
            # that no other entry's line is a prefix match — confirm.
            line['raw'] = [l for l in content if l.startswith(line['fs_spec'])][0]
            self.data.append(AttributeDict(line))
        # assert: all mount points of valid entries are unique by definition
        self.mounted_on = dict((row.fs_file, row) for row in self.data)
    def search(self, **kwargs):
        """
        Search for the given key/value pairs in the data. Please refer to the
        :py:meth:`insights.parsers.keyword_search` function documentation for
        a more complete description of how to use this.

        Fields that can be searched (as per ``man fstab``):

        * ``fs_spec``: the block special or remote filesystem path or label.
        * ``fs_file``: The mount point for the filesystem.
        * ``fs_vfstype``: The file system type.
        * ``fs_mntops``: The mount options. Since this is also a dictionary,
          this can be searched using __contains - see the examples below.
        * ``fs_freq``: The dump frequency - rarely used.
        * ``fs_passno``: The pass for file system checks - rarely used.

        Examples:
            Search for the root file system:
                ``fstab.search(fs_file='/')``
            Search for file systems mounted from a LABEL declaration
                ``fstab.search(fs_spec__startswith='LABEL=')``
            Search for file systems that use the 'uid' mount option:
                ``fstab.search(fs_mntops__contains='uid')``
            Search for XFS file systems using the 'relatime' option:
                ``fstab.search(fs_vfstype='xfs', fs_mntops__contains='relatime')``
        """
        return keyword_search(self.data, **kwargs)
|
wcmitchell/insights-core
|
insights/parsers/fstab.py
|
Python
|
apache-2.0
| 6,335
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class MergeLayersTest(test.TestCase):
  """Tests for the keras merge layers and their functional wrappers
  (add, subtract, multiply, average, maximum, minimum, concatenate, dot)."""

  def test_merge_add(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      i3 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.add([i1, i2, i3])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2, i3], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      x3 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2, x3])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)
      # test masking
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      m1 = keras.layers.Masking()(i1)
      layer = keras.layers.Add()
      o = layer([m1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      mask = layer.output_mask
      self.assertListEqual(mask.get_shape().as_list(), [None, 4])
      # test missing shape
      # (building with a partially-unknown input shape must not raise)
      i1 = array_ops.placeholder(shape=(4, None), dtype='float32')
      i2 = array_ops.placeholder(shape=(4, 5), dtype='float32')
      layer = keras.layers.Add()
      o = layer([i1, i2])

  def test_merge_elementwise_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    # Mismatched shapes are rejected.
    with self.assertRaises(ValueError):
      keras.layers.add([i1, i2])
    # A single-element list is rejected.
    with self.assertRaises(ValueError):
      keras.layers.add([i1])
    # A bare tensor (not a list) is rejected.
    with self.assertRaises(ValueError):
      keras.layers.add(i1)
    # NOTE(review): duplicate of the single-element case above — possibly
    # one of the two was meant to exercise a different invalid input.
    with self.assertRaises(ValueError):
      keras.layers.add([i1])

  def test_merge_multiply(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      i3 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.multiply([i1, i2, i3])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2, i3], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      x3 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2, x3])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)

  def test_merge_average(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.average([i1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)

  def test_merge_maximum(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.maximum([i1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)

  def test_merge_minimum(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.minimum([i1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 4, 5))
      self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)

  def test_merge_concatenate(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4, 5))
      i2 = keras.layers.Input(shape=(4, 5))
      o = keras.layers.concatenate([i1, i2], axis=1)
      self.assertListEqual(o.get_shape().as_list(), [None, 8, 5])
      model = keras.models.Model([i1, i2], o)
      x1 = np.random.random((2, 4, 5))
      x2 = np.random.random((2, 4, 5))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 8, 5))
      self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)
      # test masking
      m1 = keras.layers.Masking()(i1)
      layer = keras.layers.Concatenate()
      o = layer([m1, i2])
      self.assertListEqual(o.get_shape().as_list(), [None, 4, 10])
      mask = layer.output_mask
      self.assertListEqual(mask.get_shape().as_list(), [None, 4])

  def test_concatenate_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
      keras.layers.concatenate([i1, i2], axis=-1)
    with self.assertRaisesRegexp(ValueError, 'called on a list'):
      keras.layers.concatenate(i1, axis=-1)
    with self.assertRaisesRegexp(ValueError, 'called on a list'):
      keras.layers.concatenate([i1], axis=-1)

  def test_merge_dot(self):
    with self.test_session():
      i1 = keras.layers.Input(shape=(4,))
      i2 = keras.layers.Input(shape=(4,))
      o = keras.layers.dot([i1, i2], axes=1)
      self.assertListEqual(o.get_shape().as_list(), [None, 1])
      model = keras.models.Model([i1, i2], o)
      # get_config must be callable on a fresh Dot layer.
      _ = keras.layers.Dot(axes=1).get_config()
      x1 = np.random.random((2, 4))
      x2 = np.random.random((2, 4))
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 1))
      expected = np.zeros((2, 1))
      expected[0, 0] = np.dot(x1[0], x2[0])
      expected[1, 0] = np.dot(x1[1], x2[1])
      self.assertAllClose(out, expected, atol=1e-4)
      # Test with negative tuple of axes.
      o = keras.layers.dot([i1, i2], axes=(-1, -1))
      self.assertListEqual(o.get_shape().as_list(), [None, 1])
      model = keras.models.Model([i1, i2], o)
      out = model.predict([x1, x2])
      self.assertEqual(out.shape, (2, 1))
      self.assertAllClose(out, expected, atol=1e-4)
      # test _compute_output_shape
      layer = keras.layers.Dot(axes=-1)
      self.assertEqual(layer._compute_output_shape([(4, 5), (4, 5)]), (4, 1))

  def test_dot_errors(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 6))
    i3 = keras.layers.Input(shape=(4, 6))
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot(i1, axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1], axes=-1)
    with self.assertRaises(ValueError):
      keras.layers.dot([i1, i2, i3], axes=-1)
    with self.assertRaises(ValueError):
      dot = keras.layers.Dot(1)
      dot._compute_output_shape(1)

  def test_merge_subtract(self):
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    y = keras.layers.subtract([i1, i2])
    self.assertEqual(y.get_shape().as_list(), [None, 4, 5])
    # Test invalid use cases
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(3, 5))
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i2])
    with self.assertRaises(ValueError):
      keras.layers.subtract([i1, i1, i1])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
|
eadgarchen/tensorflow
|
tensorflow/python/keras/_impl/keras/layers/merge_test.py
|
Python
|
apache-2.0
| 8,416
|
#!/usr/bin/python
# This file is part of Morse.
#
# Morse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Morse. If not, see <http://www.gnu.org/licenses/>.
from . import db
from iptools import IpRange
from datetime import datetime, timedelta
from core import Board, User
class Ban (db.Model):
    """ Ban is an abstract model for IPBan and UserBan. It provides
    methods to check for affected boards and some to get different parts
    of the ban duration """
    __abstract__ = True
    id = db.Column(db.Integer, primary_key=True)
    # reason: human-readable justification for the ban
    reason = db.Column(db.String)
    # duration and expiration_date are both NULL for permanent bans
    duration = db.Column(db.Interval)
    expiration_date = db.Column(db.DateTime)
    def __init__ (self, reason, duration_in_days = None):
        """ duration_in_days None (or 0) creates a permanent ban """
        self.reason = reason
        if duration_in_days:
            self.duration = timedelta(days = duration_in_days)
            self.expiration_date = datetime.now() + self.duration
    def applies_to (self, board):
        """ signifies whether a ban applies to a certain board """
        affected = self.affected_board_ids
        return board.id in affected
    @property
    def affected_boards (self):
        """ a list of all affected boards """
        # affected_board_ids is supplied by the concrete subclass
        for board_id in self.affected_board_ids:
            yield Board.query.get(board_id)
    @property
    def is_permanent (self):
        # permanent bans are stored without an expiration date
        return self.expiration_date is None
    def update_duration_in_days (self, duration):
        """ change the ban length while keeping its original start point """
        if duration is None:
            self.duration = None
            self.expiration_date = None
        else:
            if self.is_permanent:
                # a permanent ban has no start point; restart the clock now
                old_beginning = datetime.now()
            else:
                old_beginning = self.expiration_date - self.duration
            self.duration = timedelta(days = duration)
            self.expiration_date = old_beginning + self.duration
    # write-only property: ban.duration_in_days = n rewrites the duration
    duration_in_days = property(fset = update_duration_in_days)
    @property
    def has_expired (self):
        if self.is_permanent:
            return False
        return self.expiration_date < datetime.now()
    @property
    def percentage_of_time_served (self):
        """ percentage in [0, 100]; permanent bans always report 0 """
        if self.is_permanent:
            return 0
        if self.has_expired:
            return 100
        served = self.time_served
        # convert to whole seconds (microseconds are ignored)
        served_in_seconds = served.days * 24 * 60**2 + served.seconds
        duration = self.duration
        duration_in_seconds = duration.days * 24 * 60**2 + duration.seconds
        percentage = (100 * served_in_seconds) / duration_in_seconds
        return percentage
    @property
    def percentage_of_time_left (self):
        return 100 - self.percentage_of_time_served
    @property
    def time_served (self):
        """ a timedelta object that signifies the
        served time (only possible on limited bans) """
        if self.is_permanent:
            raise TypeError("this method is not available on permanent bans")
        return self.duration - self.time_left
    @property
    def time_left (self):
        """ a timedelta object that signifies the
        time left to serve (only possible on limited bans) """
        if self.is_permanent:
            raise TypeError("this method is not available on permanent bans")
        return self.expiration_date - datetime.now()
    @property
    def days_left (self):
        """ an integer that signifies the number of days
        left to serve (only possible on limited bans) """
        if self.is_permanent:
            raise TypeError("this method is not available on permanent bans")
        return self.time_left.days
    @property
    def hours_left (self):
        """ an integer that signifies the number of hours
        left to serve (only possible on limited bans)
        !!! this attribute DOES NOT signify the absolute
        number of hours left, but rather the numbers of
        hours left modulo 24
        """
        if self.is_permanent:
            raise TypeError("this method is not available on permanent bans")
        seconds = self.time_left.seconds
        return seconds // 60**2
    @property
    def minutes_left (self):
        """ an integer that signifies the number of minutes
        left to serve (only possible on limited bans)
        !!! this attribute DOES NOT signify the absolute
        number of minutes left, but rather the numbers of
        minutes left modulo 60
        """
        if self.is_permanent:
            raise TypeError("this method is not available on permanent bans")
        seconds = self.time_left.seconds
        seconds_without_hours = seconds % 60**2
        return seconds_without_hours // 60
class IPBan (Ban):
    """ model for IP bans """
    __tablename__ = "ip_bans"
    # the banned range, stored as its string representation
    ip_range = db.Column(db.String)
    def __init__ (self, ip_range, reason, duration_in_days = None):
        Ban.__init__(self, reason, duration_in_days)
        self.ip_range = ip_range
    @property
    def affected_ips (self):
        """ use this property instead of ip_range. it provides a
        iptools.IpRange object instead of a simple string, which
        means you can perform containment operations (e.g.
        "my_ip in ban.ip_range" and the like) """
        return IpRange(self.ip_range)
    @property
    def affected_board_ids (self):
        """ an ID list of all affected boards """
        query = IPBannedOn.query
        query = query.filter(IPBannedOn.ban_id == self.id)
        # query.values yields 1-tuples; unwrap them
        board_id_generator = query.values(IPBannedOn.board_id)
        board_ids = [oneple[0] for oneple in board_id_generator]
        return board_ids
class IPBannedOn (db.Model):
    """ A relation between ip bans and boards, that signify
    which boards are affected by a certain ip ban """
    __tablename__ = "ip_banned_on"
    # composite primary key: one row per (ban, board) pair
    ban_id = db.Column(db.Integer, primary_key=True)
    board_id = db.Column(db.Integer, primary_key=True)
    def __init__ (self, board_id, ban_id):
        self.board_id = board_id
        self.ban_id = ban_id
class UserBan (Ban):
    """ model for user bans """
    __tablename__ = "user_bans"
    user_id = db.Column(db.ForeignKey("users.id"))
    def __init__ (self, user_id, reason, duration_in_days = None):
        Ban.__init__(self, reason, duration_in_days)
        # BUG FIX: was `self.user_id, user_id` — a no-op tuple expression
        # instead of an assignment, which left user_id unset on new bans.
        self.user_id = user_id
    @property
    def affected_user (self):
        """ the user this ban applies to """
        return User.query.get(self.user_id)
    @property
    def affected_board_ids (self):
        """ an ID list of all affected boards """
        query = UserBannedOn.query
        query = query.filter(UserBannedOn.ban_id == self.id)
        # query.values yields 1-tuples; unwrap them
        board_id_generator = query.values(UserBannedOn.board_id)
        board_ids = [oneple[0] for oneple in board_id_generator]
        return board_ids
class UserBannedOn (db.Model):
    """ A relation between user bans and boards, that signify
    which boards are affected by a certain user ban """
    __tablename__ = "user_banned_on"
    # composite primary key: one row per (ban, board) pair
    ban_id = db.Column(db.Integer, primary_key=True)
    board_id = db.Column(db.Integer, primary_key=True)
    def __init__ (self, board_id, ban_id):
        self.board_id = board_id
        self.ban_id = ban_id
|
retooth/morse
|
morse/models/bans.py
|
Python
|
gpl-3.0
| 7,601
|
"""
Given a binary tree, convert it to BST. The conversion should be done in such a way
that keeps the original structure of binary tree.
Input:
10
/ \
2 7
/ \
8 4
Output:
8
/ \
4 10
/ \
2 7
Input:
10
/ \
30 15
/ \
20 5
Output:
15
/ \
10 20
/ \
5 30
"""
"""
Approach:
1. Do inorder traversal of tree and store it in a temp array.
2. Sort the temp array.
3. Do another inorder traversal, this time replace tree node values with values from sorted array.
Time complexity is O(nlog(n))
"""
def binary_to_bst(root):
    """Convert a binary tree to a BST in place, keeping its structure.

    First inorder pass collects the node values; the second pass writes
    the sorted values back in inorder position. O(n log n) overall.
    """
    values = []
    inorder(root, values, False)
    inorder(root, sorted(values), True)
def inorder(root, result, result_to_bst):
    """Inorder traversal helper.

    When ``result_to_bst`` is false, append each visited node's value to
    ``result``; when true, pop values off the front of ``result`` and
    write them back into the visited nodes.
    """
    if root is None:
        return
    inorder(root.left, result, result_to_bst)
    if result_to_bst:
        root.data = result.pop(0)
    else:
        result.append(root.data)
    inorder(root.right, result, result_to_bst)
class Node:
    """A binary tree node holding a value and optional children."""

    def __init__(self, data, left=None, right=None):
        self.left = left
        self.right = right
        self.data = data
if __name__ == '__main__':
    # Build the first example tree from the module docstring and verify
    # the conversion produces the documented BST.
    tree = Node(10)
    tree.left = Node(2, Node(8), Node(4))
    tree.right = Node(7)
    binary_to_bst(tree)
    assert tree.data == 8
    assert tree.left.data == 4
    assert tree.right.data == 10
    assert tree.left.left.data == 2
    assert tree.left.right.data == 7
|
prathamtandon/g4gproblems
|
Graphs/binary_tree_to_bst.py
|
Python
|
mit
| 1,610
|
# Task 8. Variant 22.
# 1-50. Extend the "Anagrams" game (M. Dawson, "Python Programming for
# the Absolute Beginner", ch. 4) so that every word comes with a hint.
# The player gets a hint on request; the scoring rewards players who
# solve the word without using the hint.
# Nikishin P. S.
# 27.05.2016
import random

slova = ("музыка", "душа", "ковер", "принтер", "монитор", "коробка")
word = random.choice(slova)
verno = word          # the correct answer
nz = "?"              # the "give me a hint" command
podskazka = word[:3]  # hint: first three letters of the word

# Shuffle the letters of the chosen word into an anagram.
ann = ""
while word:
    pos = random.randrange(len(word))
    ann = ann + word[pos]
    word = word[:pos] + word[(pos + 1):]

score = 10
print("Добро пожаловать в игру 'Анаграммы!'")
print("Надо переставить буквы так, чтобы получилось осмысленное слово.")
print("Если вам нужна подсказка, то просто напишите: '?' ")
print("За использование подсказки вы потеряйте 5 очков.")
print("Для выхода нажмите Enter, не вводя своей версии.")
print("\nВот анаграмма: ", ann)
variant = input("\nПопробуйте отгадать исходое слово: ")
while variant != "" and variant != verno:
    # BUG FIX: removed an unreachable branch (`if variant == "": print("ujdyj")`)
    # — the loop condition already guarantees variant is non-empty.
    if variant != verno and variant != nz:
        print("Вы не угадали. ")
        variant = input("\nПопробуйте еще раз: ")
    if variant == nz:
        # Each hint request costs 5 points.
        score -= 5
        print("\nПервые три буквы слова: ", podskazka)
        variant = input("Попробуйте еще раз: ")
if variant == verno:
    print("\n Поздравляю! Вы отгадали")
    print("Спасибо за игру! У вас", score, "очков")
input("\nДля выхода нажмите Enter")
|
Mariaanisimova/pythonintask
|
PMIa/2015/NIKISHIN_P_S/task_8_22.py
|
Python
|
apache-2.0
| 2,335
|
import sys
import os.path
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
# Demo: start notepad.exe, inject a tiny assembly stub that spins in an
# infinite loop, then suspend the thread and patch its context so execution
# skips past the loop and returns a chosen exit value.
print("Creating a notepad") ## Replaced calc.exe by notepad.exe cause of windows 10.
notepad = windows.utils.create_process(r"C:\windows\system32\notepad.exe")
# You don't need to do that in our case, but it's useful to now
print("Priting threads")
for th in notepad.threads:
    print(" * {0}".format(th))
print("Writing some code in memory")
if notepad.bitness == 32:
    # Load a marker into the accumulator, then loop forever; the trailing
    # nops + ret only run after the instruction pointer is patched past
    # the jmp below.
    code = "mov eax, 0x42424242; label :start ; jmp :start; nop; nop; ret"
    rawcode = x86.assemble(code)
else:
    code = "mov rax, 0x4242424242424242; label :start ; jmp :start; nop; nop; ret"
    rawcode = x64.assemble(code)
print("Allocating memory")
with notepad.allocated_memory(0x1000) as addr:
    print("Writing code at <{0:#x}>".format(addr))
    notepad.write_memory(addr, rawcode)
    print("Creating thread on injected code")
    t = notepad.create_thread(addr, 0x11223344)
    print("New thread is {0}".format(t))
    print("Suspending thread")
    t.suspend()
    ctx = t.context
    print("Thread context is {0}".format(ctx))
    print("Dumping thread context:")
    ctx.dump()
    print("Changing context")
    # Skip the 2-byte `jmp :start` so the thread falls through to ret.
    ctx.pc += 2 # EIP / RIP
    ctx.func_result = 0x12345678 # EAX / RAX
    print("Setting new thread context")
    t.set_context(ctx)
    print("Resuming thread")
    t.resume()
    print("Waiting thread")
    t.wait()
print("Thread has exit: {0}".format(t.is_exit))
print("Thread exit value = {0:#x}".format(t.exit_code))
|
hakril/PythonForWindows
|
samples/process/thread.py
|
Python
|
bsd-3-clause
| 1,631
|
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import codecs
import os
import unittest
import datetime
from timelinelib.calendar.monthnames import english_name_of_month
from timelinelib.calendar.monthnames import month_from_english_name
import timelinelib.meta.about
import timelinelib.meta.version
class SourceCodeDistributionSpec(unittest.TestCase):
IO = True
def test_version_number_in_README_should_match_that_in_version_module(self):
self.assertTrue(
self.get_module_version_string() in
self.read_first_line_from(self.README))
def test_version_number_in_CHANGES_should_match_that_in_version_module(self):
self.assertTrue(
self.get_module_version_string() in
self.read_first_line_from(self.CHANGES))
def test_all_authors_mentioned_in_about_module_should_be_mentioned_in_AUTHORS(self):
authors_content = self.read_utf8_encoded_text_from(self.AUTHORS)
for author in self.get_authors_from_about_module():
self.assertTrue(author in authors_content)
def setUp(self):
self.ROOT_DIR = os.path.join(os.path.dirname(__file__), "..")
self.README = os.path.join(self.ROOT_DIR, "README")
self.CHANGES = os.path.join(self.ROOT_DIR, "CHANGES")
self.AUTHORS = os.path.join(self.ROOT_DIR, "AUTHORS")
self.MANPAGE = os.path.join(self.ROOT_DIR, "man", "man1", "timeline.1")
def get_authors_from_about_module(self):
return [possible_author.strip()
for possible_author
in self.get_possible_authors_from_about_module()
if self.is_author_from_about_module(possible_author)]
def get_possible_authors_from_about_module(self):
return (timelinelib.meta.about.DEVELOPERS +
timelinelib.meta.about.TRANSLATORS +
timelinelib.meta.about.ARTISTS)
def is_author_from_about_module(self, possible_author):
return possible_author and not self.is_header(possible_author)
def is_header(self, possible_author):
return ":" in possible_author
def get_release_date_from_changes(self):
first_line = self.read_first_line_from(self.CHANGES)
date_str = first_line.split("released on ")[1]
return self.parse_release_date_from(date_str)
def parse_release_date_from(self, date_str):
day_str, month_name, year_str = date_str.split(" ")
return datetime.date(
int(year_str),
month_from_english_name(month_name),
int(day_str))
def release_date_in_man_format(self, date):
return "%s %s" % (english_name_of_month(date.month), date.year)
def get_module_version_string(self):
return "%s.%s.%s" % timelinelib.meta.version.VERSION
def read_first_line_from(self, path):
f = open(path, "r")
first_line = f.readline()
f.close()
return first_line
def read_utf8_encoded_text_from(self, path):
f = codecs.open(path, "r", "utf-8")
content = f.read()
f.close()
return content
|
ezequielpereira/Time-Line
|
specs/SourceCodeDistribution.py
|
Python
|
gpl-3.0
| 3,772
|
from .common import name_inner_event
from .newmessage import NewMessage
from ..tl import types
@name_inner_event
class MessageEdited(NewMessage):
    """
    Occurs whenever a message is edited. Just like `NewMessage
    <telethon.events.newmessage.NewMessage>`, you should treat
    this event as a `Message <telethon.tl.custom.message.Message>`.

    .. warning::

        On channels, `Message.out <telethon.tl.custom.message.Message>`
        will be `True` if you sent the message originally, **not if
        you edited it**! This can be dangerous if you run outgoing
        commands on edits.

        Some examples follow:

        * You send a message "A", ``out is True``.
        * You edit "A" to "B", ``out is True``.
        * Someone else edits "B" to "C", ``out is True`` (**be careful!**).
        * Someone sends "X", ``out is False``.
        * Someone edits "X" to "Y", ``out is False``.
        * You edit "Y" to "Z", ``out is False``.

        Since there are useful cases where you need the right ``out``
        value, the library cannot do anything automatically to help you.
        Instead, consider using ``from_users='me'`` (it won't work in
        broadcast channels at all since the sender is the channel and
        not you).

    Example
        .. code-block:: python

            from telethon import events

            @client.on(events.MessageEdited)
            async def handler(event):
                # Log the date of new edits
                print('Message', event.id, 'changed at', event.date)
    """
    @classmethod
    def build(cls, update, others=None, self_id=None):
        # Only edit updates produce an event; any other update type falls
        # through and returns None implicitly, so the dispatcher ignores it.
        if isinstance(update, (types.UpdateEditMessage,
                               types.UpdateEditChannelMessage)):
            return cls.Event(update.message)

    class Event(NewMessage.Event):
        pass  # Required if we want a different name for it
|
expectocode/Telethon
|
telethon/events/messageedited.py
|
Python
|
mit
| 1,886
|
# Copyright (c) 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
import unittest
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFunctionBaseTest(unittest.TestCase):
    """
    Test the numpy core numeric brain module
    """

    # (function name, argument string) pairs; the argument string is spliced
    # verbatim into the snippet built by _inferred_numpy_func_call.
    numpy_functions = (
        ("linspace", "1, 100"),
        ("logspace", "1, 100"),
        ("geomspace", "1, 100"),
    )

    def _inferred_numpy_func_call(self, func_name, *func_args):
        """Build a snippet calling np.<func_name>(<func_args>) and infer it."""
        node = builder.extract_node(
            f"""
        import numpy as np
        func = np.{func_name:s}
        func({','.join(func_args):s})
        """
        )
        return node.infer()

    def test_numpy_function_calls_inferred_as_ndarray(self):
        """
        Test that calls to numpy functions are inferred as numpy.ndarray
        """
        licit_array_types = (".ndarray",)
        for func_ in self.numpy_functions:
            with self.subTest(typ=func_):
                inferred_values = list(self._inferred_numpy_func_call(*func_))
                # Exactly one inferred value is expected per call.
                self.assertTrue(
                    len(inferred_values) == 1,
                    msg=f"Too much inferred value for {func_[0]:s}",
                )
                # pytype() ends with ".ndarray" for a numpy array result.
                self.assertTrue(
                    inferred_values[-1].pytype() in licit_array_types,
                    msg="Illicit type for {:s} ({})".format(
                        func_[0], inferred_values[-1].pytype()
                    ),
                )


# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
PyCQA/astroid
|
tests/unittest_brain_numpy_core_function_base.py
|
Python
|
lgpl-2.1
| 2,168
|
"""Functions for interpreting data in different contexts. So, inverting a
colormap, interpolating points from pixel to data coordinates, and such."""
from __future__ import division, print_function
import numpy as np
from scipy.spatial import cKDTree
def invert_cmap(pix, l, colors):
    """
    Map an (ni, nj, nc) pixel array back onto values from ``l``.

    For each pixel, the nearest entry of ``colors`` (Euclidean distance in
    RGB space, found with a scipy.spatial.cKDTree) determines which element
    of ``l`` is returned at that position.
    """
    tree = cKDTree(colors)
    rows, cols, channels = pix.shape
    flat_pixels = pix.reshape((rows * cols, channels))
    _, nearest = tree.query(flat_pixels)
    return l[nearest.reshape((rows, cols))]
def order_corners(corners):
    """
    Order four corners as bottom-left, bottom-right, top-right, top-left.

    Selection rules, applied in turn to the remaining corners:
    min(x**2 + y**2), argmin(y), argmax(x), and whatever is left.
    """
    remaining = list(corners)
    assert len(remaining) == 4
    assert len(remaining[0]) == 2

    # Bottom-left: closest to the origin.
    bl = remaining.pop(np.argmin([x * x + y * y for x, y in remaining]))
    # Bottom-right: lowest y of the rest.
    br = remaining.pop(np.argmin([y for _, y in remaining]))
    # Top-right: largest x of the rest.
    tr = remaining.pop(np.argmax([x for x, _ in remaining]))
    # Top-left: the single corner that remains.
    return [bl, br, tr] + remaining
def get_corner_grid(corners, ni, nj):
    """
    Get uniform grid between corners

    Parameters
    ----------
    corners : list
        list of x,y coordinates
    ni : int
        number of points in first index
    nj : int
        number of points in second index

    Returns
    -------
    x : ndarray, shape=(ni, nj)
        grid of first dimension coordinates
    y : ndarray, shape=(ni, nj)
        grid of second dimension coordinates
    """
    bl, br, tr, tl = order_corners(corners)
    # Per-bin coordinates along each side of the quadrilateral.
    left_x = _midspace(tl[0], bl[0], ni)
    right_x = _midspace(tr[0], br[0], ni)
    bot_y = _midspace(bl[1], br[1], nj)
    top_y = _midspace(tl[1], tr[1], nj)

    # Normalized mid-bin positions along each axis.
    di = 1. / ni
    dj = 1. / nj
    ispan = np.arange(di / 2, 1., di)
    jspan = np.arange(dj / 2, 1., dj)

    # Interpolate x linearly between the left and right edges.
    # (A dead `x = np.zeros((ni, nj))` that was immediately overwritten
    # has been removed; `np.empty` below performs the only allocation.)
    x = np.empty((ni, nj))
    x[:, :] = jspan
    x *= (right_x - left_x)[:, None]
    x += left_x[:, None]

    # Interpolate y linearly between the bottom and top edges.
    y = np.empty((ni, nj))
    y[:, :] = ispan[:, None]
    y *= (top_y - bot_y)
    y += bot_y
    return x, y
def _midspace(start, end, n):
"""
Cut region start/end into n regions and return the midpoint of each bin
"""
x = np.linspace(start, end, n + 1)
x = x[:-1] + np.diff(x)
return x
|
mrterry/yoink
|
yoink/interp.py
|
Python
|
bsd-3-clause
| 2,458
|
# -*- coding: utf-8 -*-
"""
Local settings

- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""

import socket
import os

from .common import *  # noqa

# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='eh*&9(0n6lrfrji9r@g-#ucmngv2j+_7__i^4b*(lbt(064(+^')

# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')

# CACHING
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
    # Inside a container, the gateway (host) IP is the container IP with the
    # last octet replaced by 1.
    ip = socket.gethostbyname(socket.gethostname())
    INTERNAL_IPS += [ip[:-1] + "1"]

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', 'behave_django', 'webpack_loader')

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
WEBPACK_LOADER = {
    'DEFAULT': {
        'CACHE': not DEBUG,
        'BUNDLE_DIR_NAME': 'src/',  # must end with slash
        'STATS_FILE': os.path.join(str(ROOT_DIR), 'webpack-stats.json'),
        'POLL_INTERVAL': 0.1,
        'TIMEOUT': None,
        # Raw strings: the previous '.+\.hot-update.js' spelling relied on the
        # invalid escape sequence '\.' (DeprecationWarning on Python 3.6+).
        # The string values are byte-identical.
        'IGNORE': [r'.+\.hot-update.js', r'.+\.map']
    }
}
|
Alex-Just/gymlog
|
config/settings/local.py
|
Python
|
mit
| 2,584
|
# Reads a whitespace-delimited "itemmeasures" table from stdin -- the first
# line is a header containing at least 'word' and 'sentid' columns -- and
# writes the words to stdout, one sentence per line.
import sys

sentid_prev = 0      # sentid of the previous word; 0 means none seen yet
first_line = True    # header row not yet consumed
first_word = True    # nothing written yet (suppresses a leading delimiter)
for line in sys.stdin:
    row = line.strip().split()
    if first_line:
        # Locate the required columns from the header row.
        word_ix = row.index('word')
        sentid_ix = row.index('sentid')
        first_line = False
    else:
        word = row[word_ix]
        sentid = row[sentid_ix]
        if first_word:
            delim = ''
            first_word = False
        elif sentid == sentid_prev:
            delim = ' '    # same sentence: separate words with a space
        else:
            delim = '\n'   # sentence boundary: start a new line
        sentid_prev = sentid
        sys.stdout.write(delim + word)
sys.stdout.write('\n')
|
modelblocks/modelblocks-release
|
resource-general/scripts/itemmeasures2lineitems.py
|
Python
|
gpl-3.0
| 588
|
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.db import reset_queries
from django.db.models.options import Options
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
# Public API of this module.
__all__ = (
    'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
    'modify_settings', 'override_settings',
    'requires_tz_support',
    'setup_test_environment', 'teardown_test_environment',
)
# True when the OS can switch the process time zone via tzset()
# (not available on Windows); used by the requires_tz_support skip below.
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
    """Wrap a value so equality holds within a decimal-places tolerance.

    Two values compare equal when they match exactly, or when their
    absolute difference rounds to zero at ``places`` decimal places.
    """
    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        # Exact match short-circuits; otherwise compare the rounded delta.
        exact = self.val == other
        return exact or round(abs(self.val - other), self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        # String keys search each subcontext in order and return the first
        # hit; integer keys fall through to normal list indexing.
        if isinstance(key, six.string_types):
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        else:
            return super(ContextList, self).__getitem__(key)

    def __contains__(self, key):
        # Defined via __getitem__ so `'key' in ctx_list` works for string keys.
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        keys = set()
        for subcontext in self:
            # Each subcontext is itself a stack of dicts.
            for dict in subcontext:
                keys |= set(dict.keys())
        return keys
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Fire template_rendered before delegating to the node list so the test
    # client can record which template and context were used.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:

        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.

    The originals are stashed so teardown_test_environment() can restore them.
    """
    # Monkey-patch Template._render; the original is restored in teardown.
    Template._original_render = Template._render
    Template._render = instrumented_test_render

    # Storing previous values in the settings module itself is problematic.
    # Store them in arbitrary (but related) modules instead. See #20636.
    mail._original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

    request._original_allowed_hosts = settings.ALLOWED_HOSTS
    settings.ALLOWED_HOSTS = ['*']

    # Fresh outbox so tests can assert on sent messages.
    mail.outbox = []

    # Deactivate any active translation.
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:

        - Restoring the original test renderer
        - Restoring the email sending functions

    Mirrors setup_test_environment(); the stashed originals are deleted so a
    second teardown (or stale state) fails loudly rather than silently.
    """
    Template._render = Template._original_render
    del Template._original_render

    settings.EMAIL_BACKEND = mail._original_email_backend
    del mail._original_email_backend

    settings.ALLOWED_HOSTS = request._original_allowed_hosts
    del request._original_allowed_hosts

    del mail.outbox
def get_runner(settings, test_runner_class=None):
    """Import and return the test-runner class named by dotted path.

    Falls back to settings.TEST_RUNNER when test_runner_class is not given.
    """
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER

    test_path = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    # fromlist must be non-empty so __import__ returns the leaf module.
    test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
    test_runner = getattr(test_module, test_path[-1])
    return test_runner
class TestContextDecorator(object):
    """
    A base class that can either be used as a context manager during tests
    or as a test function or unittest.TestCase subclass decorator to perform
    temporary alterations.

    `attr_name`: attribute assigned the return value of enable() if used as
                 a class decorator.

    `kwarg_name`: keyword argument passing the return value of enable() if
                  used as a function decorator.
    """
    def __init__(self, attr_name=None, kwarg_name=None):
        self.attr_name = attr_name
        self.kwarg_name = kwarg_name

    def enable(self):
        # Subclasses apply their alteration here and return any context object.
        raise NotImplementedError

    def disable(self):
        # Subclasses undo the alteration made by enable().
        raise NotImplementedError

    def __enter__(self):
        return self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()

    def decorate_class(self, cls):
        if issubclass(cls, TestCase):
            decorated_setUp = cls.setUp
            decorated_tearDown = cls.tearDown

            def setUp(inner_self):
                # Enable before the original setUp so the altered state is
                # already in place; expose the context on the instance if
                # attr_name was given.
                context = self.enable()
                if self.attr_name:
                    setattr(inner_self, self.attr_name, context)
                decorated_setUp(inner_self)

            def tearDown(inner_self):
                # Run the original tearDown first, then undo the alteration.
                decorated_tearDown(inner_self)
                self.disable()
            cls.setUp = setUp
            cls.tearDown = tearDown
            return cls
        raise TypeError('Can only decorate subclasses of unittest.TestCase')

    def decorate_callable(self, func):
        @wraps(func, assigned=available_attrs(func))
        def inner(*args, **kwargs):
            # Run the wrapped function inside this object's context.
            with self as context:
                if self.kwarg_name:
                    kwargs[self.kwarg_name] = context
                return func(*args, **kwargs)
        return inner

    def __call__(self, decorated):
        # Dispatch on the decorated object: class, plain callable, or error.
        if isinstance(decorated, type):
            return self.decorate_class(decorated)
        elif callable(decorated):
            return self.decorate_callable(decorated)
        raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
    """
    Acts as either a decorator or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        self.options = kwargs
        super(override_settings, self).__init__()

    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Layer the overrides on top of the current settings and swap the
        # wrapped settings object; the original is kept for disable().
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        # Notify receivers (caches, etc.) of every changed setting.
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=True)

    def disable(self):
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        # Restore the pre-override settings object.
        settings._wrapped = self.wrapped
        del self.wrapped
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=False)

    def save_options(self, test_func):
        # Record the overrides on the test class so SimpleTestCase can apply
        # them in setUpClass rather than wrapping every test method.
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = dict(
                test_func._overridden_settings, **self.options)

    def decorate_class(self, cls):
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings")
        self.save_options(cls)
        return cls
class modify_settings(override_settings):
    """
    Like override_settings, but makes it possible to append, prepend or remove
    items instead of redefining the entire list.
    """
    def __init__(self, *args, **kwargs):
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
        # Deliberately skip override_settings.__init__ (it would consume the
        # kwargs as plain overrides); go straight to TestContextDecorator.
        super(override_settings, self).__init__()

    def save_options(self, test_func):
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations

    def enable(self):
        # Compute the final list value per setting, then delegate to
        # override_settings.enable() with plain overrides.
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; cumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, six.string_types):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        super(modify_settings, self).enable()
class override_system_checks(TestContextDecorator):
    """
    Acts as a decorator. Overrides list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        self.deployment_checks = deployment_checks
        super(override_system_checks, self).__init__()

    def enable(self):
        # Swap in the replacement check lists, stashing the originals.
        self.old_checks = self.registry.registered_checks
        self.registry.registered_checks = self.new_checks
        self.old_deployment_checks = self.registry.deployment_checks
        if self.deployment_checks is not None:
            self.registry.deployment_checks = self.deployment_checks

    def disable(self):
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered in the
    comparison. Leading and trailing whitespace is ignored on both chunks.

    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        # Collapse runs of whitespace to a single space.
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        # Concatenated text of the element's direct text children.
        return ''.join(c.data for c in element.childNodes
                       if c.nodeType == Node.TEXT_NODE)

    def children(element):
        # Direct element (non-text, non-comment) children only.
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        # Dict comparison makes attribute ordering irrelevant.
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        # Recursive structural comparison: tag, text, attributes, children.
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True

    def first_node(document):
        # Skip leading comment nodes (e.g. a license header).
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))

    return check_element(want_root, got_root)
def strip_quotes(want, got):
    """
    Remove surrounding quotes from a pair of doctest output values, but only
    when *both* values are quoted (plain or u-prefixed) the same way.

    >>> strip_quotes("'foo'")
    "foo"
    >>> strip_quotes('"foo"')
    "foo"
    """
    QUOTE_CHARS = ('"', "'")

    def is_quoted_string(s):
        s = s.strip()
        return len(s) >= 2 and s[0] == s[-1] and s[0] in QUOTE_CHARS

    def is_quoted_unicode(s):
        s = s.strip()
        return len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in QUOTE_CHARS

    if is_quoted_string(want) and is_quoted_string(got):
        want, got = want.strip()[1:-1], got.strip()[1:-1]
    elif is_quoted_unicode(want) and is_quoted_unicode(got):
        want, got = want.strip()[2:-1], got.strip()[2:-1]
    return want, got
def str_prefix(s):
    # Substitute '%(_)s' markers with 'u' on Python 2 (unicode repr prefix)
    # and with '' on Python 3, so expected-output strings work on both.
    return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
    """
    Context manager that captures queries executed by the specified connection.

    Supports iteration, indexing and len() over the captured queries.
    """
    def __init__(self, connection):
        self.connection = connection

    def __iter__(self):
        return iter(self.captured_queries)

    def __getitem__(self, index):
        return self.captured_queries[index]

    def __len__(self):
        return len(self.captured_queries)

    @property
    def captured_queries(self):
        # Slice of the connection's query log recorded while inside the block
        # (final_queries is None until __exit__, i.e. an open-ended slice).
        return self.connection.queries[self.initial_queries:self.final_queries]

    def __enter__(self):
        # Force query logging regardless of DEBUG, and stop request_started
        # from resetting the query log while we are capturing.
        self.force_debug_cursor = self.connection.force_debug_cursor
        self.connection.force_debug_cursor = True
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            # Leave final_queries as None when the block raised.
            return
        self.final_queries = len(self.connection.queries_log)
class ignore_warnings(TestContextDecorator):
    """Suppress warnings matching the given filter kwargs inside the context.

    warnings.filterwarnings is used when 'message' or 'module' filters are
    passed; otherwise the simpler warnings.simplefilter suffices.
    """
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
            self.filter_func = warnings.filterwarnings
        else:
            self.filter_func = warnings.simplefilter
        super(ignore_warnings, self).__init__()

    def enable(self):
        # catch_warnings saves/restores the global filter state.
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
    """
    Temporarily replace one method of a named logger and collect the messages
    it receives.

    Yields a list that accumulates the %-formatted message for each call (or
    a (message, kwargs) tuple when log_kwargs is True). The original logger
    method is restored on exit.
    """
    recorded = []

    def capture(msg, *args, **kwargs):
        rendered = msg % args
        recorded.append((rendered, kwargs) if log_kwargs else rendered)

    target = logging.getLogger(logger_name)
    original = getattr(target, log_level)
    setattr(target, log_level, capture)
    try:
        yield recorded
    finally:
        setattr(target, log_level, original)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
# Apply as @requires_tz_support on a test or test class.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
    """Temporarily append the given paths to sys.path, restoring the original
    list on exit (even if the block raises)."""
    saved_path = sys.path[:]
    sys.path += list(paths)
    try:
        yield
    finally:
        sys.path = saved_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Run the enclosed block with the given LRU cache emptied on entry,
    clearing it again on exit so no entries leak between tests."""
    lru_cache_object.cache_clear()
    try:
        yield
    finally:
        lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.

    Note: This function and the following ``captured_std*`` are copied
    from CPython's ``test.support`` module."""
    # Swap the named sys stream for an in-memory buffer; yield the buffer.
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, six.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    # Thin convenience wrapper around captured_output.
    return captured_output("stdout")


def captured_stderr():
    """Capture the output of sys.stderr:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")


def captured_stdin():
    """Capture the input to sys.stdin:

       with captured_stdin() as stdin:
           stdin.write('hello\n')
           stdin.seek(0)
           # call test code that consumes from sys.stdin
           captured = input()
       self.assertEqual(captured, "hello")
    """
    return captured_output("stdin")
def reset_warning_registry():
    """
    Clear warning registry for all modules. This is required in some tests
    because of a bug in Python that prevents warnings.simplefilter("always")
    from always making warnings appear: http://bugs.python.org/issue4180

    The bug was fixed in Python 3.4.2.
    """
    # Iterate over a snapshot so concurrent imports can't break iteration.
    for module in list(sys.modules.values()):
        try:
            module.__warningregistry__.clear()
        except AttributeError:
            # Module has no warning registry (or is a dummy None entry).
            pass
@contextmanager
def freeze_time(t):
    """
    Context manager to temporarily freeze time.time(). This temporarily
    modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected
    This isn't meant as a public API, but helps reduce some repetitive code in
    Django's test suite.
    """
    original_time = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = original_time
def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
    """
    # Skip first, then layer both template backends via override_settings.
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    test_func = override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }, {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {'keep_trailing_newline': True},
    }])(test_func)
    return test_func
class override_script_prefix(TestContextDecorator):
    """
    Decorator or context manager to temporary override the script prefix.
    """
    def __init__(self, prefix):
        self.prefix = prefix
        super(override_script_prefix, self).__init__()

    def enable(self):
        # Stash the current prefix so disable() can restore it.
        self.old_prefix = get_script_prefix()
        set_script_prefix(self.prefix)

    def disable(self):
        set_script_prefix(self.old_prefix)
class LoggingCaptureMixin(object):
    """
    Capture the output from the 'django' logger and store it on the class's
    logger_output attribute.
    """
    def setUp(self):
        # Redirect the first handler's stream to an in-memory buffer.
        self.logger = logging.getLogger('django')
        self.old_stream = self.logger.handlers[0].stream
        self.logger_output = six.StringIO()
        self.logger.handlers[0].stream = self.logger_output

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
    """
    Act as either a decorator or a context manager to register models defined
    in its wrapped context to an isolated registry.

    The list of installed apps the isolated registry should contain must be
    passed as arguments.

    Two optional keyword arguments can be specified:

    `attr_name`: attribute assigned the isolated registry if used as a class
                 decorator.

    `kwarg_name`: keyword argument passing the isolated registry if used as a
                  function decorator.
    """
    def __init__(self, *installed_apps, **kwargs):
        self.installed_apps = installed_apps
        super(isolate_apps, self).__init__(**kwargs)

    def enable(self):
        # Swap the registry that new model classes register themselves with.
        self.old_apps = Options.default_apps
        apps = Apps(self.installed_apps)
        setattr(Options, 'default_apps', apps)
        return apps

    def disable(self):
        setattr(Options, 'default_apps', self.old_apps)
def tag(*tags):
    """
    Decorator to add tags to a test class or method.
    """
    def decorator(obj):
        obj.tags = set(tags)
        return obj
    return decorator
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/django/test/utils.py
|
Python
|
bsd-2-clause
| 23,443
|
# This file provides the installation of the python library 'isca'
# Suggested installation procedure:
#    $ cd $GFDL_BASE/src/extra/python
#    $ pip install -e .
# This installs the package in *development mode* i.e. any changes you make to the python files
# or any additional files you add will be immediately available.
# In a new python console, from any directory, you can now access the execlim code:
# >>> from isca import experiment
# >>> exp = experiment.Experiment()
# ...
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# consider migrating to `from setuptools import setup` — confirm against the
# project's supported Python versions before changing.
from distutils.core import setup

setup(name='Isca',
      version='0.2',
      description='Isca utilities for running experiments and performing data analysis',
      author='Isca Team',
      url='https://github.com/ExeClim/Isca',
      packages=['isca'],
      # Runtime dependencies pulled in by `pip install -e .`
      install_requires=[
          'sh',
          'jinja2',
          'f90nml',
          'numpy',
          'pandas',
          'xarray'
      ]
      )
|
sit23/Isca
|
src/extra/python/setup.py
|
Python
|
gpl-3.0
| 911
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLIC dataset."""
from tensorflow_datasets.image import clic
import tensorflow_datasets.public_api as tfds
class ClicTest(tfds.testing.DatasetBuilderTestCase):
    DATASET_CLASS = clic.CLIC
    # One example expected per split in the test fixtures.
    SPLITS = {
        "train": 1,
        "validation": 1,
        "test": 1,
    }
    # Maps download keys to fixture directories; "skip" entries are
    # presumably archives intentionally absent from the fixtures -- verify
    # against the CLIC builder's download logic.
    DL_EXTRACT_RESULT = {
        "mobile_train": "train",
        "prof_train": "skip",
        "mobile_val": "skip",
        "prof_val": "valid",
        "mobile_test": "skip",
        "prof_test": "test",
    }


if __name__ == "__main__":
    tfds.testing.test_main()
|
tensorflow/datasets
|
tensorflow_datasets/image/clic_test.py
|
Python
|
apache-2.0
| 1,134
|
#
# Copyright (C) 2014 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Forms for sorted stats"""
from operator import itemgetter
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Fieldset, Row, Column
from nav.web.crispyforms import LabelSubmit
from . import CLASSMAP, TIMEFRAMES
def get_sections_list():
    """Return (key, title) choice pairs for every stats section, sorted by key."""
    choices = [(key, section.title) for key, section in CLASSMAP.items()]
    # Sort on the key only, exactly as the original key=itemgetter(0) did.
    choices.sort(key=itemgetter(0))
    return choices
class NumberInput(forms.TextInput):
    """Input widget with type set to number"""
    # Renders <input type="number"> so browsers show numeric controls.
    input_type = 'number'
class NumberField(forms.IntegerField):
    """IntegerField rendered with the HTML5 number input widget."""
    widget = NumberInput
class ViewForm(forms.Form):
    """Form for choosing which view to see on the statistics page"""
    view = forms.ChoiceField(choices=get_sections_list())
    timeframe = forms.ChoiceField(choices=TIMEFRAMES, initial=TIMEFRAMES[1][0])
    rows = NumberField(initial=5)
    def __init__(self, *args, **kwargs):
        super(ViewForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'custom'
        self.helper.form_action = ''
        # GET submission — presumably so the chosen statistic is shareable
        # via the query string; confirm with the consuming view.
        self.helper.form_method = 'GET'
        self.helper.layout = Layout(
            Fieldset(
                'Choose statistic',
                Row(
                    Column('view', css_class='medium-5'),
                    Column('timeframe', css_class='medium-3'),
                    Column('rows', css_class='medium-1'),
                    Column(LabelSubmit('submit', 'Show statistics',
                                       css_class='postfix'),
                           css_class='medium-3'),
                )
            )
        )
|
UNINETT/nav
|
python/nav/web/sortedstats/forms.py
|
Python
|
gpl-2.0
| 2,379
|
"""
Django settings for app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'YOUR_SECRET_KEY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(os.path.dirname(__file__), 'templates'),
],
'APP_DIRS': True,
},
]
# Application definition
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.tz",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django_su.context_processors.is_su",
)
PROJECT_APPS = [
'django_su',
'django.contrib.admin',
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.contenttypes',
# 'guardian',
'formadmin', # pip install django-form-admin
'ajax_select', # pip install django-ajax-select
] + PROJECT_APPS
ROOT_URLCONF = 'example.urls'
SITE_ID = 1
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
# "guardian.backends.ObjectPermissionBackend",
"django_su.backends.SuBackend",
)
# ANONYMOUS_USER_ID = -1
# URL to redirect after the login.
# Default: "/"
SU_LOGIN_REDIRECT_URL = "/"
# URL to redirect after the logout.
# Default: "/"
SU_LOGOUT_REDIRECT_URL = "/"
# A function to specify the perms that the user must have can use django_su
# Default: None
SU_LOGIN_CALLBACK = "example.utils.su_login_callback"
# A function to override the django.contrib.auth.login(request, user)
# function so you can set session data, etc.
# Default: None
SU_CUSTOM_LOGIN_ACTION = "example.utils.custom_login"
if 'ajax_select' in INSTALLED_APPS:
AJAX_LOOKUP_CHANNELS = {
'django_su': dict(model='auth.user', search_field='username'),
}
|
PetrDlouhy/django-su
|
example/settings.py
|
Python
|
mit
| 3,436
|
from simple_parsing import ArgumentParser
from dataclasses import dataclass
@dataclass
class Config:
    """Simple example of a class that can be reused"""
    log_dir: str = "logs"
parser = ArgumentParser()
# Register the same dataclass twice under distinct prefixes — presumably so
# the generated flags don't collide (e.g. train_log_dir vs valid_log_dir);
# confirm against simple_parsing's prefix documentation.
parser.add_arguments(Config, "train_config", prefix="train_")
parser.add_arguments(Config, "valid_config", prefix="valid_")
args = parser.parse_args()
print(vars(args))
|
lebrice/SimpleParsing
|
examples/prefixing/manual_prefix_example.py
|
Python
|
mit
| 382
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import deque
from nupic.bindings.regions.PyRegion import PyRegion
class RawValues(PyRegion):
  """
  RawValues is a simple region used to send raw scalar values into networks.
  It accepts data using the command "addDataToQueue" or through the function
  addDataToQueue() which can be called directly from Python. Data is queued up
  in a FIFO and each call to compute pops the top element.
  Each data record consists of list of floats and a 0/1 reset flag.
  """
  # Fix: docstring previously named the class "RawDate". This string is
  # runtime-visible — getSpec() publishes it as the region description.

  def __init__(self, outputWidth=1):
    # Width of the dataOut vector (mirrors the 'outputWidth' parameter).
    self.outputWidth = outputWidth
    # FIFO: addDataToQueue() appends on the left, compute() pops the right.
    self.queue = deque()

  @classmethod
  def getSpec(cls):
    """Return the region spec: outputs, parameters and commands."""
    spec = dict(
        description=RawValues.__doc__,
        singleNodeOnly=True,
        outputs=dict(
            dataOut=dict(
                description="List of floats",
                dataType="Real32",
                count=0,
                regionLevel=True,
                isDefaultOutput=True
            ),
            resetOut=dict(
                description="Reset flag",
                dataType="Bool",
                count=1,
                regionLevel=True,
                isDefaultOutput=False
            ),
        ),
        inputs=dict(),
        parameters=dict(
            outputWidth=dict(
                description="Size of output data",
                dataType="UInt32",
                accessMode="ReadWrite",
                count=1,
                defaultValue=1,
            )
        ),
        commands=dict(
            addDataToQueue=dict(description="Add data to region. Each data record "
                                            "consists of list of and a reset flag")
        )
    )
    return spec

  def compute(self, inputs, outputs):
    """
    Get the next record from the queue and outputs it.
    Raises if the queue is empty — callers must enqueue before each compute.
    """
    if len(self.queue) > 0:
      # Take the top element of the data queue (FIFO order).
      data = self.queue.pop()
    else:
      raise Exception("RawValues: No data: queue is empty ")
    # Copy data into output vectors
    outputs["resetOut"][0] = data["reset"]
    outputs["dataOut"][:] = data["dataOut"]

  def addDataToQueue(self, displacement, reset=False):
    """
    Add the given displacement to the region's internal queue. Calls to compute
    will cause items in the queue to be dequeued in FIFO order.
    :param displacement: Two floats representing translation vector [dx, dy] to
           be passed to the linked regions via 'dataOut'
    :type displacement: list
    :param reset: Reset flag to be passed to the linked regions via 'resetOut'
    :type reset: bool
    """
    self.queue.appendleft({
      "dataOut": list(displacement),
      "reset": bool(reset)
    })

  def getOutputElementCount(self, name):
    """Return the element count for the named output; raise on unknown names."""
    if name == "resetOut":
      return 1
    elif name == "dataOut":
      return self.outputWidth
    else:
      raise Exception("Unknown output {}.".format(name))

  def initialize(self):
    # Nothing to set up beyond __init__; required by the PyRegion interface.
    pass
|
neuroidss/nupic.research
|
htmresearch/regions/RawValues.py
|
Python
|
agpl-3.0
| 3,767
|
import subprocess
from build.project import Project
class ZlibProject(Project):
    """Build adapter for zlib: drives its win32/Makefile.gcc MinGW cross-build.

    The previous pass-through ``__init__`` only forwarded its arguments to
    ``Project.__init__`` unchanged, so it was removed; the inherited
    constructor keeps the exact same call signature for callers.
    """

    def build(self, toolchain):
        """Unpack zlib inside the source tree and run the makefile's
        ``install`` target into the toolchain's install prefix."""
        src = self.unpack(toolchain, out_of_tree=False)
        # NOTE(review): '-j12' hard-codes the parallel job count and
        # '/usr/bin/make' the make path; consider os.cpu_count() / PATH
        # lookup. Kept as-is to preserve behavior.
        subprocess.check_call(['/usr/bin/make', '--quiet',
                               '-f', 'win32/Makefile.gcc',
                               'PREFIX=' + toolchain.arch + '-',
                               '-j12',
                               'install',
                               'DESTDIR=' + toolchain.install_prefix + '/',
                               'INCLUDE_PATH=include',
                               'LIBRARY_PATH=lib',
                               'BINARY_PATH=bin', 'SHARED_MODE=1'],
                              cwd=src, env=toolchain.env)
|
susman/mpd
|
python/build/zlib.py
|
Python
|
gpl-2.0
| 708
|
#!/usr/bin/python
# method - constructor
# blueprint
# class Account(object):
# class Account:
class Account:
    """Toy bank account holding a single integer balance."""

    def __init__(self):
        # Constructor runs implicitly on instantiation; start with nothing.
        self.balance = 0

    def deposit(self):
        """Add a fixed 1000 and return the new balance."""
        self.balance += 1000
        return self.balance

    def withdraw(self):
        """Remove a fixed 200 and return the new balance."""
        self.balance -= 200
        return self.balance

    def amount(self):
        """Return a human-readable balance string for this instance."""
        return "my balance is {}".format(self.balance)
# Demo driver. Fix: the bare Python-2 `print x` statements are a syntax error
# under Python 3; `print(x)` with a single parenthesized expression prints
# identically under Python 2 and is valid Python 3.
# kumar
kumar = Account()
print(dir(kumar))
kumar.deposit()
print(kumar.amount())
kumar.withdraw()
print(kumar.amount())
# dhar — a second, independent instance: balances do not interact.
dhar = Account()
print(dhar.amount())
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-65/14-oop/05-program.py
|
Python
|
gpl-3.0
| 634
|
"""
HamiltonianPy
=============
Provides
1. Unified description of common lattice with translation symmetry;
2. Bases of the Hilbert space in occupation number representation;
3. Building block for constructing a model Hamiltonian;
4. Lanczos algorithm for calculating the ground state energy and single
particle Green's function, etc.
"""
from .GreenFunction import *
from .lattice import *
from .quantumoperator import *
from .bond import Bond
from .hilbertspace import base_vectors
from .indextable import IndexTable
from .lanczos import KrylovRepresentation, MultiKrylov
from .line2d import Line2D, Location
from .rotation3d import RotationEuler, RotationGeneral, RotationX, RotationY, RotationZ
from .version import version as __version__
# Public API: explicitly named re-exports first, then everything the
# star-imported submodules themselves declare public.
__all__ = [
    "__version__",
    "Bond", "base_vectors", "IndexTable",
    "KrylovRepresentation", "MultiKrylov",
    "Line2D", "Location",
    "RotationEuler", "RotationGeneral",
    "RotationX", "RotationY", "RotationZ",
]
__all__ += GreenFunction.__all__
__all__ += lattice.__all__
__all__ += quantumoperator.__all__
|
wangshiphys/HamiltonianPy
|
HamiltonianPy/__init__.py
|
Python
|
gpl-3.0
| 1,082
|
# -*- coding: utf-8 -*-
import os
import tempfile
from mock import patch
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.client import RequestFactory
from olympia import amo
from olympia.amo.tests import TestCase, addon_factory, req_factory_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import rm_local_tmp_dir
from olympia.addons import forms
from olympia.addons.models import Addon, Category
from olympia.files.helpers import copyfileobj
from olympia.tags.models import Tag, AddonTag
from olympia.users.models import UserProfile
class TestAddonFormSupport(TestCase):
    """URL validation on AddonFormSupport's support_url field: only
    http(s)-style URLs pass; javascript:/ftp: schemes are rejected."""
    def test_bogus_support_url(self):
        form = forms.AddonFormSupport(
            {'support_url': 'javascript://something.com'}, request=None)
        assert not form.is_valid()
        assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
    def test_ftp_support_url(self):
        form = forms.AddonFormSupport(
            {'support_url': 'ftp://foo.com'}, request=None)
        assert not form.is_valid()
        assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
    def test_http_support_url(self):
        form = forms.AddonFormSupport(
            {'support_url': 'http://foo.com'}, request=None)
        assert form.is_valid()
class FormsTest(TestCase):
    """Name, slug, locale and homepage validation for AddonFormBasic and
    AddonFormDetails, including name-uniqueness across listed/unlisted
    add-ons and add-on types."""
    fixtures = ('base/addon_3615', 'base/addon_3615_categories',
                'addons/blacklisted')
    def setUp(self):
        super(FormsTest, self).setUp()
        # 'Delicious Bookmarks' comes from the base/addon_3615 fixture.
        self.existing_name = 'Delicious Bookmarks'
        self.non_existing_name = 'Does Not Exist'
        self.error_msg = 'This name is already in use. Please choose another.'
        self.request = req_factory_factory('/')
    def test_update_addon_non_existing_name(self):
        """An add-on edit can change the name to any non-existing name."""
        addon = addon_factory(name='some name')
        form = forms.AddonFormBasic(dict(name=self.non_existing_name),
                                    request=self.request, instance=addon)
        form.is_valid()
        assert 'name' not in form.errors
    def test_update_addon_existing_name(self):
        """An add-on edit can't change the name to an existing add-on name."""
        addon = addon_factory(name='some name')
        form = forms.AddonFormBasic(dict(name=self.existing_name),
                                    request=self.request, instance=addon)
        assert not form.is_valid()
        assert form.errors['name'][0][1] == self.error_msg
    def test_update_addon_existing_name_used_by_unlisted(self):
        """An add-on edit can change the name to an existing name used by an
        unlisted add-on."""
        Addon.objects.get(pk=3615).update(is_listed=False)
        addon = addon_factory(name='some name')
        form = forms.AddonFormBasic(dict(name=self.existing_name),
                                    request=self.request, instance=addon)
        form.is_valid()
        assert 'name' not in form.errors
    def test_update_addon_existing_name_used_by_listed(self):
        """An unlisted add-on edit can change the name to an existing name used
        by an listed add-on."""
        addon = addon_factory(name='some name', is_listed=False)
        form = forms.AddonFormBasic(dict(name=self.existing_name),
                                    request=self.request, instance=addon)
        form.is_valid()
        assert 'name' not in form.errors
    def test_update_addon_existing_name_used_by_other_type(self):
        """An add-on edit can change the name to an existing name used by
        another add-on type."""
        addon = addon_factory(name='some name', type=amo.ADDON_PERSONA)
        form = forms.AddonFormBasic(dict(name=self.existing_name),
                                    request=self.request, instance=addon)
        form.is_valid()
        assert 'name' not in form.errors
    def test_old(self):
        """
        Exiting add-ons shouldn't be able to use someone else's name.
        """
        a = Addon.objects.create(type=1)
        f = forms.AddonFormBasic(dict(name=self.existing_name),
                                 request=self.request, instance=a)
        assert not f.is_valid()
        assert f.errors.get('name')[0][1] == self.error_msg
    def test_old_same(self):
        """
        Exiting add-ons should be able to re-use their name.
        """
        delicious = Addon.objects.get()
        f = forms.AddonFormBasic(dict(name=self.existing_name),
                                 request=self.request, instance=delicious)
        f.is_valid()
        assert f.errors.get('name') is None
    def test_locales(self):
        form = forms.AddonFormDetails(request=self.request)
        assert form.fields['default_locale'].choices[0][0] == 'af'
    def test_slug_blacklist(self):
        delicious = Addon.objects.get()
        form = forms.AddonFormBasic({'slug': 'submit'}, request=self.request,
                                    instance=delicious)
        assert not form.is_valid()
        assert form.errors['slug'] == (
            [u'The slug cannot be "submit". Please choose another.'])
    def test_bogus_homepage(self):
        form = forms.AddonFormDetails(
            {'homepage': 'javascript://something.com'}, request=self.request)
        assert not form.is_valid()
        assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
    def test_ftp_homepage(self):
        form = forms.AddonFormDetails(
            {'homepage': 'ftp://foo.com'}, request=self.request)
        assert not form.is_valid()
        assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
    def test_homepage_is_not_required(self):
        delicious = Addon.objects.get()
        form = forms.AddonFormDetails(
            {'default_locale': 'en-US'},
            request=self.request, instance=delicious)
        assert form.is_valid()
    def test_slug_isdigit(self):
        delicious = Addon.objects.get()
        form = forms.AddonFormBasic({'slug': '123'}, request=self.request,
                                    instance=delicious)
        assert not form.is_valid()
        assert form.errors['slug'] == (
            [u'The slug cannot be "123". Please choose another.'])
class TestTagsForm(TestCase):
    """Tag handling through AddonFormBasic: normalization (case, spaces,
    XSS stripping), restricted tags, per-add-on tag limits and length."""
    fixtures = ['base/addon_3615', 'base/users']
    def setUp(self):
        super(TestTagsForm, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        category = Category.objects.get(pk=22)
        category.name = 'test'
        category.save()
        # Minimal valid form payload; individual tests add 'tags' to it.
        self.data = {
            'summary': str(self.addon.summary),
            'name': str(self.addon.name),
            'slug': self.addon.slug,
        }
        self.user = self.addon.authors.all()[0]
        amo.set_user(self.user)
        self.request = req_factory_factory('/')
    def add_tags(self, tags):
        """Submit the form with the given comma-separated tag string."""
        data = self.data.copy()
        data.update({'tags': tags})
        form = forms.AddonFormBasic(data=data, request=self.request,
                                    instance=self.addon)
        assert form.is_valid()
        form.save(self.addon)
        return form
    def get_tag_text(self):
        return [t.tag_text for t in self.addon.tags.no_cache().all()]
    def test_tags(self):
        self.add_tags('foo, bar')
        assert self.get_tag_text() == ['bar', 'foo']
    def test_tags_xss(self):
        # Markup characters are stripped, not escaped.
        self.add_tags('<script>alert("foo")</script>, bar')
        assert self.get_tag_text() == ['bar', 'scriptalertfooscript']
    def test_tags_case_spaces(self):
        self.add_tags('foo, bar')
        self.add_tags('foo, bar , Bar, BAR, b a r ')
        assert self.get_tag_text() == ['b a r', 'bar', 'foo']
    def test_tags_spaces(self):
        self.add_tags('foo, bar beer')
        assert self.get_tag_text() == ['bar beer', 'foo']
    def test_tags_unicode(self):
        self.add_tags(u'Österreich')
        assert self.get_tag_text() == [u'Österreich'.lower()]
    def add_restricted(self, *args):
        """Attach restricted tags (default: 'restartless') to the add-on."""
        if not args:
            args = ['restartless']
        for arg in args:
            tag = Tag.objects.create(tag_text=arg, restricted=True)
            AddonTag.objects.create(tag=tag, addon=self.addon)
    def test_tags_restricted(self):
        # Restricted tags survive edits by non-admin users but are not shown
        # in the form's initial value.
        self.add_restricted()
        self.add_tags('foo, bar')
        form = forms.AddonFormBasic(data=self.data, request=self.request,
                                    instance=self.addon)
        assert form.fields['tags'].initial == 'bar, foo'
        assert self.get_tag_text() == ['bar', 'foo', 'restartless']
        self.add_tags('')
        assert self.get_tag_text() == ['restartless']
    def test_tags_error(self):
        self.add_restricted('restartless', 'sdk')
        data = self.data.copy()
        data.update({'tags': 'restartless'})
        form = forms.AddonFormBasic(data=data, request=self.request,
                                    instance=self.addon)
        assert form.errors['tags'][0] == (
            '"restartless" is a reserved tag and cannot be used.')
        data.update({'tags': 'restartless, sdk'})
        form = forms.AddonFormBasic(data=data, request=self.request,
                                    instance=self.addon)
        assert form.errors['tags'][0] == (
            '"restartless", "sdk" are reserved tags and cannot be used.')
    @patch('olympia.access.acl.action_allowed')
    def test_tags_admin_restricted(self, action_allowed):
        # Admins (action_allowed → True) may add and see restricted tags.
        action_allowed.return_value = True
        self.add_restricted('restartless')
        self.add_tags('foo, bar')
        assert self.get_tag_text() == ['bar', 'foo']
        self.add_tags('foo, bar, restartless')
        assert self.get_tag_text() == ['bar', 'foo', 'restartless']
        form = forms.AddonFormBasic(data=self.data, request=self.request,
                                    instance=self.addon)
        assert form.fields['tags'].initial == 'bar, foo, restartless'
    @patch('olympia.access.acl.action_allowed')
    def test_tags_admin_restricted_count(self, action_allowed):
        action_allowed.return_value = True
        self.add_restricted()
        self.add_tags('restartless, %s' % (', '.join('tag-test-%s' %
                                                     i for i in range(0, 20))))
    def test_tags_restricted_count(self):
        self.add_restricted()
        self.add_tags(', '.join('tag-test-%s' % i for i in range(0, 20)))
    def test_tags_slugified_count(self):
        # Duplicate tags collapse to one after slugification.
        self.add_tags(', '.join('tag-test' for i in range(0, 21)))
        assert self.get_tag_text() == ['tag-test']
    def test_tags_limit(self):
        self.add_tags(' %s' % ('t' * 128))
    def test_tags_long(self):
        tag = ' -%s' % ('t' * 128)
        data = self.data.copy()
        data.update({"tags": tag})
        form = forms.AddonFormBasic(data=data, request=self.request,
                                    instance=self.addon)
        assert not form.is_valid()
        assert form.errors['tags'] == [
            'All tags must be 128 characters or less after invalid characters'
            ' are removed.']
class TestIconForm(TestCase):
    """Icon upload/modification behavior of AddonFormMedia."""
    fixtures = ['base/addon_3615']
    # TODO: AddonFormMedia save() method could do with cleaning up
    # so this isn't necessary
    def setUp(self):
        super(TestIconForm, self).setUp()
        self.temp_dir = tempfile.mkdtemp()
        self.addon = Addon.objects.get(pk=3615)
        class DummyRequest:
            FILES = None
        self.request = DummyRequest()
        self.icon_path = os.path.join(settings.TMP_PATH, 'icon')
        if not os.path.exists(self.icon_path):
            os.makedirs(self.icon_path)
    def tearDown(self):
        rm_local_tmp_dir(self.temp_dir)
        super(TestIconForm, self).tearDown()
    def get_icon_paths(self):
        """Expected on-disk icon paths, one per configured icon size."""
        path = os.path.join(self.addon.get_icon_dir(), str(self.addon.id))
        return ['%s-%s.png' % (path, size) for size in amo.ADDON_ICON_SIZES]
    @patch('olympia.addons.models.Addon.get_icon_dir')
    def testIconUpload(self, get_icon_dir):
        # TODO(gkoberger): clarify this please.
        # We no longer use AddonFormMedia to upload icons, so
        # skipping until I can ask andym what the point of this
        # test is. Additionally, it's called "TestIconRemoval",
        # but it doesn't seem to remove icons.
        return
        # NOTE(review): everything below is dead code after the early return.
        get_icon_dir.return_value = self.temp_dir
        for path in self.get_icon_paths():
            assert not os.path.exists(path)
        img = get_image_path('non-animated.png')
        data = {'icon_upload': img, 'icon_type': 'text/png'}
        self.request.FILES = {'icon_upload': open(img)}
        form = forms.AddonFormMedia(data=data, request=self.request,
                                    instance=self.addon)
        assert form.is_valid()
        form.save(self.addon)
        for path in self.get_icon_paths():
            assert os.path.exists(path)
    @patch('olympia.amo.models.ModelBase.update')
    def test_icon_modified(self, update_mock):
        name = 'transparent.png'
        form = forms.AddonFormMedia({'icon_upload_hash': name},
                                    request=self.request,
                                    instance=self.addon)
        dest = os.path.join(self.icon_path, name)
        with storage.open(dest, 'w') as f:
            copyfileobj(open(get_image_path(name)), f)
        assert form.is_valid()
        form.save(addon=self.addon)
        assert update_mock.called
class TestCategoryForm(TestCase):
    """Which applications CategoryFormSet builds per-app forms for."""
    def test_no_possible_categories(self):
        Category.objects.create(type=amo.ADDON_SEARCH,
                                application=amo.FIREFOX.id)
        addon = Addon.objects.create(type=amo.ADDON_SEARCH)
        request = req_factory_factory('/')
        form = forms.CategoryFormSet(addon=addon, request=request)
        apps = [f.app for f in form.forms]
        assert apps == [amo.FIREFOX]
class TestThemeForm(TestCase):
    """ThemeForm edge cases around persona (theme) submission."""
    # Don't save image, we use a fake one.
    @patch('olympia.addons.forms.save_theme')
    def test_long_author_or_display_username(self, mock_save_theme):
        # Bug 1181751.
        user = UserProfile.objects.create(email='foo@bar.com',
                                          username='a' * 255,
                                          display_name='b' * 255)
        request = RequestFactory()
        request.user = user
        cat = Category.objects.create(type=amo.ADDON_PERSONA)
        form = forms.ThemeForm({
            'name': 'my theme',
            'slug': 'my-theme',
            'category': cat.pk,
            'header': 'some_file.png',
            'agreed': True,
            'header_hash': 'hash',
            'license': 1}, request=request)
        assert form.is_valid()
        # Make sure there's no database issue, like too long data for the
        # author or display_name fields.
        form.save()
|
andymckay/addons-server
|
src/olympia/addons/tests/test_forms.py
|
Python
|
bsd-3-clause
| 14,967
|
import config
from controlevents import CEvent, ConsoleEvent
import historybuffer
from utils import timers, hw
dim = 'Bright'  # current display brightness state: 'Bright' or 'Dim'
def Dim():
    """Drop the display to the configured dim brightness level."""
    global dim
    dim = 'Dim'
    hw.GoDim(int(config.sysStore.DimLevel))
def Brighten():
    """Raise the display to the configured bright brightness level."""
    global dim
    dim = 'Bright'
    hw.GoBright(int(config.sysStore.BrightLevel))
def DimState():
    """Return the current brightness state ('Bright' or 'Dim')."""
    return dim
ScreenStack = []  # presumably the stack of screens for back-navigation — confirm with callers
screenstate = 'Home'  # name of the current screen state
HBScreens = historybuffer.HistoryBuffer(20, 'Screens') # history buffer for screen activities
Chain = 0 # which screen chain is active 0: Main chain 1: Secondary Chain
ActivityTimer = timers.ResettableTimer(name='ActivityTimer', start=True)  # inactivity timer (see SetActivityTimer)
activityseq = 0  # monotonically increasing id stamped onto each timer event
def SetActivityTimer(timeinsecs, dbgmsg):
    """(Re)arm the inactivity timer to post an ACTIVITYTIMER console event
    after timeinsecs seconds.

    Each call bumps activityseq and stamps it on the event — presumably so
    consumers can discard events from superseded timers; confirm with the
    event handler.
    """
    global activityseq
    activityseq += 1
    ActivityTimer.set(ConsoleEvent(CEvent.ACTIVITYTIMER, seq=activityseq, msg=dbgmsg), timeinsecs)
|
kevinkahn/softconsole
|
guicore/screenmgt.py
|
Python
|
apache-2.0
| 807
|
#!/usr/bin/env python
"""
decorator.
"""
def command(func):
    """Decorator marking *func* as a command via an ``is_command`` attribute.

    The function object is returned unchanged (no wrapping).
    """
    setattr(func, 'is_command', True)
    return func
|
marlboromoo/basinboa
|
basinboa/system/decorator.py
|
Python
|
mit
| 139
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options registration and useful routines.
"""
import sys
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
CONF = cfg.CONF
def parse_args(args=None):
    """Parse CLI/config-file arguments into the global CONF object."""
    CONF(args=args, version=VERSION_STRING)
def register_opts():
    """Register all option groups used by the action runner service."""
    _register_common_opts()
    _register_action_runner_opts()
def _register_common_opts():
    """Register options shared by all st2 services (delegated to st2common)."""
    common_config.register_opts()
def _register_action_runner_opts():
    """Register action-runner-specific option groups on the global CONF."""
    # [actionrunner] — runner process logging and Python action interpreter.
    logging_opts = [
        cfg.StrOpt('logging', default='conf/logging.conf',
                   help='location of the logging.conf file'),
        cfg.StrOpt('python_binary', default=sys.executable,
                   help='Python binary which will be used by Python actions.')
    ]
    CONF.register_opts(logging_opts, group='actionrunner')
    # [database] — MongoDB connection settings.
    db_opts = [
        cfg.StrOpt('host', default='0.0.0.0', help='host of db server'),
        cfg.IntOpt('port', default=27017, help='port of db server'),
        cfg.StrOpt('db_name', default='st2', help='name of database')
    ]
    CONF.register_opts(db_opts, group='database')
    # [ssh_runner] — remote-command runner behavior.
    ssh_runner_opts = [
        cfg.StrOpt('remote_dir',
                   default='/tmp',
                   help='Location of the script on the remote filesystem.'),
        cfg.BoolOpt('allow_partial_failure',
                    default=False,
                    help='How partial success of actions run on multiple nodes should be treated.')
    ]
    CONF.register_opts(ssh_runner_opts, group='ssh_runner')
    # [mistral] — workflow engine endpoint and connection retry policy.
    mistral_opts = [
        cfg.StrOpt('v2_base_url', default='http://localhost:8989/v2',
                   help='Mistral v2 API server root endpoint.'),
        cfg.IntOpt('max_attempts', default=180,
                   help='Maximum no of attempts made to connect to Mistral.'),
        cfg.IntOpt('retry_wait', default=5,
                   help='Time in seconds to wait before retrying connection to Mistral.')
    ]
    CONF.register_opts(mistral_opts, group='mistral')
    # [cloudslang] — CloudSlang runner install location.
    cloudslang_opts = [
        cfg.StrOpt('home_dir', default='/opt/cslang',
                   help='CloudSlang home directory.'),
    ]
    CONF.register_opts(cloudslang_opts, group='cloudslang')
def get_logging_config_path():
    """Return the configured path of the actionrunner logging config file."""
    return CONF.actionrunner.logging
register_opts()
|
grengojbo/st2
|
st2actions/st2actions/config.py
|
Python
|
apache-2.0
| 3,073
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
# Fix: fractions.gcd was deprecated in 3.5 and removed in Python 3.9;
# math.gcd is the supported replacement.
from math import gcd
from functools import reduce
def lcm(int1, int2):
    """Calculate the least common multiple of two integers, a and b."""
    # Floor division keeps the arithmetic exact for arbitrarily large ints;
    # the previous int(a*b/gcd) round-tripped through a float and could lose
    # precision beyond 2**53.
    return int1 * int2 // gcd(int1, int2)
# LCM of 1..20 (Project Euler problem 5).
print(reduce(lcm, range(1, 20+1)))
|
GT-IDEaS/SkillsWorkshop2017
|
Week01/Problem05/cyu_05.py
|
Python
|
bsd-3-clause
| 363
|
from django.db import models
class StatusPage(models.Model):
    """A named status page with an optional free-text description."""
    # Required display name.
    name = models.CharField(max_length=100, null=False, blank=False)
    # Optional longer description; NULL when absent.
    description = models.CharField(max_length=1000, null=True, blank=True, default=None)
    def __str__(self):
        return self.name
|
leonardoarroyo/easystatus
|
easystatusapi/models/status_page.py
|
Python
|
gpl-3.0
| 259
|
# -*- coding: latin-1 -*-
import common
import sys, os, traceback
import time
import random
import re
import urllib
import string
import HTMLParser
from string import lower
from entities.CList import CList
from entities.CItemInfo import CItemInfo
from entities.CListItem import CListItem
from entities.CRuleItem import CRuleItem
import customReplacements as cr
import customConversions as cc
from utils import encodingUtils as enc, regexUtils
from utils import decryptionUtils as crypt
from utils import datetimeUtils as dt
from utils.webUtils import get_redirected_url
from utils.fileUtils import findInSubdirectory, getFileContent, getFileExtension
from utils.scrapingUtils import findVideoFrameLink, findContentRefreshLink, findRTMP, findJS, findPHP, getHostName, findEmbedPHPLink, findVCods
from common import getHTML
class ParsingResult(object):
    """Outcome of a parse run: a status code plus the scraped item list."""

    class Code:
        """Numeric status codes describing how the parse attempt ended."""
        SUCCESS = 0
        CFGFILE_NOT_FOUND = 1
        CFGSYNTAX_INVALID = 2
        WEBREQUEST_FAILED = 3

    def __init__(self, code, itemsList):
        # Attribute names are part of the public contract: callers read
        # .code, .list and .message directly.
        self.code = code
        self.list = itemsList
        self.message = None
class Parser(object):
"""
returns a list of items
"""
    def parse(self, lItem):
        """Resolve a list item into a ParsingResult.

        Loads the relevant .cfg (catcher override, direct .cfg url, or the
        item's own cfg), scrapes remote content per its rules, optionally
        auto-selects a single rss child, and de-duplicates results.
        """
        url = lItem['url']
        cfg = lItem['cfg']
        ext = getFileExtension(url)
        successfullyScraped = True
        tmpList = None
        # A 'catcher' forces a specific catcher cfg regardless of the url.
        if lItem['catcher']:
            catcher = lItem['catcher']
            cfg = os.path.join(common.Paths.catchersDir, '__' + catcher + '.cfg')
            tmpList = self.__loadLocal(cfg, lItem)
            if tmpList and len(tmpList.rules) > 0:
                successfullyScraped = self.__loadRemote(tmpList, lItem)
        else:
            if ext == 'cfg':
                # The url itself is a cfg file; scrape from its start url.
                tmpList = self.__loadLocal(url, lItem)
                if tmpList and tmpList.start != '' and len(tmpList.rules) > 0:
                    lItem['url'] = tmpList.start
                    successfullyScraped = self.__loadRemote(tmpList, lItem)
            elif cfg:
                tmpList = self.__loadLocal(cfg, lItem)
                if tmpList and len(tmpList.rules) > 0:
                    successfullyScraped = self.__loadRemote(tmpList, lItem)
        # autoselect: recurse straight into a lone rss child item.
        if tmpList and tmpList.skill.find('autoselect') != -1 and len(tmpList.items) == 1:
            m = tmpList.items[0]
            m_type = m['type']
            if m_type == 'rss':
                common.log('Autoselect - ' + m['title'])
                lItem = m
                tmpList = self.parse(lItem).list
        if not tmpList:
            return ParsingResult(ParsingResult.Code.CFGSYNTAX_INVALID, None)
        if tmpList and successfullyScraped == False:
            return ParsingResult(ParsingResult.Code.WEBREQUEST_FAILED, None)
        # Remove duplicates (keyed on url|cfg), unless the cfg allows them.
        # Iterates backwards so removal is safe while indexing.
        if tmpList.skill.find('allowDuplicates') == -1:
            urls = []
            for i in range(len(tmpList.items)-1,-1,-1):
                item = tmpList.items[i]
                tmpUrl = item['url']
                tmpCfg = item['cfg']
                if not tmpCfg:
                    tmpCfg = ''
                if not urls.__contains__(tmpUrl + '|' + tmpCfg):
                    urls.append(tmpUrl + '|' + tmpCfg)
                else:
                    tmpList.items.remove(item)
        return ParsingResult(ParsingResult.Code.SUCCESS, tmpList)
"""
loads cfg, creates list and sets up rules for scraping
"""
    def __loadLocal(self, filename, lItem = None):
        """Locate a cfg file (searching module/favourite/custom dirs),
        apply '@'-separated parameters and custom replacements, and parse
        it into a list object. Returns None when the file cannot be found.
        """
        params = []
        #get Parameters: 'file.cfg@p1@p2' → filename plus parameter list
        if filename.find('@') != -1:
            params = filename.split('@')
            filename = params.pop(0)
        # get cfg file — try as given, then relative to the modules dir,
        # then relative to the cfg that defined this item.
        cfg = filename
        if not os.path.exists(cfg):
            cfg = os.path.join(common.Paths.modulesDir, filename)
            if not os.path.exists(cfg):
                tmpPath = os.path.dirname(os.path.join(common.Paths.modulesDir, lItem["definedIn"]))
                cfg = os.path.join(tmpPath ,filename)
                if not os.path.exists(cfg):
                    # Last resort: recursive search of the known cfg trees.
                    srchFilename = filename
                    if filename.find('/') > -1:
                        srchFilename = srchFilename.split('/')[1]
                    try:
                        cfg = findInSubdirectory(srchFilename, common.Paths.modulesDir)
                    except:
                        try:
                            cfg = findInSubdirectory(srchFilename, common.Paths.favouritesFolder)
                        except:
                            try:
                                cfg = findInSubdirectory(srchFilename, common.Paths.customModulesDir)
                            except:
                                common.log('File not found: ' + srchFilename)
                                return None
        #load file and apply parameters
        data = getFileContent(cfg)
        data = cr.CustomReplacements().replace(os.path.dirname(cfg), data, lItem, params)
        #log
        msg = 'Local file ' + filename + ' opened'
        if len(params) > 0:
            msg += ' with Parameter(s): '
            msg += ",".join(params)
        common.log(msg)
        outputList = self.__parseCfg(filename, data, lItem)
        return outputList
"""
scrape items according to rules and add them to the list
"""
    def __loadRemote(self, inputList, lItem):
        """
        Scrape items for lItem['url'] according to inputList's rules and
        append them to inputList.items.

        Performs at most two passes over the page: a plain fetch, then a
        retry with the cache bypassed and the source 'demystified'. Each
        pass tries, in order: matching the URL string itself, parsing the
        (optionally sectioned) html, rtmp detection, embedded javascript
        players, 'vcod' players, and finally plain redirects (a discovered
        redirect restarts the loop from the new URL).
        Returns True on success, False if the page could not be fetched.
        """
        try:
            inputList.curr_url = lItem['url']
            count = 0
            i = 1
            maxits = 2 # 1 optimistic + 1 demystified
            ignoreCache = False
            demystify = False
            back = ''
            startUrl = inputList.curr_url
            #print inputList, lItem
            while count == 0 and i <= maxits:
                if i > 1:
                    # second pass: refetch and decode obfuscated source
                    ignoreCache = True
                    demystify = True

                # Trivial: url is from known streamer
                if back:
                    lItem['referer'] = back
                items = self.__parseHtml(inputList.curr_url, '"' + inputList.curr_url + '"', inputList.rules, inputList.skill, inputList.cfg, lItem)
                count = len(items)

                # try to find items in html source code
                if count == 0:
                    referer = ''
                    if lItem['referer']:
                        referer = lItem['referer']
                    inputList.curr_url = HTMLParser.HTMLParser().unescape(urllib.unquote(inputList.curr_url))
                    data = common.getHTML(inputList.curr_url, None, referer, ignoreCache, demystify)
                    if data == '':
                        return False
                    msg = 'Remote URL ' + str(inputList.curr_url) + ' opened'
                    if demystify:
                        msg += ' (demystified)'
                    common.log(msg)
                    # narrow the html down to the configured section(s) first
                    if inputList.section != '':
                        section = inputList.section
                        data = self.__getSection(data, section)
                    if lItem['section']:
                        section = lItem['section']
                        data = self.__getSection(data, section)
                    items = self.__parseHtml(inputList.curr_url, data, inputList.rules, inputList.skill, inputList.cfg, lItem)
                    count = len(items)
                    common.log(' -> ' + str(count) + ' item(s) found')

                    # find rtmp stream
                    #common.log('Find rtmp stream')
                    if count == 0:
                        item = self.__findRTMP(data, startUrl, lItem)
                        if item:
                            items = []
                            items.append(item)
                            count = 1

                    # find embedding javascripts
                    #common.log('Find embedding javascripts')
                    if count == 0:
                        item = findJS(data)
                        if item:
                            firstJS = item[0]
                            streamId = firstJS[0]
                            jsUrl = firstJS[1]
                            streamerName = getHostName(jsUrl)
                            jsSource = getHTML(jsUrl, None, startUrl, True, False)
                            phpUrl = findPHP(jsSource, streamId)
                            if phpUrl:
                                data = getHTML(phpUrl, None, startUrl, True, True)
                                item = self.__findRTMP(data, phpUrl, lItem)
                                if item:
                                    if streamerName:
                                        item['title'] = item['title'].replace('RTMP', streamerName)
                                    items = []
                                    items.append(item)
                                    count = 1
                                else:
                                    # no stream in the player page: follow it as a redirect
                                    red = phpUrl
                                    common.log(' -> Redirect: ' + red)
                                    back = inputList.curr_url
                                    inputList.curr_url = red
                                    common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
                                    startUrl = red
                                    continue

                    # find vcods
                    #common.log('find vcods')
                    if count == 0:
                        vcods = findVCods(data)
                        if vcods:
                            sUrl = vcods[0]
                            cod1 = vcods[1]
                            cod2 = vcods[2]
                            swfUrl = vcods[3]
                            unixTS = str(dt.getUnixTimestamp())
                            sUrl = sUrl + '?callback=jQuery1707757964063647694_1347894980192&v_cod1=' + cod1 + '&v_cod2=' + cod2 + '&_=' + unixTS
                            tmpData = getHTML(sUrl, None, urllib.unquote_plus(startUrl), True, False)
                            if tmpData and tmpData.find("Bad Request") == -1:
                                newReg = '"result1":"([^\"]+)","result2":"([^\"]+)"'
                                link = regexUtils.findall(tmpData, newReg)
                                if link:
                                    _file = link[0][0]
                                    rtmp = link[0][1].replace('\\','')
                                    #.replace('/redirect','/vod')
                                    item = CListItem()
                                    item['title'] = getHostName(sUrl) + '* - ' + _file
                                    item['type'] = 'video'
                                    item['url'] = rtmp + ' playPath=' + _file + ' swfUrl=' + swfUrl +' swfVfy=1 live=true pageUrl=' + startUrl
                                    item.merge(lItem)
                                    items.append(item)
                                    count = 1

                    # find redirects
                    #common.log('find redirects')
                    if count == 0:
                        red = self.__findRedirect(startUrl, inputList.curr_url)
                        if startUrl == red:
                            common.log(' -> No redirect found')
                        else:
                            red = HTMLParser.HTMLParser().unescape(red)
                            red = urllib.unquote(red)
                            common.log(' -> Redirect: ' + red)
                            back = inputList.curr_url
                            inputList.curr_url = red
                            common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
                            startUrl = red
                            # redirect found: reset counter so the next i += 1
                            # restarts the two-pass loop on the new URL
                            i = 0
                i += 1

            if count != 0:
                inputList.items = inputList.items + items

        except IOError:
            if common.enable_debug:
                traceback.print_exc(file = sys.stdout)
            return False
        return True
def __findRTMP(self, data, pageUrl, lItem):
rtmp = findRTMP(pageUrl, data)
if rtmp:
item = CListItem()
item['title'] = 'RTMP* - ' + rtmp[1]
item['type'] = 'video'
item['url'] = rtmp[0] + ' playPath=' + rtmp[1] + ' swfUrl=' + rtmp[2] +' swfVfy=1 live=true pageUrl=' + pageUrl
item.merge(lItem)
return item
return None
def __getSection(self, data, section):
p = re.compile(section, re.IGNORECASE + re.DOTALL + re.UNICODE)
m = p.search(data)
if m:
return m.group(0)
else:
common.log(' -> Section could not be found:' + section)
return data
def __findRedirect(self, page, referer='', demystify=False):
data = common.getHTML(page, None, referer = referer, demystify = demystify)
if findVideoFrameLink(page, data):
return findVideoFrameLink(page, data)
elif findContentRefreshLink(data):
return findContentRefreshLink(data)
elif findEmbedPHPLink(data):
return findEmbedPHPLink(data)
if not demystify:
return self.__findRedirect(page, referer, True)
return page
    def __parseCfg(self, cfgFile, data, lItem):
        """
        Parse a cfg file's text into a CList.

        The cfg is a line-oriented 'key=value' format ('#' lines are
        comments). List-level keys (start/section/sort/skill/catcher) set
        attributes on the CList; 'item_infos' opens a new scraping rule
        which is completed by 'item_url_build'; 'item_info_*' keys build up
        the rule's CItemInfo entries; 'title'/'url' pairs define static
        menu items. Returns the populated CList.
        """
        tmpList = CList()
        data = data.replace('\r\n', '\n').split('\n')
        items = []
        tmp = None
        hasOwnCfg = False
        for m in data:
            if m and m[0] != '#':
                index = m.find('=')
                if index != -1:
                    key = lower(m[:index]).strip()
                    value = m[index+1:]
                    # expand 'sports.devil.locale|id' / 'sports.devil.image|name' values
                    index = value.find('|')
                    if value[:index] == 'sports.devil.locale':
                        value = common.translate(int(value[index+1:]))
                    elif value[:index] == 'sports.devil.image':
                        value = os.path.join(common.Paths.imgDir, value[index+1:])
                    if key == 'start':
                        tmpList.start = value
                    elif key == 'section':
                        tmpList.section = value
                    elif key == 'sort':
                        tmpList.sort = value
                    elif key == 'skill':
                        tmpList.skill = value
                    elif key == 'catcher':
                        tmpList.catcher = value
                    elif key == 'item_infos':
                        # 'item_infos' starts a fresh scraping rule
                        rule_tmp = CRuleItem()
                        hasOwnCfg = False
                        rule_tmp.infos = value
                    elif key == 'item_order':
                        rule_tmp.order = value
                    elif key == 'item_skill':
                        rule_tmp.skill = value
                    elif key == 'item_curr':
                        rule_tmp.curr = value
                    elif key == 'item_precheck':
                        rule_tmp.precheck = value
                    elif key.startswith('item_info'):
                        tmpkey = key[len('item_info'):]
                        if tmpkey == '_name':
                            # '_name' starts a new info entry for the current rule
                            info_tmp = CItemInfo()
                            info_tmp.name = value
                            if value == 'cfg':
                                hasOwnCfg = True
                        elif tmpkey == '_from':
                            info_tmp.src = value
                        elif tmpkey == '':
                            info_tmp.rule = value
                        elif tmpkey == '_default':
                            info_tmp.default = value
                        elif tmpkey == '_convert':
                            info_tmp.convert.append(value)
                        elif tmpkey == '_build':
                            # '_build' finishes the info entry
                            info_tmp.build = value
                            rule_tmp.info_list.append(info_tmp)
                    elif key == 'item_url_build':
                        # 'item_url_build' finishes the current rule
                        rule_tmp.url_build = value
                        if tmpList.catcher != '':
                            refInf = CItemInfo()
                            refInf.name = 'referer'
                            refInf.build = value
                            rule_tmp.info_list.append(refInf)
                            if not hasOwnCfg:
                                refInf = CItemInfo()
                                refInf.name = 'catcher'
                                refInf.build = tmpList.catcher
                                rule_tmp.info_list.append(refInf)
                        tmpList.rules.append(rule_tmp)

                    # static menu items (without regex)
                    elif key == 'title':
                        tmp = CListItem()
                        tmp['title'] = value
                        if tmpList.skill.find('videoTitle') > -1:
                            tmp['videoTitle'] = value
                    elif key == 'url':
                        # 'url' completes the static item opened by 'title'
                        tmp['url'] = value
                        if lItem:
                            tmp.merge(lItem)
                        if tmpList.catcher != '':
                            tmp['referer'] = value
                            if not hasOwnCfg:
                                tmp['catcher'] = tmpList.catcher
                        tmp['definedIn'] = cfgFile
                        items.append(tmp)
                        tmp = None
                    elif tmp != None:
                        # any other key between title and url becomes item data
                        if key == 'cfg':
                            hasOwnCfg = True
                        tmp[key] = value
        tmpList.items = items
        tmpList.cfg = cfgFile
        return tmpList
    def __parseHtml(self, url, data, rules, skills, definedIn, lItem):
        """
        Apply every scraping rule's regex to data and build one CListItem
        per match.

        Captured groups are mapped onto the names in item_rule.order, the
        rule's info_list entries are evaluated (running their conversion
        command chains), the final url is assembled via item_rule.url_build,
        and the 'append' / 'space' / 'videoTitle' skills are applied.
        Returns the list of created items.
        """
        #common.log('_parseHtml called')
        items = []
        for item_rule in rules:
            # common.log('rule: ' + item_rule.infos)
            if not hasattr(item_rule, 'precheck') or (item_rule.precheck in data):
                revid = re.compile(item_rule.infos, re.IGNORECASE + re.DOTALL + re.MULTILINE)
                for reinfos in revid.findall(data):
                    tmp = CListItem()
                    if lItem['referer']:
                        tmp['referer'] = lItem['referer']
                    if item_rule.order.find('|') != -1:
                        # several capture groups: map them by position
                        infos_names = item_rule.order.split('|')
                        infos_values = list(reinfos)
                        i = 0
                        for name in infos_names:
                            tmp[name] = infos_values[i]
                            i = i+1
                    else:
                        tmp[item_rule.order] = reinfos
                    for info in item_rule.info_list:
                        info_value = tmp[info.name]
                        if info_value:
                            # value already captured: only run it through build
                            if info.build.find('%s') != -1:
                                tmpVal = enc.smart_unicode(info.build % enc.smart_unicode(info_value))
                                tmp[info.name] = tmpVal
                            continue
                        if info.build.find('%s') != -1:
                            if info.src.__contains__('+'):
                                # concatenation of quoted literals and item fields
                                tmpArr = info.src.split('+')
                                src = ''
                                for t in tmpArr:
                                    t = t.strip()
                                    if t.find('\'') != -1:
                                        src = src + t.strip('\'')
                                    else:
                                        src = src + enc.smart_unicode(tmp[t])
                            elif info.src.__contains__('||'):
                                # first non-empty alternative wins
                                variables = info.src.split('||')
                                src = firstNonEmpty(tmp, variables)
                            else:
                                src = tmp[info.src]
                            if src and info.convert != []:
                                tmp['referer'] = url
                                src = self.__parseCommands(tmp, src, info.convert)
                                if isinstance(src, dict):
                                    # a dict result sets several fields at once
                                    for dKey in src:
                                        tmp[dKey] = src[dKey]
                                    src = src.values()[0]
                            info_value = info.build % (enc.smart_unicode(src))
                        else:
                            info_value = info.build
                        tmp[info.name] = info_value
                    tmp['url'] = enc.smart_unicode(item_rule.url_build % (enc.smart_unicode(tmp['url'])))
                    tmp.merge(lItem)
                    if item_rule.skill.find('append') != -1:
                        tmp['url'] = url + tmp['url']
                    if item_rule.skill.find('space') != -1:
                        tmp['title'] = ' %s ' % tmp['title'].strip()
                    if skills.find('videoTitle') > -1:
                        tmp['videoTitle'] = tmp['title']
                    tmp['definedIn'] = definedIn
                    items.append(tmp)
        return items
def __parseCommands(self, item, src, convCommands):
#common.log('_parseCommands called')
# helping function
def parseCommand(txt):
command = {"command": txt, "params": ""}
if txt.find("(") > -1:
command["command"] = txt[0:txt.find("(")]
command["params"] = txt[len(command["command"]) + 1:-1]
return command
try:
src = src.encode('utf-8')
except:
pass
for convCommand in convCommands:
pComm = parseCommand(convCommand)
command = pComm["command"]
params = pComm["params"]
if params.find('@REFERER@'):
referer = item['referer']
if not referer:
referer = ''
params = params.replace('@REFERER@', referer)
if command == 'convDate':
src = cc.convDate(params, src)
elif command == 'convTimestamp':
src = cc.convTimestamp(params, src)
elif command == 'select':
src = cc.select(params, src)
if not src:
continue
elif command == 'smart_unicode':
src = enc.smart_unicode(params.strip("'").replace('%s', src))
elif command == 'safeGerman':
src = enc.safeGerman(src)
elif command == 'safeRegex':
src = enc.safeRegexEncoding(params.strip("'").replace('%s', enc.smart_unicode(src)))
elif command == 'replaceFromDict':
dictName = str(params.strip('\''))
path = os.path.join(common.Paths.dictsDir, dictName + '.txt')
if not (os.path.exists(path)):
common.log('Dictionary file not found: ' + path)
continue
src = cc.replaceFromDict(path, src)
elif command == 'time':
src = time.time()
elif command == 'timediff':
src = dt.timediff(src,params.strip('\''))
elif command == 'offset':
src = cc.offset(params, src)
elif command == 'getSource':
src = cc.getSource(params, src)
elif command == 'getRedirect':
src = get_redirected_url(params.strip("'").replace('%s', src))
elif command == 'quote':
try:
src = urllib.quote(params.strip("'").replace('%s', urllib.quote(src)))
except:
cleanParams = params.strip("'")
cleanParams = cleanParams.replace("%s",src.encode('utf-8'))
src = urllib.quote(cleanParams)
elif command == 'unquote':
src = urllib.unquote(params.strip("'").replace('%s', src))
elif command == 'parseText':
src = cc.parseText(item, params, src)
elif command == 'getInfo':
src = cc.getInfo(item, params, src)
elif command == 'decodeBase64':
src = cc.decodeBase64(src)
elif command == 'decodeRawUnicode':
src = cc.decodeRawUnicode(src)
elif command == 'replace':
src = cc.replace(params, src)
elif command == 'replaceRegex':
src = cc.replaceRegex(params, src)
elif command == 'ifEmpty':
src = cc.ifEmpty(item, params, src)
elif command == 'isEqual':
src = cc.isEqual(item, params, src)
elif command == 'ifFileExists':
src = cc.ifFileExists(item, params, src)
elif command == 'ifExists':
src = cc.ifExists(item, params, src)
elif command == 'encryptJimey':
src = crypt.encryptJimey(params.strip("'").replace('%s', src))
elif command == 'destreamer':
src = crypt.destreamer(params.strip("'").replace('%s', src))
elif command == 'unixTimestamp':
src = dt.getUnixTimestamp()
elif command == 'urlMerge':
src = cc.urlMerge(params, src)
elif command == 'translate':
try:
src = common.translate(int(src))
except:
pass
elif command == 'camelcase':
src = enc.smart_unicode(src)
src = string.capwords(string.capwords(src, '-'))
elif command == 'demystify':
print 'demystify'
src = crypt.doDemystify(src)
print 'after demystify',src
elif command == 'random':
paramArr = params.split(',')
minimum = int(paramArr[0])
maximum = int(paramArr[1])
src = str(random.randrange(minimum,maximum))
elif command == 'debug':
common.log('Debug from cfg file: ' + src)
elif command == 'divide':
paramArr = params.split(',')
a = paramArr[0].strip().strip("'").replace('%s', src)
a = resolveVariable(a, item)
b = paramArr[1].strip().strip("'").replace('%s', src)
b = resolveVariable(b, item)
if not a or not b:
continue
a = int(a)
b = int(b)
try:
src = str(a/b)
except:
pass
return src
def resolveVariable(varStr, item):
    """Resolve an '@name@' placeholder via item.getInfo; return plain strings as-is."""
    isPlaceholder = varStr.startswith('@') and varStr.endswith('@')
    if not isPlaceholder:
        return varStr
    return item.getInfo(varStr.strip('@'))
def firstNonEmpty(tmp, variables):
    """Return the first non-empty candidate from variables.

    Quoted candidates ('literal') are taken literally; bare names are
    resolved through tmp.getInfo. Falls back to '' if nothing matches.
    """
    for candidate in variables:
        candidate = candidate.strip()
        if "'" in candidate:
            value = candidate.strip("'")
        else:
            value = tmp.getInfo(candidate)
        if value != '':
            return value
    return ''
|
Pirata-Repository/Pirata
|
plugin.video.SportsDevil/lib/parser.py
|
Python
|
gpl-2.0
| 27,695
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/snapshot.ui'
#
# Created: Mon Aug 31 02:59:12 2015
# by: PyQt4 UI code generator 4.11.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_ScreenshotWidget(object):
    """pyuic4-generated UI: a large screenshot preview label, a 'New'
    button row, and a collapsible group box with a horizontal scroll
    area holding extra screenshot thumbnails.

    NOTE: generated from ui/snapshot.ui -- regenerating the .ui file
    will overwrite any edits here (see the header warning).
    """

    def setupUi(self, ScreenshotWidget):
        # --- top-level widget sizing ---
        ScreenshotWidget.setObjectName(_fromUtf8("ScreenshotWidget"))
        ScreenshotWidget.resize(342, 700)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(4)
        sizePolicy.setVerticalStretch(4)
        sizePolicy.setHeightForWidth(ScreenshotWidget.sizePolicy().hasHeightForWidth())
        ScreenshotWidget.setSizePolicy(sizePolicy)
        ScreenshotWidget.setMinimumSize(QtCore.QSize(340, 600))
        ScreenshotWidget.setMouseTracking(True)
        self.layout = QtGui.QVBoxLayout(ScreenshotWidget)
        self.layout.setMargin(1)
        self.layout.setObjectName(_fromUtf8("layout"))
        # --- main screenshot preview (custom ActLabel) ---
        self.currentScreenshot = ActLabel(ScreenshotWidget)
        self.currentScreenshot.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(3)
        sizePolicy.setVerticalStretch(3)
        sizePolicy.setHeightForWidth(self.currentScreenshot.sizePolicy().hasHeightForWidth())
        self.currentScreenshot.setSizePolicy(sizePolicy)
        self.currentScreenshot.setMinimumSize(QtCore.QSize(300, 0))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.currentScreenshot.setFont(font)
        self.currentScreenshot.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))
        self.currentScreenshot.setMouseTracking(True)
        self.currentScreenshot.setStyleSheet(_fromUtf8("QLabel{\n"
"border-color: rgb(154, 154, 154); \n"
"border-style: solid; \n"
"border-width: 2px; \n"
"border-radius: 6px;\n"
"}"))
        self.currentScreenshot.setFrameShape(QtGui.QFrame.StyledPanel)
        self.currentScreenshot.setFrameShadow(QtGui.QFrame.Sunken)
        self.currentScreenshot.setScaledContents(True)
        self.currentScreenshot.setAlignment(QtCore.Qt.AlignCenter)
        self.currentScreenshot.setWordWrap(True)
        self.currentScreenshot.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
        self.currentScreenshot.setObjectName(_fromUtf8("currentScreenshot"))
        self.layout.addWidget(self.currentScreenshot)
        # --- button row with right-aligned 'New' button ---
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
        self.buttonLayout.addItem(spacerItem)
        self.newS = QtGui.QPushButton(ScreenshotWidget)
        self.newS.setMinimumSize(QtCore.QSize(44, 44))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.newS.setFont(font)
        self.newS.setMouseTracking(True)
        self.newS.setFocusPolicy(QtCore.Qt.NoFocus)
        self.newS.setStyleSheet(_fromUtf8("QPushButton{ \n"
"    background-color: rgb(255, 255, 255);\n"
"    border-style: outset;\n"
"    border-width: 1px;\n"
"    border-radius: 6px;\n"
"    border-color: rgb(193, 193, 193);\n"
"    border-style: solid;\n"
"    padding: 6px;\n"
"    \n"
"}\n"
"QPushButton:pressed { \n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-radius: 6px;\n"
"    background-color: rgb(48, 131, 251);\n"
"    color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
"    border-color: rgb(164, 205, 255);\n"
"    border-radius: 6px;\n"
"    border-width: 3px;\n"
"    border-style: solid;\n"
"}"))
        self.newS.setObjectName(_fromUtf8("newS"))
        self.buttonLayout.addWidget(self.newS)
        self.layout.addLayout(self.buttonLayout)
        # --- collapsible group holding the extra screenshots strip ---
        self.extraScreenGroup = FocusGroupBox(ScreenshotWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(8)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.extraScreenGroup.sizePolicy().hasHeightForWidth())
        self.extraScreenGroup.setSizePolicy(sizePolicy)
        self.extraScreenGroup.setMinimumSize(QtCore.QSize(0, 30))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.extraScreenGroup.setFont(font)
        self.extraScreenGroup.setMouseTracking(True)
        self.extraScreenGroup.setCheckable(True)
        self.extraScreenGroup.setObjectName(_fromUtf8("extraScreenGroup"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.extraScreenGroup)
        self.horizontalLayout.setMargin(3)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.scrollArea = QtGui.QScrollArea(self.extraScreenGroup)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
        self.scrollArea.setSizePolicy(sizePolicy)
        self.scrollArea.setMinimumSize(QtCore.QSize(0, 110))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.scrollArea.setFont(font)
        self.scrollArea.setMouseTracking(True)
        self.scrollArea.setFocusPolicy(QtCore.Qt.NoFocus)
        self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
        self.extraScreenshots = QtGui.QWidget()
        self.extraScreenshots.setGeometry(QtCore.QRect(0, 0, 326, 110))
        self.extraScreenshots.setMinimumSize(QtCore.QSize(0, 110))
        self.extraScreenshots.setMouseTracking(True)
        self.extraScreenshots.setObjectName(_fromUtf8("extraScreenshots"))
        self.extraScreenshotLayout = QtGui.QHBoxLayout(self.extraScreenshots)
        self.extraScreenshotLayout.setMargin(0)
        self.extraScreenshotLayout.setObjectName(_fromUtf8("extraScreenshotLayout"))
        self.scrollArea.setWidget(self.extraScreenshots)
        self.horizontalLayout.addWidget(self.scrollArea)
        self.layout.addWidget(self.extraScreenGroup)

        self.retranslateUi(ScreenshotWidget)
        # un-checking the group box hides the thumbnail strip
        QtCore.QObject.connect(self.extraScreenGroup, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.scrollArea.setVisible)
        QtCore.QMetaObject.connectSlotsByName(ScreenshotWidget)

    def retranslateUi(self, ScreenshotWidget):
        # User-visible strings, routed through Qt's translation machinery.
        ScreenshotWidget.setWindowTitle(_translate("ScreenshotWidget", "Form", None))
        self.currentScreenshot.setAccessibleName(_translate("ScreenshotWidget", "S", None))
        self.currentScreenshot.setText(_translate("ScreenshotWidget", "Select a snapshot", None))
        self.newS.setText(_translate("ScreenshotWidget", "New", None))
        self.extraScreenGroup.setAccessibleName(_translate("ScreenshotWidget", "AS", None))
        self.extraScreenGroup.setTitle(_translate("ScreenshotWidget", "Screenshots", None))
from aui.mi.visual import FocusGroupBox
from aui.utilities.ActiveLabel import ActLabel
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
ScreenshotWidget = QtGui.QWidget()
ui = Ui_ScreenshotWidget()
ui.setupUi(ScreenshotWidget)
ScreenshotWidget.show()
sys.exit(app.exec_())
|
argenortega/AUI
|
aui/gui/snapshots/ui_snapshot.py
|
Python
|
mit
| 7,897
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'France - VAT Anti-Fraud Certification (CGI 286 I-3 bis)',
'version': '1.0',
'category': 'Accounting',
'description': """
This add-on brings the technical requirements of the French regulation CGI art. 286, I. 3° bis that stipulates certain criteria concerning the inalterability, security, storage and archiving of data related to sales to private individuals (B2C).
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
The module adds following features:
Inalterability: deactivation of all the ways to cancel or modify key data, invoices and journal entries
Security: chaining algorithm to verify the inalterability
Storage: automatic sales closings with computation of both period and cumulative totals (daily, monthly, annually)
Access to download the mandatory Certificate of Conformity delivered by Odoo SA (only for Odoo Enterprise users)
""",
'depends': ['l10n_fr'],
'installable': True,
'auto_install': False,
'application': False,
'data': [
'data/account_move.xml',
'views/res_config.xml',
],
'post_init_hook': '_setup_inalterability',
}
|
t3dev/odoo
|
addons/l10n_fr_certification/__manifest__.py
|
Python
|
gpl-3.0
| 1,434
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Compute score for 2 random uniform cluster labelings.

    Both random labelings have the same number of clusters for each value
    possible value in ``n_clusters_range``.

    When fixed_n_classes is not None the first labeling is considered a ground
    truth class assignment with fixed number of classes.
    """
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(n_clusters_range), n_runs))

    # With a fixed class count, the "ground truth" labeling is drawn once
    # and reused for every (n_clusters, run) pair.
    if fixed_n_classes is not None:
        labels_a = rng.randint(low=0, high=fixed_n_classes, size=n_samples)

    for row, n_clusters in enumerate(n_clusters_range):
        for run in range(n_runs):
            if fixed_n_classes is None:
                labels_a = rng.randint(low=0, high=n_clusters, size=n_samples)
            labels_b = rng.randint(low=0, high=n_clusters, size=n_samples)
            scores[row, run] = score_func(labels_a, labels_b)
    return scores
def ami_score(U, V):
    # Named wrapper so the plot legend (built from score_func.__name__)
    # reads 'ami_score' instead of the long sklearn function name.
    return metrics.adjusted_mutual_info_score(U, V)
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    ami_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number
n_samples = 100
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `int` is the documented drop-in replacement.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    # errorbar returns (line, caplines, barlinecols); keep the line for the legend
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(bottom=-0.05, top=1.05)

# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(bottom=-0.05, top=1.05)
plt.legend(plots, names)
plt.show()
|
chrsrds/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
Python
|
bsd-3-clause
| 4,351
|
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
class VirtualInterface(base.NovaPersistentObject, base.NovaObject):
    """Nova object wrapping a single virtual_interfaces DB row.

    Each lookup classmethod returns None implicitly when no row matches
    (the ``if db_vif:`` guard falls through without a return).
    """
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.IntegerField(),
        # presumably the interface's MAC address -- confirm against schema
        'address': fields.StringField(nullable=True),
        'network_id': fields.IntegerField(),
        'instance_uuid': fields.UUIDField(),
        'uuid': fields.UUIDField(),
    }

    @staticmethod
    def _from_db_object(context, vif, db_vif):
        # Copy every declared field straight from the DB row, then mark
        # the object clean so these copies don't register as pending changes.
        for field in vif.fields:
            vif[field] = db_vif[field]
        vif._context = context
        vif.obj_reset_changes()
        return vif

    @base.remotable_classmethod
    def get_by_id(cls, context, vif_id):
        """Fetch by integer primary key; None if absent."""
        db_vif = db.virtual_interface_get(context, vif_id)
        if db_vif:
            return cls._from_db_object(context, cls(), db_vif)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, vif_uuid):
        """Fetch by VIF uuid; None if absent."""
        db_vif = db.virtual_interface_get_by_uuid(context, vif_uuid)
        if db_vif:
            return cls._from_db_object(context, cls(), db_vif)

    @base.remotable_classmethod
    def get_by_address(cls, context, address):
        """Fetch by address; None if absent."""
        db_vif = db.virtual_interface_get_by_address(context, address)
        if db_vif:
            return cls._from_db_object(context, cls(), db_vif)

    @base.remotable_classmethod
    def get_by_instance_and_network(cls, context, instance_uuid, network_id):
        """Fetch the VIF joining an instance to a network; None if absent."""
        db_vif = db.virtual_interface_get_by_instance_and_network(context,
                instance_uuid, network_id)
        if db_vif:
            return cls._from_db_object(context, cls(), db_vif)

    @base.remotable
    def create(self, context):
        """Persist this object as a new DB row.

        Raises ObjectActionError if the object already has an id
        (i.e. was already created).
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        db_vif = db.virtual_interface_create(context, updates)
        self._from_db_object(context, self, db_vif)

    @base.remotable_classmethod
    def delete_by_instance_uuid(cls, context, instance_uuid):
        """Delete all VIF rows belonging to the given instance."""
        db.virtual_interface_delete_by_instance(context, instance_uuid)
class VirtualInterfaceList(base.ObjectListBase, base.NovaObject):
    """List object holding VirtualInterface entries."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('VirtualInterface'),
    }
    # mapping of list version -> contained object version
    child_versions = {
        '1.0': '1.0',
    }

    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every virtual interface in the database."""
        db_vifs = db.virtual_interface_get_all(context)
        return base.obj_make_list(context, cls(context),
                                  objects.VirtualInterface, db_vifs)

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
        """Return the instance's VIFs; use_slave allows reading a DB replica."""
        db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid,
                                                       use_slave=use_slave)
        return base.obj_make_list(context, cls(context),
                                  objects.VirtualInterface, db_vifs)
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/objects/virtual_interface.py
|
Python
|
gpl-2.0
| 3,724
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2010-2013, Ryan Fan <ryan.fan@oracle.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from sdk.recv.base import WXMPRecvEvent
from sdk.recv import RECV_EVENT_TYPE
class WXMPRecvScanEvent(WXMPRecvEvent):
    """Incoming WeChat MP 'SCAN' event.

    Extracts the mandatory Event / EventKey / Ticket entries from the
    parsed message meta and fails fast with an Exception if any of the
    three is missing or empty.
    """
    def __init__(self, core):
        super(WXMPRecvScanEvent, self).__init__(core)
        self.type = RECV_EVENT_TYPE.SCAN

        self.event = self.meta.get('Event', None)
        if not self.event:
            raise Exception("Failed to initialize WXMPRecvScanEvent because Event doesn't exist!")

        self.event_key = self.meta.get('EventKey', None)
        if not self.event_key:
            raise Exception("Failed to initialize WXMPRecvScanEvent because EventKey doesn't exist!")

        self.ticket = self.meta.get('Ticket', None)
        if not self.ticket:
            raise Exception("Failed to initialize WXMPRecvScanEvent because Ticket doesn't exist!")

    def debug(self):
        # Dump the event fields to stdout (Python 2 print statement --
        # this module targets Python 2).
        print "{0}(Event:{1}, EventKey:{2}, Ticket:{3})".format(
            self.__class__.__name__,
            self.event,
            self.event_key,
            self.ticket)
|
rfancn/wxgigo
|
wxgigo/wxmp/sdk/recv/event/scan.py
|
Python
|
mit
| 1,792
|
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import numpy.testing as npt
import pdb
import gsd.hoomd
import sys
import clustering as cl
#from context import clustering as cl
#from context import smoluchowski as smol
from cdistances import conOptDistanceCython,alignDistancesCython
#import imp
#cl = imp.load_source('cl','/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/clustering.py')
data_path = op.join(cl.__path__[0], 'data')
def test_write_out_frame():
    """End-to-end check of cluster-ID export: cluster the 8-molecule test
    trajectory on the 'contact' metric, write per-frame cluster IDs to a
    file, then re-read frame 35 and dump it in a VMD-readable format."""
    fname = 'mols8.gsd'
    traj = gsd.hoomd.open(op.join(data_path, fname))
    box = traj[0].configuration.box
    # 17 atoms per molecule for the contact metric -- TODO confirm
    ats = {'contact':17}
    # squared cutoff (1.1^2), presumably in simulation length units
    cutoff= 1.1*1.1
    molno = 8
    cldict = {'contact':cutoff}
    syst = cl.SnapSystem(traj,ats,molno,cldict)
    syst.get_clusters_serial('contact',box)
    syst.writeCIDs('contact',op.join(data_path,'mols8cIDs.dat'))

    # re-read the cluster IDs of frame 35 (one whitespace-separated line per frame)
    cIDfile = op.join(data_path,'mols8cIDs.dat')
    cIDfile = open(cIDfile)
    lines = cIDfile.readlines()
    cIDfile.close()
    line = lines[35]
    cIDsf = [float(c) for c in line.split()]
    cIDs = [int(c) for c in cIDsf]
    cl.writeFrameForVMD(cIDs,molno,ats['contact'],
                        op.join(data_path,'testframe35.dat'))
|
ramansbach/cluster_analysis
|
clustering/tests/test_visualization.py
|
Python
|
mit
| 1,247
|
"""Resize fields
Revision ID: f2b0984f780
Revises: 37e42fa9d88e
Create Date: 2015-07-03 12:35:58.448260
"""
# revision identifiers, used by Alembic.
revision = 'f2b0984f780'
down_revision = '37e42fa9d88e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Widen rlmss.name and rlmss.location to Unicode(255)."""
    op.alter_column("rlmss", "name", type_ = sa.Unicode(255))
    op.alter_column("rlmss", "location", type_ = sa.Unicode(255))
def downgrade():
    """Intentionally a no-op: the columns are not shrunk back."""
    pass
|
labsland/labmanager
|
alembic/versions/f2b0984f780_resize_fields.py
|
Python
|
bsd-2-clause
| 430
|
from __future__ import absolute_import
from .dicty import *
|
ales-erjavec/orange-bio
|
orangecontrib/bio/obiDicty.py
|
Python
|
gpl-3.0
| 61
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import pprint
import pydoc
import re
from collections import namedtuple
from sacred.utils import PATHCHANGE, iterate_flattened_separately
__sacred__ = True  # marks files that should be filtered from stack traces
# ANSI escape sequences used to colour-code configuration entries.
BLUE = '\033[94m'
GREEN = '\033[92m'
RED = '\033[91m'
ENDC = '\033[0m'  # resets terminal attributes back to default
def non_unicode_repr(objekt, context, maxlevels, level):
    """
    Used to override the pprint format method to get rid of unicode prefixes.
    E.g.: 'John' instead of u'John'.

    Returns the same (repr_string, isreadable, isrecursive) triple as
    ``pprint._safe_repr``.
    """
    try:
        repr_string, isreadable, isrecursive = pprint._safe_repr(
            objekt, context, maxlevels, level)
    except TypeError:
        # Python >= 3.8 added a mandatory ``sort_dicts`` argument to the
        # private ``_safe_repr`` helper; retry with its default value.
        repr_string, isreadable, isrecursive = pprint._safe_repr(
            objekt, context, maxlevels, level, True)
    if repr_string.startswith('u"') or repr_string.startswith("u'"):
        repr_string = repr_string[1:]
    return repr_string, isreadable, isrecursive
# Module-level pretty-printer; its format hook strips u'' prefixes.
PRINTER = pprint.PrettyPrinter()
PRINTER.format = non_unicode_repr
# Records for flattened config items: a leaf value and a nested path.
ConfigEntry = namedtuple('ConfigEntry', 'key value added modified typechanged')
PathEntry = namedtuple('PathEntry', 'key added modified typechanged')
def iterate_marked(cfg, config_mods):
    """Yield (path, entry) pairs for *cfg*, annotated with modification flags.

    Paths whose value is the PATHCHANGE marker yield a PathEntry; all other
    paths yield a ConfigEntry carrying the value as well.
    """
    for path, value in iterate_flattened_separately(cfg):
        flags = dict(
            key=path.rpartition('.')[2],
            added=path in config_mods.added,
            modified=path in config_mods.modified,
            typechanged=config_mods.typechanged.get(path))
        if value is PATHCHANGE:
            yield path, PathEntry(**flags)
        else:
            yield path, ConfigEntry(value=value, **flags)
def format_entry(entry):
    """Return the colour-coded one-line rendering of a config entry."""
    if entry.typechanged:
        color = RED
    elif entry.added:
        color = GREEN
    elif entry.modified:
        color = BLUE
    else:
        color = ""
    end = ENDC if color else ""
    if isinstance(entry, ConfigEntry):
        text = entry.key + " = " + PRINTER.pformat(entry.value)
    else:  # isinstance(entry, PathEntry):
        text = entry.key + ":"
    return color + text + end
def format_config(cfg, config_mods):
    """Return the multi-line, colour-annotated rendering of a configuration."""
    rendered = ['Configuration ' + LEGEND + ':']
    for path, entry in iterate_marked(cfg, config_mods):
        indent = '  ' + '  ' * path.count('.')
        rendered.append(indent + format_entry(entry))
    return "\n".join(rendered)
# Colour legend appended to the "Configuration" header line.
LEGEND = '(' + BLUE + 'modified' + ENDC +\
         ', ' + GREEN + 'added' + ENDC +\
         ', ' + RED + 'typechanged' + ENDC + ')'
def print_config(_run):
    """
    Print the updated configuration and exit.
    Text is highlighted:
      green:  value added
      blue:   value modified
      red:    value modified but type changed
    """
    # NOTE: the legend above matches format_entry(): GREEN marks added
    # entries and BLUE marks modified ones (the old docstring had them
    # swapped).
    final_config = _run.config
    config_mods = _run.config_modifications
    print(format_config(final_config, config_mods))
def help_for_command(command):
    """Return the pydoc help text for *command*, without backspace sequences.

    pydoc renders bold/underlined text as "char + backspace + char"; strip
    those control sequences so the text prints cleanly.
    """
    raw = pydoc.text.document(command)
    cleaned, _count = re.subn('.\\x08', '', raw)
    return cleaned
def print_dependencies(_run):
    """Print the detected source-files and dependencies."""
    info = _run.experiment_info
    print('Sources:')
    for source, digest in info['sources']:
        print(' {:<43} {}'.format(source, digest))
    print('\nDependencies:')
    for pack, version in info['dependencies']:
        print(' {:<20} >= {}'.format(pack, version))
|
kudkudak/sacred
|
sacred/commands.py
|
Python
|
mit
| 3,454
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.const.eos import ModDomain
from eos.const.eos import State
from eos.const.eve import AttrId
from eos.util.repr import make_repr_str
from .mixin.state import ImmutableStateMixin
class Implant(ImmutableStateMixin):
    """Represents an implant.
    Args:
        type_id: Identifier of item type which should serve as base for this
            implant.
    """
    def __init__(self, type_id):
        # Implants are fixed in the offline state (state is immutable via
        # ImmutableStateMixin).
        super().__init__(type_id=type_id, state=State.offline)
    # Item-specific properties
    @property
    def slot(self):
        """Return slot this implant takes.

        Read from the item type's "implantness" attribute; None if absent.
        """
        return self._type_attrs.get(AttrId.implantness)
    # Attribute calculation-related properties
    # Modifiers carried by implants apply within the character domain.
    _modifier_domain = ModDomain.character
    _owner_modifiable = False
    _solsys_carrier = None
    # Auxiliary methods
    def __repr__(self):
        # Render as e.g. Implant(type_id=123).
        spec = [['type_id', '_type_id']]
        return make_repr_str(self, spec)
|
pyfa-org/eos
|
eos/item/implant.py
|
Python
|
lgpl-3.0
| 1,813
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Management of sessions - saved tabs/windows."""
import os
import sip
import os.path
from PyQt5.QtCore import pyqtSignal, QUrl, QObject, QPoint, QTimer
from PyQt5.QtWidgets import QApplication
import yaml
try:
from yaml import CSafeLoader as YamlLoader, CSafeDumper as YamlDumper
except ImportError: # pragma: no cover
from yaml import SafeLoader as YamlLoader, SafeDumper as YamlDumper
from qutebrowser.browser import tabhistory
from qutebrowser.utils import (standarddir, objreg, qtutils, log, usertypes,
message)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.mainwindow import mainwindow
from qutebrowser.config import config
# Sentinel used to detect "no session name given" in command arguments.
default = object()  # Sentinel value
def init(parent=None):
    """Initialize sessions.
    Args:
        parent: The parent to use for the SessionManager.
    """
    data_dir = standarddir.data()
    if data_dir is None:
        # No data storage configured; SessionManager handles a None
        # base_path gracefully.
        base_path = None
    else:
        base_path = os.path.join(data_dir, 'sessions')
        # Only try to create the directory when we actually have a path:
        # the original called os.mkdir(None) here, raising TypeError when
        # no data directory was available.
        try:
            os.mkdir(base_path)
        except FileExistsError:
            pass
    session_manager = SessionManager(base_path, parent)
    objreg.register('session-manager', session_manager)
class SessionError(Exception):
    """Exception raised when a session failed to load/save.

    Base class for session-related errors; the session commands below catch
    this to report failures to the user.
    """
class SessionNotFoundError(SessionError):
    """Exception raised when a session to be loaded was not found.

    Carries the session name or path that could not be resolved.
    """
class SessionManager(QObject):
    """Manager for sessions.
    Attributes:
        _base_path: The path to store sessions under.
        _last_window_session: The session data of the last window which was
                              closed.
        _current: The name of the currently loaded session, or None.
        did_load: Set when a session was loaded.
    Signals:
        update_completion: Emitted when the session completion should get
                           updated.
    """
    update_completion = pyqtSignal()
    def __init__(self, base_path, parent=None):
        super().__init__(parent)
        self._current = None
        self._base_path = base_path
        self._last_window_session = None
        self.did_load = False
    def _get_session_path(self, name, check_exists=False):
        """Get the session path based on a session name or absolute path.
        Args:
            name: The name of the session.
            check_exists: Whether it should also be checked if the session
                          exists.
        """
        path = os.path.expanduser(name)
        # Absolute paths are used verbatim; plain names resolve to
        # <base_path>/<name>.yml.
        if os.path.isabs(path) and ((not check_exists) or
                                    os.path.exists(path)):
            return path
        elif self._base_path is None:
            if check_exists:
                raise SessionNotFoundError(name)
            else:
                return None
        else:
            path = os.path.join(self._base_path, name + '.yml')
            if check_exists and not os.path.exists(path):
                raise SessionNotFoundError(path)
            else:
                return path
    def exists(self, name):
        """Check if a named session exists."""
        try:
            self._get_session_path(name, check_exists=True)
        except SessionNotFoundError:
            return False
        else:
            return True
    def _save_tab(self, tab, active):
        """Get a dict with data for a single tab.
        Args:
            tab: The WebView to save.
            active: Whether the tab is currently active.
        """
        data = {'history': []}
        if active:
            data['active'] = True
        history = tab.page().history()
        for idx, item in enumerate(history.items()):
            qtutils.ensure_valid(item)
            item_data = {
                'url': bytes(item.url().toEncoded()).decode('ascii'),
                'title': item.title(),
            }
            if item.originalUrl() != item.url():
                encoded = item.originalUrl().toEncoded()
                item_data['original-url'] = bytes(encoded).decode('ascii')
            if history.currentItemIndex() == idx:
                item_data['active'] = True
            user_data = item.userData()
            # For the item currently shown, capture the live zoom/scroll
            # state; for other items fall back to what was stored as user
            # data on the history entry.
            if history.currentItemIndex() == idx:
                pos = tab.page().mainFrame().scrollPosition()
                item_data['zoom'] = tab.zoomFactor()
                item_data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
            elif user_data is not None:
                if 'zoom' in user_data:
                    item_data['zoom'] = user_data['zoom']
                if 'scroll-pos' in user_data:
                    pos = user_data['scroll-pos']
                    item_data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
            data['history'].append(item_data)
        return data
    def _save_all(self):
        """Get a dict with data for all windows/tabs."""
        data = {'windows': []}
        for win_id in objreg.window_registry:
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=win_id)
            main_window = objreg.get('main-window', scope='window',
                                     window=win_id)
            # We could be in the middle of destroying a window here
            if sip.isdeleted(main_window):
                continue
            win_data = {}
            active_window = QApplication.instance().activeWindow()
            if getattr(active_window, 'win_id', None) == win_id:
                win_data['active'] = True
            win_data['geometry'] = bytes(main_window.saveGeometry())
            win_data['tabs'] = []
            for i, tab in enumerate(tabbed_browser.widgets()):
                active = i == tabbed_browser.currentIndex()
                win_data['tabs'].append(self._save_tab(tab, active))
            data['windows'].append(win_data)
        return data
    def _get_session_name(self, name):
        """Helper for save to get the name to save the session to.
        Args:
            name: The name of the session to save, or the 'default' sentinel
                  object.
        """
        # Fall back to the configured default name, then the currently
        # loaded session, then the literal name 'default'.
        if name is default:
            name = config.get('general', 'session-default-name')
            if name is None:
                if self._current is not None:
                    name = self._current
                else:
                    name = 'default'
        return name
    def save(self, name, last_window=False, load_next_time=False):
        """Save a named session.
        Args:
            name: The name of the session to save, or the 'default' sentinel
                  object.
            last_window: If set, saves the saved self._last_window_session
                         instead of the currently open state.
            load_next_time: If set, prepares this session to be load next time.
        Return:
            The name of the saved session.
        """
        name = self._get_session_name(name)
        path = self._get_session_path(name)
        if path is None:
            raise SessionError("No data storage configured.")
        log.sessions.debug("Saving session {} to {}...".format(name, path))
        if last_window:
            data = self._last_window_session
            assert data is not None
        else:
            data = self._save_all()
        log.sessions.vdebug("Saving data: {}".format(data))
        try:
            # savefile_open writes atomically; errors are wrapped in
            # SessionError so callers only need one except clause.
            with qtutils.savefile_open(path) as f:
                yaml.dump(data, f, Dumper=YamlDumper, default_flow_style=False,
                          encoding='utf-8', allow_unicode=True)
        except (OSError, UnicodeEncodeError, yaml.YAMLError) as e:
            raise SessionError(e)
        else:
            self.update_completion.emit()
        if load_next_time:
            state_config = objreg.get('state-config')
            state_config['general']['session'] = name
        return name
    def save_last_window_session(self):
        """Temporarily save the session for the last closed window."""
        self._last_window_session = self._save_all()
    def _load_tab(self, new_tab, data):
        """Load yaml data into a newly opened tab."""
        entries = []
        for histentry in data['history']:
            user_data = {}
            if 'zoom' in data:
                # The zoom was accidentally stored in 'data' instead of per-tab
                # earlier.
                # See https://github.com/The-Compiler/qutebrowser/issues/728
                user_data['zoom'] = data['zoom']
            elif 'zoom' in histentry:
                user_data['zoom'] = histentry['zoom']
            if 'scroll-pos' in data:
                # The scroll position was accidentally stored in 'data' instead
                # of per-tab earlier.
                # See https://github.com/The-Compiler/qutebrowser/issues/728
                pos = data['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
            elif 'scroll-pos' in histentry:
                pos = histentry['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
            active = histentry.get('active', False)
            url = QUrl.fromEncoded(histentry['url'].encode('ascii'))
            if 'original-url' in histentry:
                orig_url = QUrl.fromEncoded(
                    histentry['original-url'].encode('ascii'))
            else:
                orig_url = url
            entry = tabhistory.TabHistoryItem(
                url=url, original_url=orig_url, title=histentry['title'],
                active=active, user_data=user_data)
            entries.append(entry)
            if active:
                new_tab.titleChanged.emit(histentry['title'])
        try:
            new_tab.page().load_history(entries)
        except ValueError as e:
            raise SessionError(e)
    def load(self, name, temp=False):
        """Load a named session.
        Args:
            name: The name of the session to load.
            temp: If given, don't set the current session.
        """
        path = self._get_session_path(name, check_exists=True)
        try:
            with open(path, encoding='utf-8') as f:
                data = yaml.load(f, Loader=YamlLoader)
        except (OSError, UnicodeDecodeError, yaml.YAMLError) as e:
            raise SessionError(e)
        log.sessions.debug("Loading session {} from {}...".format(name, path))
        for win in data['windows']:
            window = mainwindow.MainWindow(geometry=win['geometry'])
            window.show()
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=window.win_id)
            tab_to_focus = None
            for i, tab in enumerate(win['tabs']):
                new_tab = tabbed_browser.tabopen()
                self._load_tab(new_tab, tab)
                if tab.get('active', False):
                    tab_to_focus = i
            if tab_to_focus is not None:
                tabbed_browser.setCurrentIndex(tab_to_focus)
            if win.get('active', False):
                QTimer.singleShot(0, tabbed_browser.activateWindow)
        self.did_load = True
        # Internal sessions (leading underscore) never become "current".
        if not name.startswith('_') and not temp:
            self._current = name
    def delete(self, name):
        """Delete a session."""
        path = self._get_session_path(name, check_exists=True)
        os.remove(path)
        self.update_completion.emit()
    def list_sessions(self):
        """Get a list of all session names."""
        sessions = []
        if self._base_path is None:
            return sessions
        for filename in os.listdir(self._base_path):
            base, ext = os.path.splitext(filename)
            if ext == '.yml':
                sessions.append(base)
        return sessions
    @cmdutils.register(completion=[usertypes.Completion.sessions],
                       instance='session-manager')
    def session_load(self, name, clear=False, temp=False, force=False):
        """Load a session.
        Args:
            name: The name of the session.
            clear: Close all existing windows.
            temp: Don't set the current session for :session-save.
            force: Force loading internal sessions (starting with an
                   underline).
        """
        if name.startswith('_') and not force:
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to load anyways.".format(name))
        # Remember the pre-load windows so --clear only closes those.
        old_windows = list(objreg.window_registry.values())
        try:
            self.load(name, temp=temp)
        except SessionNotFoundError:
            raise cmdexc.CommandError("Session {} not found!".format(name))
        except SessionError as e:
            raise cmdexc.CommandError("Error while loading session: {}"
                                      .format(e))
        else:
            if clear:
                for win in old_windows:
                    win.close()
    @cmdutils.register(name=['session-save', 'w'], win_id='win_id',
                       completion=[usertypes.Completion.sessions],
                       instance='session-manager')
    def session_save(self, win_id, name: {'type': str}=default, current=False,
                     quiet=False, force=False):
        """Save a session.
        Args:
            win_id: The current window ID.
            name: The name of the session. If not given, the session configured
                  in general -> session-default-name is saved.
            current: Save the current session instead of the default.
            quiet: Don't show confirmation message.
            force: Force saving internal sessions (starting with an underline).
        """
        if (name is not default and
                name.startswith('_') and  # pylint: disable=no-member
                not force):
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to save anyways.".format(name))
        if current:
            if self._current is None:
                raise cmdexc.CommandError("No session loaded currently!")
            name = self._current
            assert not name.startswith('_')
        try:
            name = self.save(name)
        except SessionError as e:
            raise cmdexc.CommandError("Error while saving session: {}"
                                      .format(e))
        else:
            if not quiet:
                message.info(win_id, "Saved session {}.".format(name),
                             immediately=True)
    @cmdutils.register(completion=[usertypes.Completion.sessions],
                       instance='session-manager')
    def session_delete(self, name, force=False):
        """Delete a session.
        Args:
            name: The name of the session.
            force: Force deleting internal sessions (starting with an
                   underline).
        """
        if name.startswith('_') and not force:
            raise cmdexc.CommandError("{} is an internal session, use --force "
                                      "to delete anyways.".format(name))
        try:
            self.delete(name)
        except SessionNotFoundError:
            raise cmdexc.CommandError("Session {} not found!".format(name))
        except (OSError, SessionError) as e:
            log.sessions.exception("Error while deleting session!")
            raise cmdexc.CommandError("Error while deleting session: {}"
                                      .format(e))
|
haxwithaxe/qutebrowser
|
qutebrowser/misc/sessions.py
|
Python
|
gpl-3.0
| 16,440
|
#!/usr/bin/env python
# coding=utf-8
"""385. Ellipses inside triangles
https://projecteuler.net/problem=385
For any triangle T in the plane, it can be shown that there is a unique
ellipse with largest area that is completely inside T.

For a given n, consider triangles T such that:
- the vertices of T have integer coordinates with absolute value ≤ n, and
- the **foci**¹ of the largest-area ellipse inside T are (√13,0) and
(-√13,0).
Let A(n) be the sum of the areas of all such triangles.
For example, if n = 8, there are two such triangles. Their vertices are
(-4,-3),(-4,3),(8,0) and (4,3),(4,-3),(-8,0), and the area of each triangle is
36. Thus A(8) = 36 + 36 = 72.
It can be verified that A(10) = 252, A(100) = 34632 and A(1000) = 3529008.
Find A(1 000 000 000).
¹ The **foci** (plural of **focus**) of an ellipse are two points A and B such
that for every point P on the boundary of the ellipse, AP + PB is constant.
"""
|
openqt/algorithms
|
projecteuler/pe385-ellipses-inside-triangles.py
|
Python
|
gpl-3.0
| 1,022
|
#!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port.webkit import WebKitPort
from webkitpy.layout_tests.port.config_mock import MockConfig
from webkitpy.tool.mocktool import MockOptions
class TestWebKitPort(WebKitPort):
    """Minimal concrete WebKitPort used as a fixture by the tests below."""
    port_name = "testwebkitport"
    def __init__(self, symbols_string=None,
                 expectations_file=None, skips_file=None, host=None, config=None,
                 **kwargs):
        # NOTE(review): expectations_file and skips_file are accepted but
        # never used here -- presumably kept for signature compatibility;
        # confirm against callers before removing.
        self.symbols_string = symbols_string  # Passing "" disables all staticly-detectable features.
        host = host or MockSystemHost()
        config = config or MockConfig()
        WebKitPort.__init__(self, host=host, config=config, **kwargs)
    def all_test_configurations(self):
        # Collapse to the single current configuration for test purposes.
        return [self.test_configuration()]
    def _symbols_string(self):
        return self.symbols_string
    def _tests_for_other_platforms(self):
        return ["media", ]
    def _tests_for_disabled_features(self):
        return ["accessibility", ]
class WebKitPortTest(port_testcase.PortTestCase):
    """Unit tests for WebKitPort behavior, exercised via TestWebKitPort.

    Uses assertEqual throughout: assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """
    port_name = 'webkit'
    port_maker = TestWebKitPort
    def test_check_build(self):
        pass
    def test_driver_cmd_line(self):
        pass
    def test_baseline_search_path(self):
        pass
    def test_path_to_test_expectations_file(self):
        port = TestWebKitPort()
        port._options = MockOptions(webkit_test_runner=False)
        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
        port = TestWebKitPort()
        port._options = MockOptions(webkit_test_runner=True)
        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
        port = TestWebKitPort()
        port.host.filesystem.files['/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations'] = 'some content'
        port._options = MockOptions(webkit_test_runner=False)
        self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
    def test_skipped_directories_for_features(self):
        supported_features = ["Accelerated Compositing", "Foo Feature"]
        expected_directories = set(["animations/3d", "transforms/3d"])
        port = TestWebKitPort(None, supported_features)
        port._runtime_feature_list = lambda: supported_features
        result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
        self.assertEqual(result_directories, expected_directories)
    def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
        supported_features = ["Accelerated Compositing", "Foo Feature"]
        expected_directories = set([])
        result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
        self.assertEqual(result_directories, expected_directories)
    def test_skipped_tests_for_unsupported_features_empty_test_list(self):
        supported_features = ["Accelerated Compositing", "Foo Feature"]
        expected_directories = set([])
        result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=None))
        self.assertEqual(result_directories, expected_directories)
    def test_skipped_layout_tests(self):
        self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(test_list=[]), set(['media']))
    def test_skipped_file_search_paths(self):
        port = TestWebKitPort()
        self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport']))
        port._name = "testwebkitport-version"
        self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version']))
        port._options = MockOptions(webkit_test_runner=True)
        self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2']))
        port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
        self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport']))
    def test_root_option(self):
        port = TestWebKitPort()
        port._options = MockOptions(root='/foo')
        self.assertEqual(port._path_to_driver(), "/foo/DumpRenderTree")
    def test_test_expectations(self):
        # Check that we read the expectations file
        host = MockSystemHost()
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations',
            'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
        port = TestWebKitPort(host=host)
        self.assertEqual(''.join(port.expectations_dict().values()), 'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = TEXT\n')
    def test_build_driver(self):
        output = OutputCapture()
        port = TestWebKitPort()
        # Delay setting _executive to avoid logging during construction
        port._executive = MockExecutive(should_log=True)
        port._options = MockOptions(configuration="Release")  # This should not be necessary, but I think TestWebKitPort is actually reading from disk (and thus detects the current configuration).
        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
        # Make sure when passed --webkit-test-runner we build the right tool.
        port._options = MockOptions(webkit_test_runner=True, configuration="Release")
        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\nMOCK run_command: ['Tools/Scripts/build-webkittestrunner', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))
        # Make sure we show the build log when --verbose is passed, which we simulate by setting the logging level to DEBUG.
        output.set_log_level(logging.DEBUG)
        port._options = MockOptions(configuration="Release")
        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
        expected_logs = "Output of ['Tools/Scripts/build-dumprendertree', '--release']:\nMOCK output of child process\n"
        self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
        output.set_log_level(logging.INFO)
        # Make sure that failure to build returns False.
        port._executive = MockExecutive(should_log=True, should_throw=True)
        # Because WK2 currently has to build both webkittestrunner and DRT, if DRT fails, that's the only one it tries.
        expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
        expected_logs = "MOCK ScriptError\n\nMOCK output of child process\n"
        self.assertFalse(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
    def _assert_config_file_for_platform(self, port, platform, config_file):
        # Helper: check which Apache config file a platform string maps to.
        self.assertEqual(port._apache_config_file_name_for_platform(platform), config_file)
    def test_linux_distro_detection(self):
        port = TestWebKitPort()
        self.assertFalse(port._is_redhat_based())
        self.assertFalse(port._is_debian_based())
        port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
        self.assertTrue(port._is_redhat_based())
        self.assertFalse(port._is_debian_based())
        port._filesystem = MockFileSystem({'/etc/debian_version': ''})
        self.assertFalse(port._is_redhat_based())
        self.assertTrue(port._is_debian_based())
    def test_apache_config_file_name_for_platform(self):
        port = TestWebKitPort()
        self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
        self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
        self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
        port._is_redhat_based = lambda: True
        self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd.conf')
        port = TestWebKitPort()
        port._is_debian_based = lambda: True
        self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
        self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
        self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf')  # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
        self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
    def test_path_to_apache_config_file(self):
        port = TestWebKitPort()
        # Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
        port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
        self.assertEqual(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
|
youfoh/webkit-efl
|
Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
|
Python
|
lgpl-2.1
| 11,495
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq map grn`."""
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (Personality, PersonalityGrnMap, HostGrnMap,
Cluster)
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import validate_prod_personality
from aquilon.worker.dbwrappers.grn import lookup_grn
from aquilon.worker.dbwrappers.host import (hostname_to_host, hostlist_to_hosts,
check_hostlist_size)
class CommandMapGrn(BrokerCommand):
    """Broker command implementing `aq map grn`.

    Maps a GRN to a set of hosts or to a personality stage, after checking
    that the requested target is valid for each object's archetype.
    """
    requires_plenaries = True
    required_parameters = ["target"]
    require_usable_grn = True
    def _update_dbobj(self, obj, target, grn, mapcls):
        # Don't add twice the same tuple
        for grn_rec in obj.grns:
            if grn == grn_rec.grn and target == grn_rec.target:
                return
        obj.grns.append(mapcls(grn=grn, target=target))
    def render(self, session, logger, plenaries, target, grn, eon_id, hostname, list,
               membersof, personality, personality_stage, archetype,
               justification, reason, user, **_):
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config,
                           usable_only=self.require_usable_grn)
        # Exactly one of hostname / list / membersof / personality selects
        # the objects to map the GRN onto.
        if hostname:
            objs = [hostname_to_host(session, hostname)]
            mapcls = HostGrnMap
            config_key = "host_grn_targets"
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
            mapcls = HostGrnMap
            config_key = "host_grn_targets"
        elif membersof:
            dbcluster = Cluster.get_unique(session, membersof, compel=True)
            objs = dbcluster.hosts
            mapcls = HostGrnMap
            config_key = "host_grn_targets"
        elif personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            objs = [dbpersonality.active_stage(personality_stage)]
            mapcls = PersonalityGrnMap
            config_key = "personality_grn_targets"
            validate_prod_personality(objs[0], user, justification, reason, logger)
        for obj in objs:
            # Valid GRN targets are configured per archetype.
            section = "archetype_" + obj.archetype.name
            if self.config.has_option(section, config_key):
                valid_targets = [s.strip() for s in
                                 self.config.get(section, config_key).split(",")]
            else:
                raise ArgumentError("{0} has no valid GRN targets configured."
                                    .format(obj.archetype))
            if target not in valid_targets:
                raise ArgumentError("Invalid target %s for archetype %s, please "
                                    "choose from: %s." %
                                    (target, obj.archetype.name,
                                     ", ".join(valid_targets)))
            plenaries.add(obj)
            self._update_dbobj(obj, target, dbgrn, mapcls)
        session.flush()
        plenaries.write()
        return
|
guillaume-philippon/aquilon
|
lib/aquilon/worker/commands/map_grn.py
|
Python
|
apache-2.0
| 4,020
|
"""
OEDocking utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
from openeye.oechem import *
from openeye.oedocking import *
def read_receptor(filename):
    """
    Read a prepared OEDocking receptor from a file.

    Parameters
    ----------
    filename : str
        Path to the receptor file.

    Returns
    -------
    OEMol
        The receptor molecule.

    Raises
    ------
    RuntimeError
        If the file cannot be read or yields an invalid receptor.
    """
    receptor = OEMol()
    # OEReadReceptorFile returns False on failure. The original used a bare
    # assert, which is silently stripped under ``python -O``; raise instead.
    if not OEReadReceptorFile(receptor, filename):
        raise RuntimeError('Failed to read receptor from %s.' % filename)
    if not receptor.IsValid():
        raise RuntimeError('Invalid receptor read from %s.' % filename)
    return receptor
class Docker(OEDock):
    """
    Dock multiconformer molecules against a receptor and score the poses.

    Parameters
    ----------
    receptor : OEMol
        Prepared receptor to dock against.
    scoring : int, optional (default OEDockMethod_Default)
        Docking method identifier.
    resolution : int, optional (default OESearchResolution_Default)
        Search resolution for the docking run.
    n_poses : int, optional (default 1)
        Number of poses to keep per docked molecule.
    component_scores : bool, optional (default True)
        If True, also record each component of the total score in SD fields.
    annotate : bool, optional (default False)
        If True, annotate poses with per-atom score contributions.
    """
    def __init__(self, receptor, scoring=OEDockMethod_Default,
                 resolution=OESearchResolution_Default, n_poses=1,
                 component_scores=True, annotate=False):
        super(Docker, self).__init__(scoring, resolution)
        self.n_poses = n_poses
        self.component_scores = component_scores
        self.annotate = annotate
        if not self.Initialize(receptor):
            raise RuntimeError('Docking engine initialization failed.')
        # First column is the total score; optional component score names
        # follow in sorted order.
        self.scores = [self.GetName()]
        if component_scores:
            self.scores.extend(sorted(self.GetComponentNames()))

    def __call__(self, mol):
        """
        Dock a multiconformer molecule (delegates to :meth:`dock`).

        Parameters
        ----------
        mol : OEMol
            Molecule to dock.
        """
        return self.dock(mol)

    def dock(self, mol):
        """
        Dock a multiconformer molecule, saving component scores in SD fields.

        Parameters
        ----------
        mol : OEMol
            Molecule to dock.

        Returns
        -------
        (OEMol, ndarray or None)
            Docked poses and their scores, or the input molecule and None
            when docking produced no valid poses.
        """
        docked = OEMol()
        self.DockMultiConformerMolecule(docked, mol, self.n_poses)
        if not docked.IsValid():
            return mol, None
        return docked, self.score(docked)

    def score(self, poses):
        """
        Score docked poses and stamp each score into an SD data field.

        Parameters
        ----------
        poses : OEMol
            Docked poses.
        """
        n_cols = len(self.scores)
        scores = np.zeros((poses.NumConfs(), n_cols), dtype=float)
        for i, pose in enumerate(poses.GetConfs()):
            scores[i, 0] = self.ScoreLigand(pose)
            OESetSDData(pose, self.GetName(), str(scores[i, 0]))
            if not self.component_scores:
                continue
            for j, name in enumerate(self.scores[1:], start=1):
                scores[i, j] = self.ScoreLigandComponent(pose, name)
                OESetSDData(pose, name, str(scores[i, j]))
        if self.annotate:
            self.AnnotatePose(poses)
        return np.squeeze(scores)
class HybridDocker(Docker):
    """
    Hybrid docking: identical to :class:`Docker` except that the default
    scoring method is OEDockMethod_Hybrid, which exploits bound-ligand
    information in the receptor.

    Parameters
    ----------
    receptor : OEMol
        Prepared receptor to dock against.
    scoring : int, optional (default OEDockMethod_Hybrid)
        Docking method identifier.
    resolution : int, optional (default OESearchResolution_Default)
        Search resolution for the docking run.
    n_poses : int, optional (default 1)
        Number of poses to keep per docked molecule.
    component_scores : bool, optional (default True)
        If True, also record each component of the total score in SD fields.
    annotate : bool, optional (default False)
        If True, annotate poses with per-atom score contributions.
    """
    def __init__(self, receptor, scoring=OEDockMethod_Hybrid,
                 resolution=OESearchResolution_Default, n_poses=1,
                 component_scores=True, annotate=False):
        super(HybridDocker, self).__init__(
            receptor, scoring=scoring, resolution=resolution,
            n_poses=n_poses, component_scores=component_scores,
            annotate=annotate)
|
skearnes/color-features
|
oe_utils/docking/__init__.py
|
Python
|
bsd-3-clause
| 4,260
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is largely copied from the Nagios module included in the
# Func project. Original copyright follows:
#
# func-nagios - Schedule downtime and enables/disable notifications
# Copyright 2011, Red Hat, Inc.
# Tim Bielawa <tbielawa@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license version 2 or any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
 - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
- When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
- When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
action:
description:
- Action to take.
- servicegroup options were added in 2.0.
required: true
choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
description:
- Host to operate on in Nagios.
required: false
default: null
cmdfile:
description:
- Path to the nagios I(command file) (FIFO pipe).
Only required if auto-detection fails.
required: false
default: auto-detected
author:
description:
- Author to leave downtime comments as.
Only usable with the C(downtime) action.
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false
default: Scheduling downtime
minutes:
description:
- Minutes to schedule downtime for.
- Only usable with the C(downtime) action.
required: false
default: 30
services:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
C(service) is an alias for C(services).
B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions.
aliases: [ "service" ]
required: true
servicegroup:
version_added: "2.0"
description:
- The Servicegroup we want to set downtimes/alerts for.
        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
command:
description:
- The raw command to send to nagios, which
should not include the submitted time header or the line-feed
B(Required) option when using the C(command) action.
required: true
author: "Tim Bielawa (@tbielawa)"
'''
EXAMPLES = '''
# set 30 minutes of apache downtime
- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
# schedule an hour of HOST downtime
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
# schedule an hour of HOST downtime, with a comment describing the reason
- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
comment='This host needs disciplined'
# schedule downtime for ALL services on HOST
- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
# schedule downtime for a few services
- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
# set 30 minutes downtime for all services in servicegroup foo
- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# set 30 minutes downtime for all host in servicegroup foo
- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
# enable SMART disk alerts
- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
# "two services at once: disable httpd and nfs alerts"
- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
# disable HOST alerts
- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
# silence ALL alerts
- nagios: action=silence host={{ inventory_hostname }}
# unsilence all alerts
- nagios: action=unsilence host={{ inventory_hostname }}
# SHUT UP NAGIOS
- nagios: action=silence_nagios
# ANNOY ME NAGIOS
- nagios: action=unsilence_nagios
# command something
- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
'''
import ConfigParser
import types
import time
import os.path
######################################################################
def which_cmdfile():
    """Auto-detect the Nagios external command file (FIFO).

    Scans well-known nagios/icinga configuration locations and returns the
    value of the first ``command_file`` directive found, or None when no
    configuration file contains one.
    """
    locations = [
        # rhel
        '/etc/nagios/nagios.cfg',
        # debian
        '/etc/nagios3/nagios.cfg',
        # older debian
        '/etc/nagios2/nagios.cfg',
        # bsd, solaris
        '/usr/local/etc/nagios/nagios.cfg',
        # groundwork it monitoring
        '/usr/local/groundwork/nagios/etc/nagios.cfg',
        # open monitoring distribution
        '/omd/sites/oppy/tmp/nagios/nagios.cfg',
        # ???
        '/usr/local/nagios/etc/nagios.cfg',
        '/usr/local/nagios/nagios.cfg',
        '/opt/nagios/etc/nagios.cfg',
        '/opt/nagios/nagios.cfg',
        # icinga on debian/ubuntu
        '/etc/icinga/icinga.cfg',
        # icinga installed from source (default location)
        '/usr/local/icinga/etc/icinga.cfg',
    ]

    for path in locations:
        if not os.path.exists(path):
            continue
        # Close the config file promptly instead of leaking the handle
        # (the original iterated over a bare open()).
        with open(path) as cfg:
            for line in cfg:
                if line.startswith('command_file'):
                    return line.split('=')[1].strip()

    return None
######################################################################
def main():
    """Ansible module entry point: parse parameters, validate the
    action-specific requirements, then delegate to Nagios.act().

    NOTE(review): module.fail_json() terminates the process, so every
    validation below relies on not returning after a failure.
    """
    ACTION_CHOICES = [
        'downtime',
        'silence',
        'unsilence',
        'enable_alerts',
        'disable_alerts',
        'silence_nagios',
        'unsilence_nagios',
        'command',
        'servicegroup_host_downtime',
        'servicegroup_service_downtime',
        ]

    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, default=None, choices=ACTION_CHOICES),
            author=dict(default='Ansible'),
            comment=dict(default='Scheduling downtime'),
            host=dict(required=False, default=None),
            servicegroup=dict(required=False, default=None),
            minutes=dict(default=30),
            # cmdfile defaults to whatever config scan finds at load time
            cmdfile=dict(default=which_cmdfile()),
            services=dict(default=None, aliases=['service']),
            command=dict(required=False, default=None),
            )
        )

    action = module.params['action']
    host = module.params['host']
    servicegroup = module.params['servicegroup']
    minutes = module.params['minutes']
    services = module.params['services']
    cmdfile = module.params['cmdfile']
    command = module.params['command']

    ##################################################################
    # Required args per action:
    # downtime = (minutes, service, host)
    # (un)silence = (host)
    # (enable/disable)_alerts = (service, host)
    # command = command
    #
    # AnsibleModule will verify most stuff, we need to verify
    # 'minutes' and 'service' manually.
    ##################################################################

    # Every action except these three operates on a specific host.
    if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
        if not host:
            module.fail_json(msg='no host specified for action requiring one')
    ######################################################################
    if action == 'downtime':
        # Make sure there's an actual service selected
        if not services:
            module.fail_json(msg='no service selected to set downtime for')
        # Make sure minutes is a number
        # NOTE(review): types.IntType exists only on Python 2; on py2,
        # int() of a huge value returns a long, which this check rejects.
        try:
            m = int(minutes)
            if not isinstance(m, types.IntType):
                module.fail_json(msg='minutes must be a number')
        except Exception:
            module.fail_json(msg='invalid entry for minutes')

    ######################################################################
    if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
        # Make sure there's an actual servicegroup selected
        if not servicegroup:
            module.fail_json(msg='no servicegroup selected to set downtime for')
        # Make sure minutes is a number (same py2 check as above)
        try:
            m = int(minutes)
            if not isinstance(m, types.IntType):
                module.fail_json(msg='minutes must be a number')
        except Exception:
            module.fail_json(msg='invalid entry for minutes')

    ##################################################################
    if action in ['enable_alerts', 'disable_alerts']:
        if not services:
            module.fail_json(msg='a service is required when setting alerts')

    if action in ['command']:
        if not command:
            module.fail_json(msg='no command passed for command action')
    ##################################################################
    # Without a command file there is no way to talk to Nagios at all.
    if not cmdfile:
        module.fail_json(msg='unable to locate nagios.cfg')

    ##################################################################
    ansible_nagios = Nagios(module, **module.params)
    if module.check_mode:
        # Check mode: report "would change" without touching the FIFO.
        module.exit_json(changed=True)
    else:
        ansible_nagios.act()
    ##################################################################
##################################################################
######################################################################
class Nagios(object):
    """
    Perform common tasks in Nagios related to downtime and
    notifications.

    The complete set of external commands Nagios handles is documented
    on their website:

    http://old.nagios.org/developerinfo/externalcommands/commandlist.php

    Note that in the case of `schedule_svc_downtime`,
    `enable_svc_notifications`, and `disable_svc_notifications`, the
    service argument should be passed as a list.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.action = kwargs['action']
        self.author = kwargs['author']
        self.comment = kwargs['comment']
        self.host = kwargs['host']
        self.servicegroup = kwargs['servicegroup']
        self.minutes = int(kwargs['minutes'])
        self.cmdfile = kwargs['cmdfile']
        self.command = kwargs['command']

        # 'host' and 'all' are sentinel values handled specially in act();
        # anything else is a comma-separated list of service names.
        if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
            self.services = kwargs['services']
        else:
            self.services = kwargs['services'].split(',')

        self.command_results = []

    def _now(self):
        """
        The time in seconds since 12:00:00AM Jan 1, 1970
        """
        return int(time.time())

    def _write_command(self, cmd):
        """
        Write the given command to the Nagios command file.

        Returns True on success.  On failure this calls fail_json, which
        terminates the module run.
        """
        try:
            fp = open(self.cmdfile, 'w')
            fp.write(cmd)
            fp.flush()
            fp.close()
            self.command_results.append(cmd.strip())
            # BUGFIX: the original implicitly returned None, so every caller
            # that checked "if nagios_return:" always took its failure branch
            # and reported "Fail: could not write to the command file" even
            # though the write succeeded.
            return True
        except IOError:
            self.module.fail_json(msg='unable to write to nagios command file',
                                  cmdfile=self.cmdfile)

    def _fmt_dt_str(self, cmd, host, duration, author=None,
                    comment=None, start=None,
                    svc=None, fixed=1, trigger=0):
        """
        Format an external-command downtime string.

        cmd - Nagios command ID
        host - Host schedule downtime on
        duration - Minutes to schedule downtime for
        author - Name to file the downtime as
        comment - Reason for running this command (upgrade, reboot, etc)
        start - Start of downtime in seconds since 12:00AM Jan 1 1970
          Default is to use the entry time (now)
        svc - Service to schedule downtime for, omit when for host downtime
        fixed - Start now if 1, start when a problem is detected if 0
        trigger - Optional ID of event to start downtime from. Leave as 0 for
          fixed downtime.

        Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        entry_time = self._now()
        if start is None:
            start = entry_time

        hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
        duration_s = (duration * 60)
        end = start + duration_s

        if not author:
            author = self.author

        if not comment:
            comment = self.comment

        if svc is not None:
            dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]
        else:
            # Downtime for a host if no svc specified
            dt_args = [str(start), str(end), str(fixed), str(trigger),
                       str(duration_s), author, comment]

        dt_arg_str = ";".join(dt_args)
        dt_str = hdr + dt_arg_str + "\n"

        return dt_str

    def _fmt_notif_str(self, cmd, host=None, svc=None):
        """
        Format an external-command notification string.

        cmd - Nagios command ID.
        host - Host to en/disable notifications on.. A value is not required
          for global downtime
        svc - Service to schedule downtime for. A value is not required
          for host downtime.

        Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
        """
        entry_time = self._now()
        notif_str = "[%s] %s" % (entry_time, cmd)
        if host is not None:
            notif_str += ";%s" % host

            if svc is not None:
                notif_str += ";%s" % svc

        notif_str += "\n"

        return notif_str

    def schedule_svc_downtime(self, host, services=None, minutes=30):
        """
        This command is used to schedule downtime for a particular
        service.

        During the specified downtime, Nagios will not send
        notifications out about the service.

        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SVC_DOWNTIME"
        if services is None:
            services = []
        for service in services:
            dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service)
            self._write_command(dt_cmd_str)

    def schedule_host_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for a particular
        host.

        During the specified downtime, Nagios will not send
        notifications out about the host.

        Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def schedule_host_svc_downtime(self, host, minutes=30):
        """
        This command is used to schedule downtime for
        all services associated with a particular host.

        During the specified downtime, Nagios will not send
        notifications out about the host.

        SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
        <fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
        self._write_command(dt_cmd_str)

    def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular hostgroup.

        During the specified downtime, Nagios will not send
        notifications out about the hosts.

        Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular hostgroup.

        During the specified downtime, Nagios will not send
        notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
        <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
        """
        cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all hosts in a
        particular servicegroup.

        During the specified downtime, Nagios will not send
        notifications out about the hosts.

        Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30):
        """
        This command is used to schedule downtime for all services in
        a particular servicegroup.

        During the specified downtime, Nagios will not send
        notifications out about the services.

        Note that scheduling downtime for services does not
        automatically schedule downtime for the hosts those services
        are associated with.

        Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
        <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
        <comment>
        """
        cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
        dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes)
        self._write_command(dt_cmd_str)

    def disable_host_svc_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for all services on the specified host.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_host_notifications(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the specified host.

        Note that this command does not disable notifications for
        services associated with this host.

        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "DISABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def disable_svc_notifications(self, host, services=None):
        """
        This command is used to prevent notifications from being sent
        out for the specified service.

        Note that this command does not disable notifications from
        being sent out about the host.

        Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "DISABLE_SVC_NOTIFICATIONS"
        if services is None:
            services = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            self._write_command(notif_str)

    def disable_servicegroup_host_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all hosts in the specified servicegroup.

        Note that this command does not disable notifications for
        services associated with hosts in this service group.

        Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_servicegroup_svc_notifications(self, servicegroup):
        """
        This command is used to prevent notifications from being sent
        out for all services in the specified servicegroup.

        Note that this does not prevent notifications from being sent
        out about the hosts in this servicegroup.

        Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        self._write_command(notif_str)

    def disable_hostgroup_host_notifications(self, hostgroup):
        """
        Disables notifications for all hosts in a particular
        hostgroup.

        Note that this does not disable notifications for the services
        associated with the hosts in the hostgroup - see the
        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.

        Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def disable_hostgroup_svc_notifications(self, hostgroup):
        """
        Disables notifications for all services associated with hosts
        in a particular hostgroup.

        Note that this does not disable notifications for the hosts in
        the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
        command for that.

        Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        self._write_command(notif_str)

    def enable_host_notifications(self, host):
        """
        Enables notifications for a particular host.

        Note that this command does not enable notifications for
        services associated with this host.

        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        self._write_command(notif_str)

    def enable_host_svc_notifications(self, host):
        """
        Enables notifications for all services on the specified host.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        """
        cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, host)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_svc_notifications(self, host, services=None):
        """
        Enables notifications for a particular service.

        Note that this does not enable notifications for the host.

        Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
        """
        cmd = "ENABLE_SVC_NOTIFICATIONS"
        if services is None:
            services = []

        nagios_return = True
        return_str_list = []
        for service in services:
            notif_str = self._fmt_notif_str(cmd, host, svc=service)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_host_notifications(self, hostgroup):
        """
        Enables notifications for all hosts in a particular hostgroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this hostgroup.

        Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_hostgroup_svc_notifications(self, hostgroup):
        """
        Enables notifications for all services that are associated
        with hosts in a particular hostgroup.

        Note that this does not enable notifications for the hosts in
        this hostgroup.

        Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
        """
        cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, hostgroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_host_notifications(self, servicegroup):
        """
        Enables notifications for all hosts that have services that
        are members of a particular servicegroup.

        Note that this command does not enable notifications for
        services associated with the hosts in this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def enable_servicegroup_svc_notifications(self, servicegroup):
        """
        Enables notifications for all services that are members of a
        particular servicegroup.

        Note that this does not enable notifications for the hosts in
        this servicegroup.

        Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
        """
        cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
        notif_str = self._fmt_notif_str(cmd, servicegroup)
        nagios_return = self._write_command(notif_str)

        if nagios_return:
            return notif_str
        else:
            return "Fail: could not write to the command file"

    def silence_host(self, host):
        """
        This command is used to prevent notifications from being sent
        out for the host and all services on the specified host.

        This is equivalent to calling disable_host_svc_notifications
        and disable_host_notifications.

        Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "DISABLE_HOST_SVC_NOTIFICATIONS",
            "DISABLE_HOST_NOTIFICATIONS"
            ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def unsilence_host(self, host):
        """
        This command is used to enable notifications for the host and
        all services on the specified host.

        This is equivalent to calling enable_host_svc_notifications
        and enable_host_notifications.

        Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
        Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
        """
        cmd = [
            "ENABLE_HOST_SVC_NOTIFICATIONS",
            "ENABLE_HOST_NOTIFICATIONS"
            ]
        nagios_return = True
        return_str_list = []
        for c in cmd:
            notif_str = self._fmt_notif_str(c, host)
            nagios_return = self._write_command(notif_str) and nagios_return
            return_str_list.append(notif_str)

        if nagios_return:
            return return_str_list
        else:
            return "Fail: could not write to the command file"

    def silence_nagios(self):
        """
        This command is used to disable notifications for all hosts and services
        in nagios.

        This is a 'SHUT UP, NAGIOS' command
        """
        cmd = 'DISABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def unsilence_nagios(self):
        """
        This command is used to enable notifications for all hosts and services
        in nagios.

        This is a 'OK, NAGIOS, GO' command
        """
        cmd = 'ENABLE_NOTIFICATIONS'
        self._write_command(self._fmt_notif_str(cmd))

    def nagios_cmd(self, cmd):
        """
        This sends an arbitrary command to nagios.

        It prepends the submitted time and appends a \n.
        You just have to provide the properly formatted command.
        """
        pre = '[%s]' % int(time.time())

        post = '\n'
        cmdstr = '%s %s %s' % (pre, cmd, post)
        self._write_command(cmdstr)

    def act(self):
        """
        Figure out what you want to do from ansible, and then do the
        needful (at the earliest).
        """
        # host or service downtime?
        if self.action == 'downtime':
            if self.services == 'host':
                self.schedule_host_downtime(self.host, self.minutes)
            elif self.services == 'all':
                self.schedule_host_svc_downtime(self.host, self.minutes)
            else:
                self.schedule_svc_downtime(self.host,
                                           services=self.services,
                                           minutes=self.minutes)

        elif self.action == "servicegroup_host_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_host_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)
        elif self.action == "servicegroup_service_downtime":
            if self.servicegroup:
                self.schedule_servicegroup_svc_downtime(
                    servicegroup=self.servicegroup, minutes=self.minutes)

        # toggle the host AND service alerts
        elif self.action == 'silence':
            self.silence_host(self.host)

        elif self.action == 'unsilence':
            self.unsilence_host(self.host)

        # toggle host/svc alerts
        elif self.action == 'enable_alerts':
            if self.services == 'host':
                self.enable_host_notifications(self.host)
            elif self.services == 'all':
                self.enable_host_svc_notifications(self.host)
            else:
                self.enable_svc_notifications(self.host,
                                              services=self.services)

        elif self.action == 'disable_alerts':
            if self.services == 'host':
                self.disable_host_notifications(self.host)
            elif self.services == 'all':
                self.disable_host_svc_notifications(self.host)
            else:
                self.disable_svc_notifications(self.host,
                                               services=self.services)
        elif self.action == 'silence_nagios':
            self.silence_nagios()

        elif self.action == 'unsilence_nagios':
            self.unsilence_nagios()

        elif self.action == 'command':
            self.nagios_cmd(self.command)

        # wtf?
        else:
            self.module.fail_json(msg="unknown action specified: '%s'" % \
                self.action)

        self.module.exit_json(nagios_commands=self.command_results,
                              changed=True)
######################################################################
# import module snippets
from ansible.module_utils.basic import *
# Invoke the module entry point at import time (standard Ansible module pattern).
main()
|
chepazzo/ansible-modules-extras
|
monitoring/nagios.py
|
Python
|
gpl-3.0
| 33,186
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
# Version identifiers for the yacc module and its generated table files.
__version__ = '3.7'
__tabversion__ = '3.5'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility (Python 2 has 'basestring'; Python 3 only 'str')
if sys.version_info[0] < 3:
    string_types = basestring
else:
    string_types = str
MAXINT = sys.maxsize # Largest platform integer
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a ``logging`` logger.

    Writes pre-formatted messages straight to the file-like object *f*.
    PLY uses this by default for diagnostic output (e.g. parser.out);
    callers wanting richer behavior can supply a real logging object
    with the same method names instead.
    """
    def __init__(self, f):
        self.f = f

    def debug(self, msg, *args, **kwargs):
        self.f.write('%s\n' % (msg % args))

    # 'info' emits exactly like 'debug' (no prefix).
    info = debug

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: %s\n' % (msg % args))

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: %s\n' % (msg % args))

    # Note: 'critical' intentionally aliases 'debug' in PLY, not 'error'.
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Every attribute access returns the instance itself, and calling the
    # instance also returns it, so any chained use such as
    # ``log.debug('msg', x)`` is silently absorbed.
    def __getattribute__(self, name):
        return self
    def __call__(self, *args, **kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Exception raised for errors detected by the yacc module."""
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Return a short debug description of a parse result *r*.

    The description shows the value's type name, its id, and a repr that
    is truncated to ``resultlimit`` characters to keep debug logs short.
    """
    text = repr(r)
    if '\n' in text:
        # Collapse multi-line reprs onto a single log line.
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + ' ...'
    return '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Return a compact description of *r* for debug stack dumps.

    Short reprs (fewer than 16 characters) are shown verbatim; anything
    longer is abbreviated to the value's type name and id.
    """
    text = repr(r)
    if '\n' in text:
        text = repr(text)
    if len(text) >= 16:
        return '<%s @ 0x%x>' % (type(r).__name__, id(r))
    return text
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
# Slots for the active parser's bound methods; populated by call_errorfunc()
# for the duration of a p_error() invocation so the deprecated module-level
# errok()/token()/restart() wrappers below can forward to them.
_errok = None
_token = None
_restart = None
# Runtime warning text shown by the deprecated wrappers (do not edit casually;
# this string is emitted to users via warnings.warn()).
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
    """Deprecated module-level errok(); forwards to the active parser's errok()."""
    warnings.warn(_warnmsg)
    return _errok()
def restart():
    """Deprecated module-level restart(); forwards to the active parser's restart()."""
    warnings.warn(_warnmsg)
    return _restart()
def token():
    """Deprecated module-level token(); forwards to the active parser's token()."""
    warnings.warn(_warnmsg)
    return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
    """Invoke the user's p_error() *errorfunc* with *token*.

    While the handler runs, the module-level _errok/_token/_restart slots
    are bound to *parser*'s methods so the deprecated global errok(),
    token(), and restart() wrappers still work; they are removed again
    afterwards.
    """
    global _errok, _token, _restart
    _errok = parser.errok
    _token = parser.token
    _restart = parser.restart
    r = errorfunc(token)
    try:
        del _errok, _token, _restart
    except NameError:
        # NOTE(review): defensive guard — presumably covers handlers that
        # have already removed these globals; confirm before simplifying.
        pass
    return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Grammar symbol held on the parsing stack (see attribute list above)."""
    def __str__(self):
        return self.type
    def __repr__(self):
        return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols handed to a grammar rule function.

    Index reads and writes go through the ``.value`` attribute of the
    underlying YaccSymbol objects; negative indices reach back into the
    parser's symbol stack.  lineno()/linespan() and lexpos()/lexspan()
    expose positional information for individual right-hand-side items.
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        if isinstance(n, slice):
            return [sym.value for sym in self.slice[n]]
        if n >= 0:
            return self.slice[n].value
        # Negative indices read from the parser's symbol stack.
        return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 slicing protocol (Python 3 routes slices to __getitem__).
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        """Starting line number of item *n* (0 if unknown)."""
        return getattr(self.slice[n], 'lineno', 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        """Return (startline, endline) for item *n*."""
        first = getattr(self.slice[n], 'lineno', 0)
        last = getattr(self.slice[n], 'endlineno', first)
        return first, last

    def lexpos(self, n):
        """Starting lexing position of item *n* (0 if unknown)."""
        return getattr(self.slice[n], 'lexpos', 0)

    def lexspan(self, n):
        """Return (lexpos, endlexpos) for item *n*."""
        first = getattr(self.slice[n], 'lexpos', 0)
        last = getattr(self.slice[n], 'endlexpos', first)
        return first, last

    def error(self):
        """Signal a syntax error from inside a rule (enters error recovery)."""
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The run-time LR parsing engine.

    Drives a table-driven shift/reduce parse using the tables exposed by
    *lrtab* (``lr_productions``, ``lr_action``, ``lr_goto``) and reports
    syntax errors through *errorf*, the user's p_error() function (may
    be None).  parse() dispatches to one of three engine variants:
    parsedebug() (instrumented), parseopt() (tracking only), or
    parseopt_notrack() (fully optimized).
    """
    def __init__(self, lrtab, errorf):
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
        self.set_defaulted_states()
        self.errorok = True
    def errok(self):
        """Signal that error recovery has completed successfully."""
        self.errorok = True
    def restart(self):
        """Reset the parser stacks to the start state (state 0, '$end' marker)."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    # Defaulted state support.
    # This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
    # the next look-ahead token. This delayed invocation of the tokenizer can be useful in
    # certain kinds of advanced parsing situations where the lexer and parser interact with
    # each other or change states (i.e., manipulation of scope, lexer states, etc.).
    #
    # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
    def set_defaulted_states(self):
        self.defaulted_states = {}
        for state, actions in self.action.items():
            rules = list(actions.values())
            # A single action that is a reduction (< 0) makes the state defaulted.
            if len(rules) == 1 and rules[0] < 0:
                self.defaulted_states[state] = rules[0]
    def disable_defaulted_states(self):
        """Turn off defaulted-state handling (every state consumes a lookahead)."""
        self.defaulted_states = {}
    def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Parse *input*, dispatching to the appropriate engine variant."""
        if debug or yaccdevel:
            # debug=True (or any int) selects the default stderr logger.
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse(). All changes made to the
    # parsing engine should be made here. Optimized versions of this function
    # are automatically created by the ply/ygen.py script. This script cuts out
    # sections enclosed in markers such as this:
    #
    # #--! DEBUG
    # statements
    # #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Reference implementation of the parse loop with debug logging."""
        #--! parsedebug-start
        lookahead = None # Current lookahead symbol
        lookaheadstack = [] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = [] # Stack of parsing states
        self.statestack = statestack
        symstack = [] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            #--! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            #--! DEBUG
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token() # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
                #--! DEBUG
                debug.debug('Defaulted state %s: Reduce using %d', state, -t)
                #--! DEBUG
            #--! DEBUG
            debug.debug('Stack : %s',
                        ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            #--! DEBUG
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    #--! DEBUG
                    debug.debug('Action : Shift and goto state %s', t)
                    #--! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    #--! DEBUG
                    if plen:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
                                   goto[statestack[-1-plen]][pname])
                    else:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
                                   goto[statestack[-1]][pname])
                    #--! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result
            if t is None:
                #--! DEBUG
                debug.error('Error : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                # NOTE(review): message lacks a trailing '\n'
                                # (later PLY releases add it) — TODO confirm
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parsedebug-end
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
    # This code is automatically generated by the ply/ygen.py script. Make
    # changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Generated copy of parsedebug() with DEBUG sections stripped (tracking kept)."""
        #--! parseopt-start
        lookahead = None # Current lookahead symbol
        lookaheadstack = [] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = [] # Stack of parsing states
        self.statestack = statestack
        symstack = [] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token() # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-end
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
    # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Generated copy of parsedebug() with DEBUG and TRACKING sections stripped."""
        #--! parseopt-notrack-start
        lookahead = None # Current lookahead symbol
        lookaheadstack = [] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = [] # Stack of parsing states
        self.statestack = statestack
        symstack = [] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token() # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                            return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# Regex matching grammar identifiers: letters, digits, '_' and '-'.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single production (grammar rule) such as

        expr : expr PLUS term

    Attributes:
        name   - left-hand-side nonterminal name (e.g. 'expr')
        prod   - tuple of right-hand-side symbols, e.g. ('expr','PLUS','term')
        prec   - (assoc, level) precedence pair
        number - production number
        func   - name of the reduce action function (resolved by bind())
        file   - file where the action function is defined
        line   - line number of the action function
        len    - number of symbols on the right-hand side
        usyms  - unique symbols appearing in the production (in order)
    """
    # Number of times this production has been reduced (updated by the parser)
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Cached length of the right-hand side (used during table construction)
        self.len = len(self.prod)

        # Ordered list of distinct symbols used on the right-hand side
        seen = set()
        self.usyms = []
        for sym in self.prod:
            if sym not in seen:
                seen.add(sym)
                self.usyms.append(sym)

        # LR items derived from this production (filled in later)
        self.lr_items = []
        self.lr_next = None

        # Human-readable form, computed once up front
        rhs = ' '.join(self.prod) if self.prod else '<empty>'
        self.str = '%s -> %s' % (self.name, rhs)

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(%s)' % self

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol; a production is always truthy
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position *n*, or None when
        *n* is past the end of the production."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Productions that can appear immediately after the dot
        try:
            item.lr_after = Prodnames[item.prod[n + 1]]
        except (IndexError, KeyError):
            item.lr_after = []
        # Grammar symbol immediately before the dot
        try:
            item.lr_before = item.prod[n - 1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve the action-function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production objects when table data is read
    from a file.  Carries only the fields actually used by the LR parsing
    engine plus a little debugging information."""

    def __init__(self, str, name, len, func, file, line):
        self.str = str
        self.name = name
        self.len = len
        self.func = func
        self.file = file
        self.line = line
        # Resolved later by bind()
        self.callable = None

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    def bind(self, pdict):
        """Resolve the action-function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a parse-position marker ('.') spliced into the
    right-hand side, e.g.  'expr -> expr . PLUS term'.

    Attributes:
        name       - production name (e.g. 'expr')
        prod       - RHS symbols including the dot
        number     - production number
        lr_index   - index of the dot in prod
        lookaheads - LALR lookahead symbols, keyed by state
        len        - length of prod (including the dot)
        usyms      - unique symbols of the underlying production
    """

    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        self.usyms = p.usyms
        # Right-hand side with the dot inserted at position n
        rhs = list(p.prod)
        rhs.insert(n, '.')
        self.prod = tuple(rhs)
        self.len = len(self.prod)

    def __str__(self):
        if not self.prod:
            return '%s -> <empty>' % self.name
        return '%s -> %s' % (self.name, ' '.join(self.prod))

    def __repr__(self):
        return 'LRItem(%s)' % self
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost symbol of *symbols* that appears in
    *terminals*, or None when no terminal is present.  Used by
    Grammar.add_production() to derive a rule's default precedence."""
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Raised when an invalid or inconsistent grammar specification is
    detected (illegal rule names, duplicate rules, misuse of %prec, an
    undefined start symbol, etc.)."""
    pass
class Grammar(object):
    """Represents the contents of a grammar along with computed
    properties such as FIRST sets, FOLLOW sets, LR items, and precedence
    information.  Built incrementally via set_precedence(),
    add_production(), and set_start(); the computed data is then used by
    the LR table generation process."""
    def __init__(self, terminals):
        self.Productions = [None] # A list of all of the productions. The first
                                  # entry is always reserved for the purpose of
                                  # building an augmented grammar
        self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
                            # productions of that nonterminal.
        self.Prodmap = {} # A dictionary that is only used to detect duplicate
                          # productions.
        self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
                            # list of the rules where they are used.
        for term in terminals:
            self.Terminals[term] = []
        self.Terminals['error'] = []
        self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
                               # of rule numbers where they are used.
        self.First = {} # A dictionary of precomputed FIRST(x) symbols
        self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
        self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
                             # form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.
        self.Start = None # Starting symbol for the grammar
    def __len__(self):
        return len(self.Productions)
    def __getitem__(self, index):
        return self.Productions[index]
    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'. level is a numeric level.
    #
    # -----------------------------------------------------------------------------
    def set_precedence(self, term, assoc, level):
        """Record the precedence (assoc, level) for terminal *term*.
        Must be called before any add_production().  Raises GrammarError
        for a duplicate terminal or an invalid associativity."""
        assert self.Productions == [None], 'Must call set_precedence() before add_production()'
        if term in self.Precedence:
            raise GrammarError('Precedence already specified for terminal %r' % term)
        if assoc not in ['left', 'right', 'nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc, level)
    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols. For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self, prodname, syms, func=None, file='', line=0):
        """Add the rule *prodname* -> *syms* with reduce action *func*
        (defined at *file*:*line*).  Validates rule and symbol names,
        interprets quoted literal tokens, resolves %prec, and records the
        new Production.  Raises GrammarError on any malformed rule."""
        if prodname in self.Terminals:
            raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
        # Look for literal tokens
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # eval() interprets a quoted single-character literal
                    # token from the grammar spec (trusted input).
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
                                           (file, line, s, prodname))
                    if c not in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
                                   (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname)
            if not prodprec:
                raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
            else:
                self.UsedPrecedence.add(precname)
            del syms[-2:] # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))
        # See if the rule is already in the rulemap
        map = '%s -> %s' % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
                               'Previous definition at %s:%d' % (m.file, m.line))
        # From this point on, everything is valid. Create a new Production instance
        pnumber = len(self.Productions)
        if prodname not in self.Nonterminals:
            self.Nonterminals[prodname] = []
        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if t not in self.Nonterminals:
                    self.Nonterminals[t] = []
                self.Nonterminals[t].append(pnumber)
        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p
        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [p]
    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar. Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self, start=None):
        """Set the start symbol (default: name of the first production)
        and install the augmented rule S' -> start as production 0."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start
    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol. Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        """Return the list of nonterminals not reachable from the start
        symbol (depth-first marking from production 0)."""
        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if s in reachable:
                return
            reachable.add(s)
            for p in self.Prodnames.get(s, []):
                for r in p.prod:
                    mark_reachable_from(r)
        reachable = set()
        mark_reachable_from(self.Productions[0].prod[0])
        return [s for s in self.Nonterminals if s not in reachable]
    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        """Return the list of symbols involved in infinite recursion
        cycles — rules from which no terminal-only string can ever be
        derived — computed as a termination fixed point."""
        terminates = {}
        # Terminals:
        for t in self.Terminals:
            terminates[t] = True
        terminates['$end'] = True
        # Nonterminals:
        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = False
        # Then propagate termination until no change:
        while True:
            some_change = False
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = False
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = True
                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = True
                            some_change = True
                        # Don't need to consider any more productions for this n.
                        break
            if not some_change:
                break
        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)
        return infinite
    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        """Return (symbol, production) pairs for symbols used in rules
        but defined neither as tokens nor as grammar rules."""
        result = []
        for p in self.Productions:
            if not p:
                continue
            for s in p.prod:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    result.append((s, p))
        return result
    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar. Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        """Return the list of terminals defined but never used by any
        rule ('error' is always excluded)."""
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)
        return unused_tok
    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined, but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        """Return one representative Production for each nonterminal that
        is never referenced by any rule."""
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod
    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar. term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        """Return (terminal, assoc) pairs for precedence declarations
        that were never applied by any rule."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))
        return unused
    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self, beta):
        """Return FIRST1(beta) for a tuple of symbols *beta*, based on
        the per-symbol First sets computed so far."""
        # We are computing First(x1,x2,x3,...,xn)
        result = []
        for x in beta:
            x_produces_empty = False
            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = True
                else:
                    if f not in result:
                        result.append(f)
            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')
        return result
    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        """Compute and cache FIRST1(X) for all grammar symbols via a
        fixed-point iteration.  Returns the First dictionary."""
        if self.First:
            return self.First
        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]
        self.First['$end'] = ['$end']
        # Nonterminals:
        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []
        # Then propagate symbols until no change:
        while True:
            some_change = False
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append(f)
                            some_change = True
            if not some_change:
                break
        return self.First
    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol. The
    # follow set is the set of all symbols that might follow a given
    # non-terminal. See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self, start=None):
        """Compute and cache FOLLOW sets for every nonterminal (the
        symbols that can appear immediately after it).  *start* defaults
        to the name of the first production.  Returns the Follow dict."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow
        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()
        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = []
        if not start:
            start = self.Productions[1].name
        self.Follow[start] = ['$end']
        while True:
            didadd = False
            for p in self.Productions[1:]:
                # Here is the production set
                for i, B in enumerate(p.prod):
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = False
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = True
                            if f == '<empty>':
                                hasempty = True
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = True
            if not didadd:
                break
        return self.Follow
    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items. The LR items are stored in two ways: First, they are uniquely
    # numbered and placed in the list _lritems. Second, a linked list of LR items
    # is built for each production. For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        """Build, for every production, the list and linked chain of its
        LR items (one item per possible dot position)."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while True:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None
                lastlri.lr_next = lri
                if not lri:
                    break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Raised when a previously generated parsing-table file was written
    by an incompatible (out-of-date) version of this module."""
    pass
class LRTable(object):
    """Basic container for LR parsing-table data: the action table, the
    goto table, the production list, and the parsing method.  Table
    *generation* lives in the derived LRGeneratedTable class; this class
    only loads previously generated tables from a module or a pickle
    file."""
    def __init__(self):
        # All of these are filled in by read_table()/read_pickle()
        self.lr_action = None       # Action table
        self.lr_goto = None         # Goto table
        self.lr_productions = None  # List of MiniProduction objects
        self.lr_method = None       # Parsing method used to build the tables
    def read_table(self, module):
        """Load tables from an already-imported module object, or from a
        module named by a (possibly dotted) string.  Returns the table
        signature.  Raises VersionError when the table file version does
        not match this module's __tabversion__."""
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            # NOTE(review): imports by exec'ing a constructed statement;
            # module names are supplied by the application, not by
            # untrusted external input.
            exec('import %s' % module)
            parsetab = sys.modules[module]
        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')
        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = [MiniProduction(*p) for p in parsetab._lr_productions]
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature
    def read_pickle(self, filename):
        """Load tables from a pickle file written by the table generator.
        Returns the table signature.  Raises ImportError when *filename*
        does not exist and VersionError on a stale table version."""
        try:
            import cPickle as pickle  # Python 2: C implementation
        except ImportError:
            import pickle
        if not os.path.exists(filename):
            raise ImportError
        # Use a context manager so the file handle is closed even if
        # unpickling raises part-way through (the previous version leaked
        # the handle on error).
        with open(filename, 'rb') as in_f:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError('yacc table file version is out of date')
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
        self.lr_productions = [MiniProduction(*p) for p in productions]
        return signature
    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        """Resolve every production's action-function name against
        *pdict*."""
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Compute the set-valued function F(x) = F'(x) U U{F(y) | x R y}
    over the input set *X* using the digraph algorithm.  *R* is the
    relation and *FP* the base set-valued function F'.  Used to compute
    Read() sets and FOLLOW sets during LALR(1) generation."""
    N = dict.fromkeys(X, 0)  # traversal numbering; 0 means unvisited
    F = {}                   # result sets, keyed by element of X
    stack = []
    for node in X:
        if N[node] == 0:
            traverse(node, N, stack, F, X, R, FP)
    return F
def traverse(x, N, stack, F, X, R, FP):
    """Depth-first helper for digraph(): propagates the set-valued
    function F along the relation R, collapsing strongly connected
    components so that every member of a component ends up sharing the
    same result set."""
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x) # F(X) <- F'(x)
    rel = R(x) # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        # Propagate the lowest traversal number reachable from x
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: pop the whole
        # component off the stack and give every member the completed set
        # F[x].  MAXINT marks an element as fully processed.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError):
    """Raised for errors encountered during LALR/SLR table generation
    (e.g. an unsupported table-construction method)."""
    pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self, grammar, method='LALR', log=None):
        """Build LR parsing tables for *grammar* using *method* ('SLR' or
        'LALR', the default).  Diagnostics go to *log* (a NullLogger when
        not supplied).  Raises LALRError for an unsupported method."""
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)
        self.grammar = grammar
        self.lr_method = method
        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log
        # Internal attributes
        self.lr_action = {} # Action table
        self.lr_goto = {} # Goto table
        self.lr_productions = grammar.Productions # Copy of grammar Production array
        self.lr_goto_cache = {} # Cache of computed gotos
        self.lr0_cidhash = {} # Cache of closures
        self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = [] # List of conflicts
        self.sr_conflicts = []
        self.rr_conflicts = []
        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self, I):
        """Compute the LR(0) closure of item set *I*: repeatedly add the
        initial LR item of every production that can appear immediately
        after a dot, until no more items can be added.  Returns the
        closed item list."""
        self._add_count += 1
        # Add everything in I to J
        J = I[:]
        didadd = True
        while didadd:
            didadd = False
            # NOTE: J grows while being iterated; newly appended items are
            # picked up by the same for-loop pass.
            for j in J:
                for x in j.lr_after:
                    # lr0_added tags productions already pulled into this
                    # closure; the run-specific counter avoids clearing tags
                    if getattr(x, 'lr0_added', 0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = True
        return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
    def lr0_goto(self, I, x):
        """Compute the LR(0) goto(I, x) set.  Written so the same goto
        set is always returned as the *same* Python object, allowing
        later comparisons by id() instead of element-wise equality."""
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I), x))
        if g:
            return g
        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x)
        if not s:
            s = {}
            self.lr_goto_cache[x] = s
        gs = []
        # Walk a nested-dict trie keyed by the sequence of advanced items;
        # the '$end' key at the final node stores the canonical goto set.
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n))
                if not s1:
                    s1 = {}
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end')
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I), x)] = g
        return g
# Compute the LR(0) sets of item function
    def lr0_items(self):
        """Compute the canonical collection of LR(0) item sets.  Returns
        the list C of item sets and fills lr0_cidhash, mapping id(set)
        to the set's state number."""
        C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1
        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1
            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = {}
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None
            # New states are appended to C inside this loop; the while
            # condition re-checks len(C), so they get processed too.
            for x in asyms:
                g = self.lr0_goto(I, x)
                if not g or id(g) in self.lr0_cidhash:
                    continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)
        return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
    def compute_lookback_includes(self, C, trans, nullable):
        """Determine the LOOKBACK and INCLUDES relations for the
        nonterminal transitions *trans* over the LR(0) item sets *C*
        (see the banner comment above for the formal definitions).
        Returns the pair (lookdict, includedict)."""
        lookdict = {} # Dictionary of lookback relations
        includedict = {} # Dictionary of include relations
        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1
        # Loop over all transitions and compute lookbacks and includes
        for state, N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N:
                    continue
                # Okay, we have a name match. We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]
                    # Check to see if this symbol and state are a non-terminal transition
                    if (j, t) in dtrans:
                        # Yes. Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals:
                                break # No forget it
                            if p.prod[li] not in nullable:
                                break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j, t))
                    g = self.lr0_goto(C[j], t) # Go to next set
                    j = self.lr0_cidhash.get(id(g), -1) # Go to next state
                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name:
                        continue
                    if r.len != p.len:
                        continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]:
                            break
                        i = i + 1
                    else:
                        lookb.append((j, r))
            for i in includes:
                if i not in includedict:
                    includedict[i] = []
                includedict[i].append((state, N))
            lookdict[(state, N)] = lookb
        return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the read sets for the non-terminal transitions.

    C        -- collection of LR(0) item sets
    ntrans   -- list of non-terminal transitions (state, N)
    nullable -- set of nullable non-terminals

    Returns the mapping produced by running the digraph algorithm with
    the direct-read relation as the base function and the reads relation
    as the edge function.
    """
    def base(transition):
        return self.dr_relation(C, transition, nullable)

    def relation(transition):
        return self.reads_relation(C, transition, nullable)

    return digraph(ntrans, relation, base)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute the LALR FOLLOW sets for the non-terminal transitions.

    Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}

    ntrans   -- list of non-terminal transitions (state, N)
    readsets -- read sets previously computed by compute_read_sets()
    inclsets -- includes relation from compute_lookback_includes()

    Returns the follow sets as computed by the digraph algorithm.
    """
    def base(transition):
        return readsets[transition]

    def relation(transition):
        return inclsets.get(transition, [])

    return digraph(ntrans, relation, base)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach lookahead symbols to grammar productions.

    For each non-terminal transition, propagate its FOLLOW symbols to
    every (state, production) pair recorded in the lookback relation,
    storing them in production.lookaheads[state] without duplicates.

    lookbacks -- mapping of transitions to [(state, production), ...]
    followset -- mapping of transitions to their FOLLOW symbols
    """
    for transition, lookback in lookbacks.items():
        follow_symbols = followset.get(transition, [])
        # Loop over the productions recorded for this transition
        for state, production in lookback:
            state_lookaheads = production.lookaheads.setdefault(state, [])
            for symbol in follow_symbols:
                if symbol not in state_lookaheads:
                    state_lookaheads.append(symbol)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Compute and attach LALR(1) lookaheads for the LR(0) item sets C.

    Runs the DeRemer/Pennello pipeline: nullable non-terminals,
    non-terminal transitions, read sets, lookback/includes relations,
    FOLLOW sets, and finally attaches the lookaheads to productions.
    """
    # Nullable non-terminals are needed by both the read sets and the
    # includes computation below.
    nullable = self.compute_nullable_nonterminals()

    # All (state, nonterminal) transitions of the LR(0) machine
    transitions = self.find_nonterminal_transitions(C)

    # Read sets seed the FOLLOW computation
    read_sets = self.compute_read_sets(C, transitions, nullable)

    # Lookback ties transitions back to reducing productions; includes
    # links transitions whose FOLLOW sets must be merged
    lookbacks, includes = self.compute_lookback_includes(C, transitions, nullable)

    # LALR FOLLOW sets over the includes relation
    follow_sets = self.compute_follow_sets(transitions, read_sets, includes)

    # Finally attach the lookaheads to the productions themselves
    self.add_lookaheads(lookbacks, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the SLR or LALR parse tables for the grammar.

    Fills in self.lr_action and self.lr_goto with one entry per LR(0)
    item set.  Action encoding: positive = shift to that state,
    negative = reduce by that rule number, 0 = accept, None = error
    (nonassoc).  Conflicts are resolved via precedence and recorded in
    self.sr_conflicts / self.rr_conflicts.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto           # Goto array
    action = self.lr_action       # Action array
    log = self.log                # Logger for output
    actionp = {}                  # Action production array (temporary)

    log.info('Parsing method: %s', self.lr_method)

    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()

    # LALR needs per-state lookaheads attached to the productions first
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = []              # List of actions (for debug output)
        st_action = {}
        st_actionp = {}
        st_goto = {}
        log.info('')
        log.info('state %d', st)
        log.info('')
        for p in I:
            log.info(' (%d) %s', p.number, p)
        log.info('')

        for p in I:
            if p.len == p.lr_index + 1:
                # Dot is at the far right of the production
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action['$end'] = 0
                    st_actionp['$end'] = p
                else:
                    # We are at the end of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        # SLR: reduce on everything in FOLLOW(name)
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Existing entry is a shift.  Decide via
                                # precedence; by default we favor shifting.
                                sprec, slevel = Productions[st_actionp[a].number].prec
                                rprec, rlevel = Precedence.get(a, ('right', 0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # nonassoc at equal precedence: error entry
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp, rejectp = pp, oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp, rejectp = oldp, pp
                                self.rr_conflicts.append((st, chosenp, rejectp))
                                log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                         a, st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            # No conflict: plain reduce entry
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1]       # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I, a)
                    j = self.lr0_cidhash.get(id(g), -1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a, p, 'shift and go to state %d' % j))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError('Shift/shift conflict in state %d' % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec, rlevel = Productions[st_actionp[a].number].prec
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            # No conflict: plain shift entry
                            st_action[a] = j
                            st_actionp[a] = p

        # Print the actions associated with each terminal
        _actprint = {}
        for a, p, m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(' %-15s %s', a, m)
                    _actprint[(a, m)] = 1
        log.info('')
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a, p, m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a, m) in _actprint:
                        log.debug(' ! %-15s [ %s ]', a, m)
                        not_used = 1
                        _actprint[(a, m)] = 1
        if not_used:
            log.debug('')

        # Construct the goto table for this state
        nkeys = {}
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I, n)
            j = self.lr0_cidhash.get(id(g), -1)
            if j >= 0:
                st_goto[n] = j
                log.info(' %-30s shift and go to state %d', n, j)

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
    """Write the LR parsing tables to a Python module file.

    tabmodule -- dotted name of the table module to create
    outputdir -- directory in which to create the file
    signature -- grammar signature embedded for cache validation

    Raises IOError if *tabmodule* is an already-imported module object
    or if the output file cannot be written.
    """
    if isinstance(tabmodule, types.ModuleType):
        raise IOError("Won't overwrite existing tabmodule")

    basemodulename = tabmodule.split('.')[-1]
    filename = os.path.join(outputdir, basemodulename) + '.py'

    # Use a context manager so the file handle is released even if a
    # write fails part-way through (the original leaked it on error).
    with open(filename, 'w') as f:
        f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))

        # Change smaller to 0 to go back to original tables
        smaller = 1

        # Factor out names to try and make smaller
        if smaller:
            items = {}
            for s, nd in self.lr_action.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write('\n_lr_action_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')

            # The emitted loader loop MUST be indented -- the table module
            # is executed as Python source when it is imported.
            f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_action: _lr_action[_x] = {}
        _lr_action[_x][_k] = _y
del _lr_action_items
''')
        else:
            f.write('\n_lr_action = { ')
            for k, v in self.lr_action.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')

        if smaller:
            # Factor out names to try and make smaller
            items = {}
            for s, nd in self.lr_goto.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write('\n_lr_goto_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')

            f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
        else:
            f.write('\n_lr_goto = { ')
            for k, v in self.lr_goto.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')

        # Write production table
        f.write('_lr_productions = [\n')
        for p in self.lr_productions:
            if p.func:
                f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                                                     p.func, os.path.basename(p.file), p.line))
            else:
                f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
        f.write(']\n')
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
    """Pickle the LR parsing tables to *filename*.

    The pickle stream contains, in order: table version, parsing
    method, grammar signature, action table, goto table and the
    production list (as plain tuples).
    """
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'wb') as outf:
        def dump(obj):
            pickle.dump(obj, outf, pickle_protocol)

        dump(__tabversion__)
        dump(self.lr_method)
        dump(signature)
        dump(self.lr_action)
        dump(self.lr_goto)

        # Productions are flattened to tuples so the pickle does not
        # depend on the Production class being importable.
        flattened = []
        for p in self.lr_productions:
            if p.func:
                flattened.append((p.str, p.name, p.len, p.func,
                                  os.path.basename(p.file), p.line))
            else:
                flattened.append((str(p), p.name, p.len, None, None, None))
        dump(flattened)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of the caller *levels* frames up the stack.

    Combines the caller's globals with its locals (locals win), giving
    yacc() the environment of the module that invoked it when no explicit
    module argument was supplied.
    """
    frame = sys._getframe(levels)
    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar docstring into production tuples.

    doc  -- docstring containing one or more grammar rules
    file -- filename where the docstring appears (for error messages)
    line -- line number at which the docstring starts

    Returns a list of (file, line, prodname, syms) tuples.  Raises
    SyntaxError for a malformed rule or a misplaced '|' continuation.
    """
    productions = []
    previous_name = None
    # Line numbers reported to the user are offset from the def line
    for dline, raw in enumerate(doc.splitlines(), start=line + 1):
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # Continuation of the previous rule
                if not previous_name:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                name = previous_name
                symbols = tokens[1:]
            else:
                name = tokens[0]
                previous_name = name
                symbols = tokens[2:]
                separator = tokens[1]
                if separator != ':' and separator != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
            productions.append((file, dline, name, symbols))
        except SyntaxError:
            raise
        except Exception:
            # e.g. IndexError on a one-token rule line
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, raw.strip()))
    return productions
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates the parser specification (start symbol,
    p_error handler, token list, precedence table and p_* rule
    functions) extracted from the dictionary handed to yacc()."""

    def __init__(self, pdict, log=None):
        self.pdict = pdict          # symbol table to introspect
        self.start = None           # start symbol ('start' entry)
        self.error_func = None      # p_error function
        self.tokens = None          # token list
        self.modules = set()        # modules containing rule functions
        self.grammar = []           # [(funcname, (file, line, prodname, syms)), ...]
        self.error = False          # set True by any validation failure

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_modules()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return a hex digest over the start symbol, precedence table,
        token list and rule docstrings; used to decide whether cached
        parser tables are still valid."""
        try:
            from hashlib import md5
        except ImportError:
            # very old Pythons only
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(' '.join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError, ValueError):
            # malformed entries are tolerated here; validation reports them
            pass

        digest = base64.b16encode(sig.digest())
        if sys.version_info[0] >= 3:
            digest = digest.decode('latin-1')
        return digest

    # -----------------------------------------------------------------------------
    # validate_modules()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file. Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_modules(self):
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for module in self.modules:
            lines, linen = inspect.getsourcelines(module)

            counthash = {}
            for linen, line in enumerate(lines):
                linen += 1      # report 1-based line numbers
                m = fre.match(line)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        filename = inspect.getsourcefile(module)
                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
                                         filename, linen, name, prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, string_types):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = True
                return

            eline = self.error_func.__code__.co_firstlineno
            efile = self.error_func.__code__.co_filename
            module = inspect.getmodule(self.error_func)
            self.modules.add(module)

            # p_error must take exactly one argument (the bad token)
            argcount = self.error_func.__code__.co_argcount - ismethod
            if argcount != 1:
                self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
                self.error = True

    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get('tokens')
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = True
            return

        terminals = set()
        for n in self.tokens:
            if n in terminals:
                self.log.warning('Token %r multiply defined', n)
            terminals.add(n)

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get('precedence')

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Convert the user-supplied precedence table into
        self.preclist, a flat list of (term, assoc, level) tuples."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error('precedence must be a list or tuple')
                self.error = True
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error('Bad precedence table')
                    self.error = True
                    return

                if len(p) < 2:
                    self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
                    self.error = True
                    return
                assoc = p[0]
                if not isinstance(assoc, string_types):
                    self.log.error('precedence associativity must be a string')
                    self.error = True
                    return
                for term in p[1:]:
                    if not isinstance(term, string_types):
                        self.log.error('precedence items must be strings')
                        self.error = True
                        return
                    # level is 1-based: entry 0 binds least tightly
                    preclist.append((term, assoc, level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if not name.startswith('p_') or name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = item.__code__.co_firstlineno
                module = inspect.getmodule(item)
                p_functions.append((line, module, name, item.__doc__))

        # Sort all of the actions by line number; make sure to stringify
        # modules to make them sortable, since `line` may not uniquely sort all
        # p functions
        p_functions.sort(key=lambda p_function: (
            p_function[0],
            str(p_function[1]),
            p_function[2],
            p_function[3]))
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check each p_* rule function's arity and docstring, parse the
        grammar out of the docstrings into self.grammar, and warn about
        suspicious names (e.g. rules defined without the p_ prefix)."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error('no rules of the form p_rulename are defined')
            self.error = True
            return

        for line, module, name, doc in self.pfuncs:
            file = inspect.getsourcefile(module)
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2     # self + p
            else:
                reqargs = 1     # just p
            if func.__code__.co_argcount > reqargs:
                self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
                self.error = True
            elif func.__code__.co_argcount < reqargs:
                self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                self.error = True
            elif not func.__doc__:
                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError as e:
                    self.log.error(str(e))
                    self.error = True

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.modules.add(module)

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n, v in self.pdict.items():
            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n.startswith('t_'):
                continue
            if n.startswith('p_') and n != 'p_error':
                self.log.warning('%r not defined as a function', n)
            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
                (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
                if v.__doc__:
                    try:
                        doc = v.__doc__.split(' ')
                        if doc[1] == ':':
                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
                    except IndexError:
                        pass

        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
    """Build an LALR/SLR parser from grammar rules found in *module*
    (or, by default, the caller's namespace).

    Reuses cached tables from *tabmodule*/*picklefile* when their
    signature matches the current grammar; otherwise the tables are
    regenerated and optionally written back out.  Returns an LRParser
    instance and sets the module-level `parse` shortcut.  Raises
    YaccError if the grammar specification is invalid.
    """
    if tabmodule is None:
        tabmodule = tab_module

    # Reference to the parsing method of the last built parser
    global parse

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
    else:
        pdict = get_caller_module_dict(2)

    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        # - If tabmodule specifies a package, files go into that package directory
        # - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule

    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError('Unable to build parser')

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Cached tables are used only when the grammar is unchanged
        # (or the user explicitly asked for optimize mode)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        # no cached tables; fall through and regenerate
        pass

    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()

    debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)

    errors = False

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')

    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info(' %s', term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))

    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)

        # reduce/reduce conflicts are deduplicated per (state, rule pair)
        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))

    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser
|
jonpry/PHDL
|
ply/yacc.py
|
Python
|
gpl-2.0
| 135,805
|
# -*- coding: utf-8 -*-
#
# Copyright © 2011 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
openfisca_qt.gui.qt.compat
--------------------------
Transitional module providing compatibility functions intended to help
migrating from PyQt to PySide.
This module should be fully compatible with:
* PyQt >=v4.4
* both PyQt API #1 and API #2
* PySide
"""
import os
import sys
from .QtGui import QFileDialog
#===============================================================================
# QVariant conversion utilities
#===============================================================================
# Detect whether we are running under PyQt with sip API #1, where QVariant
# objects exist and must be converted explicitly.
# NOTE(review): os.environ['QT_API'] raises KeyError when QT_API is unset --
# callers are expected to set it before importing this module; TODO confirm.
PYQT_API_1 = False
if os.environ['QT_API'] == 'pyqt':
    import sip
    try:
        PYQT_API_1 = sip.getapi('QVariant') == 1  # PyQt API #1
    except AttributeError:
        # PyQt <v4.6 has no sip.getapi and always uses API #1
        PYQT_API_1 = True

    def to_qvariant(pyobj=None):
        """Convert Python object to QVariant
        This is a transitional function from PyQt API #1 (QVariant exist)
        to PyQt API #2 and Pyside (QVariant does not exist)"""
        if PYQT_API_1:
            # PyQt API #1
            from PyQt4.QtCore import QVariant
            return QVariant(pyobj)
        else:
            # PyQt API #2
            return pyobj

    def from_qvariant(qobj=None, convfunc=None):
        """Convert QVariant object to Python object
        This is a transitional function from PyQt API #1 (QVariant exist)
        to PyQt API #2 and Pyside (QVariant does not exist)"""
        if PYQT_API_1:
            # PyQt API #1: unwrap via the appropriate QVariant accessor
            assert callable(convfunc)
            if convfunc in (unicode, str):  # Python 2 `unicode`
                return convfunc(qobj.toString())
            elif convfunc is bool:
                return qobj.toBool()
            elif convfunc is int:
                # toInt() returns (value, ok); keep only the value
                return qobj.toInt()[0]
            elif convfunc is float:
                return qobj.toDouble()[0]
            else:
                return convfunc(qobj)
        else:
            # PyQt API #2
            return qobj
else:
    # PySide (or PyQt API #2): QVariant does not exist, so both
    # conversions are identity functions.
    def to_qvariant(obj=None):  # analysis:ignore
        """Convert Python object to QVariant
        This is a transitional function from PyQt API#1 (QVariant exist)
        to PyQt API#2 and Pyside (QVariant does not exist)"""
        return obj

    def from_qvariant(qobj=None, pytype=None):  # analysis:ignore
        """Convert QVariant object to Python object
        This is a transitional function from PyQt API #1 (QVariant exist)
        to PyQt API #2 and Pyside (QVariant does not exist)"""
        return qobj
#===============================================================================
# Wrappers around QFileDialog static methods
#===============================================================================
def getexistingdirectory(parent=None, caption='', basedir='',
                         options=QFileDialog.ShowDirsOnly):
    """Wrapper around QtGui.QFileDialog.getExistingDirectory static method
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    # On Windows the native dialog writes to the C-level standard
    # streams; silence them for the duration of the call.
    redirect = sys.platform == "win32"
    if redirect:
        saved_out, saved_err = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = None, None
    try:
        chosen = QFileDialog.getExistingDirectory(parent, caption, basedir,
                                                  options)
    finally:
        if redirect:
            # Restore standard outputs
            sys.stdout, sys.stderr = saved_out, saved_err
    if isinstance(chosen, basestring):
        return chosen
    # PyQt API #1 returns a QString; normalize to unicode
    return unicode(chosen)
def _qfiledialog_wrapper(attr, parent=None, caption='', basedir='',
                         filters='', selectedfilter='', options=None):
    """Call the QFileDialog static method named *attr* in a way that works
    across PyQt API #1, PyQt API #2 and PySide.

    Always returns the tuple (output, selectedfilter), where *output* is a
    unicode filename (or list of filenames for multi-select methods).

    Fix: removed two leftover debug ``print`` statements that wrote
    "in compat" and the selected filter to stdout on every dialog call.
    """
    if options is None:
        options = QFileDialog.Options(0)
    try:
        # PyQt <v4.6 (API #1)
        from .QtCore import QString
    except ImportError:
        # PySide or PyQt >=v4.6
        QString = None  # analysis:ignore
    tuple_returned = True
    try:
        # PyQt >=v4.6: the "...AndFilter" variant returns (output, filter)
        func = getattr(QFileDialog, attr + 'AndFilter')
    except AttributeError:
        # PySide or PyQt <v4.6
        func = getattr(QFileDialog, attr)
        if QString is not None:
            # PyQt API #1: the selected filter is returned through a
            # mutable QString passed by reference, not in the return value
            selectedfilter = QString()
            tuple_returned = False
    # Calling QFileDialog static method
    if sys.platform == "win32":
        # On Windows platforms: redirect standard outputs
        _temp1, _temp2 = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = None, None
    try:
        result = func(parent, caption, basedir,
                      filters, selectedfilter, options)
    except TypeError:
        # The selectedfilter option (`initialFilter` in Qt) has only been
        # introduced in Jan. 2010 for PyQt v4.7, that's why we handle here
        # the TypeError exception which will be raised with PyQt v4.6
        # (see Issue 960 for more details)
        result = func(parent, caption, basedir, filters, options)
    finally:
        if sys.platform == "win32":
            # On Windows platforms: restore standard outputs
            sys.stdout, sys.stderr = _temp1, _temp2
    # Processing output
    if tuple_returned:
        # PySide or PyQt >=v4.6
        output, selectedfilter = result
    else:
        # PyQt <v4.6 (API #1)
        output = result
    if QString is not None:
        # PyQt API #1: conversions needed from QString/QStringList
        selectedfilter = unicode(selectedfilter)
        if isinstance(output, QString):
            # Single filename
            output = unicode(output)
        else:
            # List of filenames
            output = [unicode(fname) for fname in output]
    # Always returns the tuple (output, selectedfilter)
    return output, selectedfilter
def getopenfilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileName static method
    Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
    returns a tuple of empty strings
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getOpenFileName', **kwargs)
def getopenfilenames(parent=None, caption='', basedir='', filters='',
                     selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileNames static method
    Returns a tuple (filenames, selectedfilter) -- when dialog box is canceled,
    returns a tuple (empty list, empty string)
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getOpenFileNames', **kwargs)
def getsavefilename(parent=None, caption='', basedir='', filters='',
                    selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getSaveFileName static method
    Returns a tuple (filename, selectedfilter) -- when dialog box is canceled,
    returns a tuple of empty strings
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0"""
    kwargs = dict(parent=parent, caption=caption, basedir=basedir,
                  filters=filters, selectedfilter=selectedfilter,
                  options=options)
    return _qfiledialog_wrapper('getSaveFileName', **kwargs)
if __name__ == '__main__':
    # Manual smoke test: open each dialog wrapper and echo its return value.
    from ..utils.qthelpers import qapplication
    app = qapplication()
    print(repr(getexistingdirectory()))
    print(repr(getopenfilename(filters='*.py;;*.txt')))
    print(repr(getopenfilenames(filters='*.py;;*.txt')))
    print(repr(getsavefilename(filters='*.py;;*.txt')))
    sys.exit()
|
openfisca/openfisca-qt
|
openfisca_qt/gui/qt/compat.py
|
Python
|
agpl-3.0
| 8,179
|
__author__ = 'Exter, 0xBADDCAFE'
import wx
class FTDropTarget(wx.DropTarget):
    """Drop target that accepts both files and plain text.

    Dropped data is forwarded to *receiver* as a custom command event
    (a class created with wx.lib.newevent.NewCommandEvent):
      - text drops -> event id ID_DROP_TEXT, carrying a ``text`` attribute
      - file drops -> event id ID_DROP_FILE, carrying a ``files`` attribute
    """
    ID_DROP_FILE = wx.NewId()
    ID_DROP_TEXT = wx.NewId()

    def __init__(self, receiver, evt):
        """
        receiver - any WX class that can bind to events
        evt - class that comes from wx.lib.newevent.NewCommandEvent call
        """
        wx.DropTarget.__init__(self)
        # Composite data object so one target accepts text *and* files.
        self.composite = wx.DataObjectComposite()
        self.text_do = wx.TextDataObject()
        self.file_do = wx.FileDataObject()
        self.composite.Add(self.text_do)
        self.composite.Add(self.file_do)
        self.SetDataObject(self.composite)
        self.receiver = receiver
        self.evt = evt

    def OnData(self, x, y, result):
        """Handle a drop: post the matching event to the receiver.

        Returns *result* (the suggested wx.DragResult) unchanged.
        """
        if self.GetData():
            drop_type = self.composite.GetReceivedFormat().GetType()
            if drop_type in (wx.DF_TEXT, wx.DF_UNICODETEXT):
                wx.PostEvent(self.receiver,
                             self.evt(id=self.ID_DROP_TEXT,
                                      text=self.text_do.GetText()))
            elif drop_type == wx.DF_FILENAME:
                wx.PostEvent(self.receiver,
                             self.evt(id=self.ID_DROP_FILE,
                                      files=self.file_do.GetFilenames()))
        # Fix: removed `assert isinstance(result, object)` -- every Python
        # value is an instance of object, so the assert checked nothing.
        return result
|
exter/pycover
|
droptarget.py
|
Python
|
mit
| 1,525
|
#!/usr/bin/python2
import xml.dom.minidom
import sys
from contextlib import closing
import urllib2
import pprint
def output(link, title=''):
if withtitle=="1":
print title.encode('utf-8'),"\n",link
else:
print link
withtitle = False
if len(sys.argv) > 2:
withtitle = sys.argv[2]
with closing(urllib2.urlopen(sys.argv[1])) as data:
xml_data = xml.dom.minidom.parse(data)
for item in xml_data.getElementsByTagName('item'):
#link = item.getElementsByTagName('media:content')
#if (len(link) > 0):
# print link[0].getAttribute('url')
# exit(0)
title = item.getElementsByTagName('title')
link = item.getElementsByTagName('link')
if (len(link) > 0) and not (item.getElementsByTagName('enclosure')):
output( link[0].firstChild.data, title[0].firstChild.data )
continue
link = item.getElementsByTagName('enclosure')
if (len(link) > 0):
output( link[0].getAttribute('url'), title[0].firstChild.data )
continue
print "NoUrlFound"
|
superkartoffel/fernbedienung
|
listPodcast.py
|
Python
|
gpl-2.0
| 970
|
import unittest
import test._test_multiprocessing
# Generate the shared multiprocessing test suite into this module's
# namespace, bound to the 'fork' start method.
test._test_multiprocessing.install_tests_in_module_dict(globals(), 'fork')

if __name__ == '__main__':
    unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_multiprocessing_fork.py
|
Python
|
gpl-3.0
| 174
|
from __future__ import absolute_import, unicode_literals
from six import text_type
from django.db import models
from django.db.models.query import QuerySet
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel
from wagtail.wagtailsearch import index
from wagtail.wagtailsearch.backends import get_search_backend
class PollQuerySet(QuerySet):
    """QuerySet with full-text search support via wagtailsearch."""

    def search(self, query_string, fields=None, backend='default'):
        """Run *query_string* through the named search backend, restricted
        to the pages contained in this QuerySet."""
        return get_search_backend(backend).search(query_string, self)
@python_2_unicode_compatible
class Vote(models.Model):
    # A single vote: which question was chosen, from which IP, and when.
    question = ParentalKey('Question', related_name='votes')
    ip = models.GenericIPAddressField()
    time = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Display the text of the question that was voted for.
        return self.question.question

    class Meta:
        verbose_name = _('vote')
        verbose_name_plural = _('votes')
@python_2_unicode_compatible
class Question(ClusterableModel, models.Model):
    # One selectable answer of a Poll (attached via the 'questions'
    # related name used by Poll.panels).
    poll = ParentalKey('Poll', related_name='questions')
    question = models.CharField(max_length=128, verbose_name=_('Question'))

    def __str__(self):
        return self.question

    class Meta:
        verbose_name = _('question')
        verbose_name_plural = _('questions')
@python_2_unicode_compatible
class Poll(ClusterableModel, models.Model, index.Indexed):
    # A titled, searchable container of Question objects.
    title = models.CharField(max_length=128, verbose_name=_('Title'))
    date_created = models.DateTimeField(default=timezone.now)

    class Meta:
        verbose_name = _('poll')
        verbose_name_plural = _('polls')

    # Wagtail admin edit form layout; at least one question is required.
    panels = [
        FieldPanel('title'),
        InlinePanel('questions', label=_('Questions'), min_num=1)
    ]

    # Fields indexed by wagtailsearch (used by PollQuerySet.search).
    search_fields = (
        index.SearchField('title', partial_match=True, boost=5),
        index.SearchField('id', boost=10),
    )

    objects = PollQuerySet.as_manager()

    def get_nice_url(self):
        # Slug built from __str__ (i.e. the title).
        return slugify(text_type(self))

    def get_template(self, request):
        # Honour an explicit ``template`` attribute when present, otherwise
        # fall back to '<app_label>/<model_name>.html'.
        try:
            return self.template
        except AttributeError:
            return '{0}/{1}.html'.format(self._meta.app_label, self._meta.model_name)

    def form(self):
        # Stops circular import
        from .forms import VoteForm
        return VoteForm(self)

    def __str__(self):
        return self.title
|
frague59/wagtailpolls
|
wagtailpolls/models.py
|
Python
|
bsd-3-clause
| 2,687
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# Metadata consumed by the cocos test harness: scripted interaction steps
# and the feature tags this test exercises.
testinfo = "s, t 1, s, t 2, s, t 3, s, t 4.1, s, t 4.2, s, q"
tags = "FadeIn, FadeOut, ColorLayer"
import pyglet
from pyglet.gl import *
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
def main():
    """Show a ColorLayer fading out and back in (2 s each way)."""
    director.init( resizable=True )
    main_scene = cocos.scene.Scene()
    l = ColorLayer( 255,128,64,64 )
    main_scene.add( l, z=0 )
    # FadeOut/FadeIn act on the layer's opacity; per the module's
    # description this does not work for arbitrary Layer objects.
    l.do( FadeOut( duration=2) + FadeIn( duration=2) )
    director.run (main_scene)
# Human-readable summary shown by the test harness.
description = """
A ColorLayer is faded-out and fadded-in.
Notice this will not work for arbitrary Layer objects.
"""

if __name__ == '__main__':
    main()
|
eevee/cocos2d-mirror
|
test/test_fadeout_layer.py
|
Python
|
bsd-3-clause
| 802
|
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
class MrpWorkOrderProduce(models.TransientModel):
    _inherit = "mrp.work.order.produce"

    # Informational aggregates shown on the wizard; all are recomputed in
    # default_get() from the active operation's time lines.
    qty_to_produce = fields.Integer(string='Quantity to produce')
    accepted_amount = fields.Integer(string='Accepted amount')
    pending_accepted_amount = fields.Integer(
        string='Pending accepted amount')
    pending_rejected_amount = fields.Integer(
        string='Pending rejected amount')
    pending_total_amount = fields.Integer(
        string='Pending total amount')
    confirmed_accepted_amount = fields.Integer(
        string='Confirmed accepted amount')
    confirmed_rejected_amount = fields.Integer(
        string='Confirmed rejected amount')
    confirmed_total_amount = fields.Integer(
        string='Confirmed total amount')
    total_amount = fields.Integer(string='Total amount')

    @api.model
    def default_get(self, var_fields):
        """Fill wizard defaults from the active work-center line.

        Sums accepted/rejected amounts of the operation's time lines split
        by state ('processed' = confirmed, 'pending'), and proposes the
        pending accepted amount as the product quantity.
        """
        operation_obj = self.env['mrp.production.workcenter.line']
        res = super(MrpWorkOrderProduce, self).default_get(var_fields)
        # The wizard is launched from a work-center line (context active_id).
        operation = operation_obj.browse(self.env.context.get('active_id'))
        accepted_amount = sum(
            x.accepted_amount for x in
            operation.operation_time_lines.filtered(
                lambda r: r.state == 'processed'))
        pending_accepted_amount = sum(
            x.accepted_amount for x in operation.operation_time_lines.filtered(
                lambda r: r.state == 'pending'))
        pending_rejected_amount = sum(
            x.rejected_amount for x in operation.operation_time_lines.filtered(
                lambda r: r.state == 'pending'))
        confirmed_rejected_amount = sum(
            x.rejected_amount for x in operation.operation_time_lines.filtered(
                lambda r: r.state == 'processed'))
        res.update({'product_qty': pending_accepted_amount,
                    'qty_to_produce': operation.production_id.product_qty,
                    'accepted_amount': accepted_amount,
                    'pending_accepted_amount': pending_accepted_amount,
                    'pending_rejected_amount': pending_rejected_amount,
                    'pending_total_amount': (pending_accepted_amount +
                                             pending_rejected_amount),
                    'confirmed_accepted_amount': accepted_amount,
                    'confirmed_rejected_amount': confirmed_rejected_amount,
                    'confirmed_total_amount': (accepted_amount +
                                               confirmed_rejected_amount),
                    'total_amount': (
                        pending_accepted_amount + pending_rejected_amount +
                        accepted_amount + confirmed_rejected_amount)})
        return res

    @api.multi
    def on_change_qty(self, product_qty, consume_lines):
        """Recompute consume lines when the quantity to produce changes.

        Delegates to the parent with the remaining (planned minus already
        processed) quantity, then annotates each consume line with the
        planned quantity, its consumption factor and the pending
        accepted/rejected amounts.  Adds a warning when the requested
        quantity exceeds the planned production quantity.
        """
        operation_obj = self.env['mrp.production.workcenter.line']
        planned_product_obj = self.env['mrp.production.product.line']
        operation = operation_obj.browse(self.env.context.get('active_id'))
        processed_accepted_amount = sum(
            x.accepted_amount for x in
            operation.operation_time_lines.filtered(
                lambda r: r.state == 'processed'))
        res = super(MrpWorkOrderProduce, self).on_change_qty(
            operation.production_id.product_qty - processed_accepted_amount,
            consume_lines)
        accepted_amount = sum(
            x.accepted_amount for x in
            operation.operation_time_lines.filtered(
                lambda r: r.state == 'pending'))
        rejected_amount = sum(
            x.rejected_amount for x in
            operation.operation_time_lines.filtered(
                lambda r: r.state == 'pending'))
        if (product_qty + processed_accepted_amount >
                operation.production_id.product_qty):
            res['warning'] = {
                'title': _('Product to produce'),
                'message': _('Quantity to produce greater than planned')}
        # When the parent produced no consume lines, build them from the
        # operation's planned product lines.
        if (not res.get('value', False) or
                (res.get('value', False) and not
                 res['value'].get('consume_lines',
                                  False)) or res['value']['consume_lines'] == []):
            res['value']['consume_lines'] = self._catch_consume_lines(
                operation)
        for line in res['value']['consume_lines']:
            cond = [('work_order', '=', operation.id),
                    ('product_id', '=', line[2].get('product_id'))]
            planned_product = planned_product_obj.search(cond, limit=1)
            # Factor = planned consumption per unit produced.
            factor = (planned_product.product_qty /
                      operation.production_id.product_qty)
            line[2]['qty_to_produce'] = operation.production_id.product_qty
            line[2]['planned_qty'] = planned_product.product_qty
            line[2]['factor'] = factor
            line[2]['product_qty'] = ((factor * accepted_amount) +
                                      (factor * rejected_amount))
            line[2]['accepted_amount'] = accepted_amount
            line[2]['rejected_amount'] = rejected_amount
        return res

    def _catch_consume_lines(self, operation):
        # Build one2many "create" commands (0, False, vals) for each planned
        # product line of the operation.
        consume_lines = []
        for line in operation.product_line:
            consume_lines.append(
                [0, False, {'lot_id': False,
                            'product_id': line.product_id.id,
                            'product_qty': line.product_qty}])
        return consume_lines

    @api.multi
    def do_consume(self):
        """Consume materials, then mark pending time lines as processed."""
        res = super(MrpWorkOrderProduce, self).do_consume()
        self._update_operation_time_lines()
        return res

    @api.multi
    def do_consume_produce(self):
        """Consume and produce, then mark pending time lines as processed."""
        res = super(MrpWorkOrderProduce, self).do_consume_produce()
        self._update_operation_time_lines()
        return res

    def _update_operation_time_lines(self):
        # Flip every 'pending' time line of the active operation to
        # 'processed' so its amounts count as confirmed next time.
        operation_obj = self.env['mrp.production.workcenter.line']
        operation = operation_obj.browse(self.env.context['active_id'])
        time_lines = operation.operation_time_lines.filtered(
            lambda r: r.state == 'pending')
        time_lines.write({'state': 'processed'})
class MrpProductProduceLine(models.TransientModel):
    _inherit = "mrp.product.produce.line"

    # Extra informational columns filled by
    # MrpWorkOrderProduce.on_change_qty() for each consume line.
    qty_to_produce = fields.Integer(string='Quantity to produce')
    planned_qty = fields.Integer(string='Planned quantity to consume')
    factor = fields.Float(string='Factor')
    accepted_amount = fields.Integer(string='Accepted amount')
    rejected_amount = fields.Integer(string='Rejected amount')
|
esthermm/odoomrp-wip
|
mrp_operations_rejected_quantity/wizard/mrp_work_order_produce.py
|
Python
|
agpl-3.0
| 6,694
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
# all regexes are case insensitive
ep_regexes = [
('standard_repeat',
# Show.Name.S01E02.S01E03.Source.Quality.Etc-Group
# Show Name - S01E02 - S01E03 - S01E04 - Ep Name
'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator
e(?P<extra_ep_num>\d+))+ # E03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''),
('fov_repeat',
# Show.Name.1x02.1x03.Source.Quality.Etc-Group
# Show Name - 1x02 - 1x03 - 1x04 - Ep Name
'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
([. _-]+(?P=season_num)x # 1x
(?P<extra_ep_num>\d+))+ # 03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''),
('standard',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720)[pi])\d+))* # additional E03/etc
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''),
('fov',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
'''
^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''),
('scene_date_format',
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<air_year>\d{4})[. _-]+ # 2010 and separator
(?P<air_month>\d{2})[. _-]+ # 11 and separator
(?P<air_day>\d{2}) # 23 and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''),
('stupid',
# tpz-abc102
'''
(?P<release_group>.+?)-\w+?[\. ]? # tpz-abc
(?!264) # don't count x264
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2})$ # 02
'''),
('verbose',
# Show Name Season 1 Episode 2 Ep Name
'''
^(?P<series_name>.+?)[. _-]+ # Show Name and separator
season[. _-]+ # season and separator
(?P<season_num>\d+)[. _-]+ # 1
episode[. _-]+ # episode and separator
(?P<ep_num>\d+)[. _-]+ # 02 and separator
(?P<extra_info>.+)$ # Source_Quality_Etc-
'''),
('season_only',
# Show.Name.S01.Source.Quality.Etc-Group
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''
),
('no_season_multi_ep',
# Show.Name.E02-03
# Show.Name.E02.2010
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|[ivx]+)) # first ep num
((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner
(?P<extra_ep_num>(?!(1080|720)[pi])(\d+|[ivx]+))[. _-]) # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''
),
('no_season_general',
# Show.Name.E23.Test
# Show.Name.Part.3.Source.Quality.Etc-Group
# Show.Name.Part.1.and.Part.2.Blah-Group
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|([ivx]+(?=[. _-])))) # first ep num
([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner
((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part
(?P<extra_ep_num>(?!(1080|720)[pi])
(\d+|([ivx]+(?=[. _-]))))[. _-])* # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''
),
('bare',
# Show.Name.102.Source.Quality.Etc-Group
'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2}) # 02 and separator
([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc-
(-(?P<release_group>.+))?)?$ # Group
'''),
('no_season',
# Show Name - 01 - Ep Name
# 01 - Ep Name
'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<ep_num>\d{2}) # 02
[. _-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])-(?P<release_group>[^-]+))?)?$ # Group
'''
),
]
anime_ep_regexes = [
('anime_ultimate',
"""
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>\d{1,3})
(-(?P<extra_ab_ep_num>\d{1,3}))?[ ._-]+?
(?:v(?P<version>[0-9]))?
(?:[\w\.]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
(?:[ ._]?\[(?P<crc>\w+)\])?
.*?
"""
),
('anime_naruto',
'''^(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>\d{1,3})
(-(?P<extra_ab_ep_num>\d{1,3}))?
(v(?P<version>[0-9]))?
([ ._-]*(VOSTFR)[ ._-]*)?
(par[ ._-]*)((?P<release_group>.+?)[ ._]+)?
[ ._-]*(?P<extra_info>\([ ._]*\d{3,4}[xp\*]?\d{0,4}[ ._]*\) - [a-zA-Z ]{10})?
.*?
'''),
('anime_standard',
# [Group Name] Show Name.13-14
# [Group Name] Show Name - 13-14
# Show Name 13-14
# [Group Name] Show Name.13
# [Group Name] Show Name - 13
# Show Name 13
'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>\d{1,3}) # E01
(-(?P<extra_ab_ep_num>\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_round',
# TODO examples
# [Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]
# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)
'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>\d{1,3}) # E01
(-(?P<extra_ab_ep_num>\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\((?P<extra_info>(CX[ ._-]?)?\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_slash',
# [SGKK] Bleach 312v1 [720p/MKV]
'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>\d{1,3}) # E01
(-(?P<extra_ab_ep_num>\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}p) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_codec',
# [Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]
# [Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]
# [Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]
'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._]* # Show_Name and separator
([ ._-]+-[ ._-]+[A-Z]+[ ._-]+)?[ ._-]+ # funny stuff, this is sooo nuts ! this will kick me in the butt one day
(?P<ep_ab_num>\d{1,3}) # E01
(-(?P<extra_ab_ep_num>\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
([ ._-](\[\w{1,2}\])?\[[a-z][.]?\w{2,4}\])? #codec
[ ._-]*\[(?P<extra_info>(\d{3,4}[xp]?\d{0,4})?[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])?
.*? # Separator and EOL
'''),
('anime_and_normal',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
(?P<ep_ab_num>\d{1,3}) # absolute number
(-(?P<extra_ab_ep_num>\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''
),
('anime_and_normal_x',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[xX](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
(?P<ep_ab_num>\d{1,3}) # absolute number
(-(?P<extra_ab_ep_num>\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''
),
('anime_and_normal_reverse',
# Bleach - 313-314 - s16e03-04
'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
(?P<ep_ab_num>\d{1,3}) # absolute number
(-(?P<extra_ab_ep_num>\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''
),
('anime_and_normal_front',
# 165.Naruto Shippuuden.s08e014
'''
^(?P<ep_ab_num>\d{1,3}) # start of string and absolute number
(-(?P<extra_ab_ep_num>\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))?[ ._-]+ # the version e.g. "v2"
(?P<series_name>.+?)[ ._-]+
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+)
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''
),
('anime_ep_name',
"""
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>\d{1,3})
(-(?P<extra_ab_ep_num>\d{1,3}))?[ ._-]*?
(?:v(?P<version>[0-9])[ ._-]+?)?
(?:.+?[ ._-]+?)?
\[(?P<extra_info>\w+)\][ ._-]?
(?:\[(?P<crc>\w{8})\])?
.*?
"""
),
('anime_bare',
# One Piece - 102
# [ACX]_Wolf's_Spirit_001.mkv
'''
^(\[(?P<release_group>.+?)\][ ._-]*)?
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>\d{3}) # E01
(-(?P<extra_ab_ep_num>\d{3}))? # E02
(v(?P<version>[0-9]))? # v2
.*? # Separator and EOL
''')
]
|
stephanehenry27/Sickbeard-anime
|
sickbeard/name_parser/regexes.py
|
Python
|
gpl-3.0
| 19,613
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, thumbor-community
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.
import logging
from redis import Redis, RedisError
from thumbor.utils import on_exception
from tornado.concurrent import return_future
from tc_shortener.storages import BaseStorage
logger = logging.getLogger('thumbor')
class Storage(BaseStorage):
    # Class-level Redis client, reused as a singleton by every instance
    # created with shared_client=True.
    storage = None

    def __init__(self, context, shared_client=True):
        '''Initialize the RedisStorage
        :param thumbor.context.Context context: Current context
        :param boolean shared_client: When set to True a singleton client will
                                      be used.
        '''
        super(Storage, self).__init__(context)
        self.shared_client = shared_client
        self.storage = self.reconnect_redis()

    def get_storage(self):
        '''Get the storage instance.
        :return Redis: Redis instance
        '''
        # Reconnect lazily when the client was dropped by on_redis_error().
        if self.storage:
            return self.storage
        self.storage = self.reconnect_redis()
        return self.storage

    def reconnect_redis(self):
        # Build (or reuse) the Redis client configured from the context's
        # SHORTENER_REDIS_STORAGE_SERVER_* settings.
        if self.shared_client and Storage.storage:
            return Storage.storage
        password = self.context.config.get(
            'SHORTENER_REDIS_STORAGE_SERVER_PASSWORD',
            None
        )
        storage = Redis(
            port=self.context.config.SHORTENER_REDIS_STORAGE_SERVER_PORT,
            host=self.context.config.SHORTENER_REDIS_STORAGE_SERVER_HOST,
            db=self.context.config.SHORTENER_REDIS_STORAGE_SERVER_DB,
            password=password
        )
        if self.shared_client:
            Storage.storage = storage
        return storage

    def on_redis_error(self, fname, exc_type, exc_value):
        '''Callback executed when there is a redis error.
        :param string fname: Function name that was being called.
        :param type exc_type: Exception type
        :param Exception exc_value: The current exception
        :returns: Default value or raise the current exception
        '''
        # Drop the cached client so the next call reconnects, then re-raise.
        if self.shared_client:
            Storage.storage = None
        else:
            self.storage = None
        raise exc_value

    @on_exception(on_redis_error, RedisError)
    def put(self, key, url):
        # Store the shortened-key -> url mapping.
        storage = self.get_storage()
        storage.set(key, url)

    @return_future
    def get(self, key, callback):
        # Tornado-style async accessor: result delivered via ``callback``.
        @on_exception(self.on_redis_error, RedisError)
        def wrap():
            return self.get_storage().get(key)

        callback(wrap())
|
thumbor-community/shortener
|
tc_shortener/storages/redis_storage.py
|
Python
|
mit
| 2,587
|
import _plotly_utils.basevalidators
class XcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``histogram.xcalendar``: restricts the
    value to one of the calendar systems supported by plotly."""

    def __init__(self, plotly_name="xcalendar", parent_name="histogram", **kwargs):
        # Default calendar list; callers may override via a "values" kwarg.
        calendars = kwargs.pop(
            "values",
            [
                "chinese",
                "coptic",
                "discworld",
                "ethiopian",
                "gregorian",
                "hebrew",
                "islamic",
                "jalali",
                "julian",
                "mayan",
                "nanakshahi",
                "nepali",
                "persian",
                "taiwan",
                "thai",
                "ummalqura",
            ],
        )
        super(XcalendarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=calendars,
            **kwargs
        )
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram/_xcalendar.py
|
Python
|
mit
| 1,013
|
import numpy as np
import pickle
from pygmin.potentials.lj import LJ
from pygmin.NEB.NEB import NEB
import pylab as pl
# Load pre-generated endpoint coordinate pairs and run a NEB (nudged
# elastic band) optimization between each pair on a Lennard-Jones
# potential, plotting the resulting band energies.
# NOTE(review): Python 2 script -- text-mode "r" for pickle and the
# unclosed file handle are tolerated here.
dataset = pickle.load(open("coords.3.dat", "r"))
pot = LJ()
for coords1,coords2 in dataset:
    neb = NEB(coords1,coords2,pot)
    neb.optimize()
    pl.plot(neb.energies)
pl.show()
|
js850/PyGMIN
|
scripts/benchmark/neb_benchmark.py
|
Python
|
gpl-3.0
| 311
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read data file users.parquet in local Spark distro:
$ cd $SPARK_HOME
$ export AVRO_PARQUET_JARS=/path/to/parquet-avro-1.5.0.jar
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \\
--jars $AVRO_PARQUET_JARS \\
./examples/src/main/python/parquet_inputformat.py \\
examples/src/main/resources/users.parquet
<...lots of log output...>
{u'favorite_color': None, u'name': u'Alyssa', u'favorite_numbers': [3, 9, 15, 20]}
{u'favorite_color': u'red', u'name': u'Ben', u'favorite_numbers': []}
<...more log output...>
"""
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("""
        Usage: parquet_inputformat.py <data_file>
        Run with example jar:
        ./bin/spark-submit --driver-class-path /path/to/example/jar \\
                /path/to/examples/parquet_inputformat.py <data_file>
        Assumes you have Parquet data stored in <data_file>.
        """, file=sys.stderr)
        # sys.exit, not the bare exit() helper: exit() is injected by the
        # site module for interactive use and is not guaranteed in scripts.
        sys.exit(-1)
    path = sys.argv[1]
    spark = SparkSession\
        .builder\
        .appName("ParquetInputFormat")\
        .getOrCreate()
    sc = spark.sparkContext
    # Read the Parquet file through the Hadoop InputFormat API; the value
    # converter turns Avro IndexedRecord values into Python objects.
    parquet_rdd = sc.newAPIHadoopFile(
        path,
        'org.apache.parquet.avro.AvroParquetInputFormat',
        'java.lang.Void',
        'org.apache.avro.generic.IndexedRecord',
        valueConverter='org.apache.spark.examples.pythonconverters.IndexedRecordToJavaConverter')
    output = parquet_rdd.map(lambda x: x[1]).collect()
    for k in output:
        print(k)
    spark.stop()
|
esi-mineset/spark
|
examples/src/main/python/parquet_inputformat.py
|
Python
|
apache-2.0
| 2,386
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`formattingtagform` provides an Tag Edit facility. The Base set are protected and included each time loaded.
Custom tags can be defined and saved. The Custom Tag arrays are saved in a json string so QSettings works on them.
Base Tags cannot be changed.
"""
from PyQt4 import QtGui
from openlp.core.common import translate
from openlp.core.lib import FormattingTags
from openlp.core.ui.formattingtagdialog import Ui_FormattingTagDialog
from openlp.core.ui.formattingtagcontroller import FormattingTagController
class EditColumn(object):
    """Named indices for the editable tag table's columns.

    Using these constants instead of bare integers keeps the
    cell-handling code self-describing.
    """
    Description = 0   # free-text description of the tag
    Tag = 1           # short tag token typed by the user
    StartHtml = 2     # opening HTML fragment
    EndHtml = 3       # closing HTML fragment
class FormattingTagForm(QtGui.QDialog, Ui_FormattingTagDialog, FormattingTagController):
    """
    The :class:`FormattingTagForm` manages the settings tab .

    Protected (base) tags are shown in a read-only table; custom tags live
    in an editable table and are validated cell-by-cell as the user edits.
    """
    def __init__(self, parent):
        """
        Constructor

        :param parent: Parent widget for this dialog.
        """
        super(FormattingTagForm, self).__init__(parent)
        self.setupUi(self)
        self._setup()
    def _setup(self):
        """
        Set up the class. This method is mocked out by the tests.
        """
        self.services = FormattingTagController()
        self.tag_table_widget.itemSelectionChanged.connect(self.on_row_selected)
        self.new_button.clicked.connect(self.on_new_clicked)
        self.delete_button.clicked.connect(self.on_delete_clicked)
        self.tag_table_widget.currentCellChanged.connect(self.on_current_cell_changed)
        self.button_box.rejected.connect(self.close)
        # Forces reloading of tags from openlp configuration.
        FormattingTags.load_tags()
        # Guard flags read by on_current_cell_changed: skip validation while
        # a row is being deleted or the table is being repopulated.
        self.is_deleting = False
        self.reloading = False
    def exec_(self):
        """
        Load Display and set field state.
        """
        # Create initial copy from master
        self._reloadTable()
        return QtGui.QDialog.exec_(self)
    def on_row_selected(self):
        """
        Table Row selected so display items and set field state.
        """
        self.delete_button.setEnabled(True)
    def on_new_clicked(self):
        """
        Add a new tag to edit list and select it for editing.
        """
        new_row = self.tag_table_widget.rowCount()
        self.tag_table_widget.insertRow(new_row)
        # Pre-fill the new row with placeholder values keyed to the row number.
        self.tag_table_widget.setItem(new_row, 0, QtGui.QTableWidgetItem(translate('OpenLP.FormattingTagForm',
                                                                                  'New Tag %d' % new_row)))
        self.tag_table_widget.setItem(new_row, 1, QtGui.QTableWidgetItem('n%d' % new_row))
        self.tag_table_widget.setItem(new_row, 2,
                                      QtGui.QTableWidgetItem(translate('OpenLP.FormattingTagForm', '<HTML here>')))
        self.tag_table_widget.setItem(new_row, 3, QtGui.QTableWidgetItem(''))
        self.tag_table_widget.resizeRowsToContents()
        self.tag_table_widget.scrollToBottom()
        self.tag_table_widget.selectRow(new_row)
    def on_delete_clicked(self):
        """
        Delete selected custom row.
        """
        selected = self.tag_table_widget.currentRow()
        if selected != -1:
            # Suppress the cell-change handler fired by removeRow().
            self.is_deleting = True
            self.tag_table_widget.removeRow(selected)
    def accept(self):
        """
        Update Custom Tag details if not duplicate and save the data.
        """
        count = 0
        self.services.pre_save()
        # Validate every editable row; stop at (and select) the first bad one.
        while count < self.tag_table_widget.rowCount():
            error = self.services.validate_for_save(self.tag_table_widget.item(count, 0).text(),
                                                    self.tag_table_widget.item(count, 1).text(),
                                                    self.tag_table_widget.item(count, 2).text(),
                                                    self.tag_table_widget.item(count, 3).text())
            if error:
                QtGui.QMessageBox.warning(self, translate('OpenLP.FormattingTagForm', 'Validation Error'), error,
                                          QtGui.QMessageBox.Ok)
                self.tag_table_widget.selectRow(count)
                return
            count += 1
        self.services.save_tags()
        QtGui.QDialog.accept(self)
    def _reloadTable(self):
        """
        Reset List for loading.

        Protected tags go into the read-only table, custom tags into the
        editable one.
        """
        self.reloading = True
        self.tag_table_widget_read.clearContents()
        self.tag_table_widget_read.setRowCount(0)
        self.tag_table_widget.clearContents()
        self.tag_table_widget.setRowCount(0)
        self.new_button.setEnabled(True)
        self.delete_button.setEnabled(False)
        for line_number, html in enumerate(FormattingTags.get_html_tags()):
            if html['protected']:
                line = self.tag_table_widget_read.rowCount()
                self.tag_table_widget_read.setRowCount(line + 1)
                self.tag_table_widget_read.setItem(line, 0, QtGui.QTableWidgetItem(html['desc']))
                self.tag_table_widget_read.setItem(line, 1, QtGui.QTableWidgetItem(self._strip(html['start tag'])))
                self.tag_table_widget_read.setItem(line, 2, QtGui.QTableWidgetItem(html['start html']))
                self.tag_table_widget_read.setItem(line, 3, QtGui.QTableWidgetItem(html['end html']))
                self.tag_table_widget_read.resizeRowsToContents()
            else:
                line = self.tag_table_widget.rowCount()
                self.tag_table_widget.setRowCount(line + 1)
                self.tag_table_widget.setItem(line, 0, QtGui.QTableWidgetItem(html['desc']))
                self.tag_table_widget.setItem(line, 1, QtGui.QTableWidgetItem(self._strip(html['start tag'])))
                self.tag_table_widget.setItem(line, 2, QtGui.QTableWidgetItem(html['start html']))
                self.tag_table_widget.setItem(line, 3, QtGui.QTableWidgetItem(html['end html']))
                self.tag_table_widget.resizeRowsToContents()
            # Permanent (persistent) tags do not have this key
            html['temporary'] = False
        self.reloading = False
    def on_current_cell_changed(self, cur_row, cur_col, pre_row, pre_col):
        """
        This function processes all user edits in the table. It is called on each cell change.

        :param cur_row: Row of the newly current cell.
        :param cur_col: Column of the newly current cell.
        :param pre_row: Row of the previously current (just edited) cell.
        :param pre_col: Column of the previously current cell.
        """
        if self.is_deleting:
            self.is_deleting = False
            return
        if self.reloading:
            return
        # only process for editable rows
        if self.tag_table_widget.item(pre_row, 0):
            item = self.tag_table_widget.item(pre_row, pre_col)
            text = item.text()
            errors = None
            # NOTE(review): `is` on small ints relies on CPython interning;
            # `==` would be the safer comparison — confirm before changing.
            if pre_col is EditColumn.Description:
                if not text:
                    errors = translate('OpenLP.FormattingTagForm', 'Description is missing')
            elif pre_col is EditColumn.Tag:
                if not text:
                    errors = translate('OpenLP.FormattingTagForm', 'Tag is missing')
            elif pre_col is EditColumn.StartHtml:
                # HTML edited
                item = self.tag_table_widget.item(pre_row, 3)
                end_html = item.text()
                errors, tag = self.services.start_tag_changed(text, end_html)
                if tag:
                    self.tag_table_widget.setItem(pre_row, 3, QtGui.QTableWidgetItem(tag))
                self.tag_table_widget.resizeRowsToContents()
            elif pre_col is EditColumn.EndHtml:
                # HTML edited
                item = self.tag_table_widget.item(pre_row, 2)
                start_html = item.text()
                errors, tag = self.services.end_tag_changed(start_html, text)
                if tag:
                    self.tag_table_widget.setItem(pre_row, 3, QtGui.QTableWidgetItem(tag))
            if errors:
                QtGui.QMessageBox.warning(self, translate('OpenLP.FormattingTagForm', 'Validation Error'), errors,
                                          QtGui.QMessageBox.Ok)
            self.tag_table_widget.resizeRowsToContents()
|
crossroadchurch/paul
|
openlp/core/ui/formattingtagform.py
|
Python
|
gpl-2.0
| 9,571
|
#!/usr/bin/env python
"""
Compute formation enthalpy from given structures.
The 1st file in the arguments is the product and the following files are the reactants.
(Assuming that the product is only one chemical compound not plural.)
If --erg-xxx option is not specified, pmd will be performed to get energies.
Usage:
fenthalpy.py [options]
Options:
-h, --help Show this message and exit.
--dry Dry run. Calculate only coefficients of reactants.
--product PROD
Only one atom config file of the product. [default: None]
--reactants REACT
Atom config files of reactants. Comma separated. [default: None]
--erg-prod EPROD
Energy per atom of the product if available. If provided, not to perform MD relaxation. [default: None]
--ergs-react ERGS
Energies per atom of reactants if available in the order corresponding to given files. Comma separated.
If provided, not to perform MD relaxation. [default: None]
--nstp NSTP Number of steps for relaxation MD. [default: 1000]
--dt DT Time interval for relaxation MD. [default: -2.0]
--out4fp Write out to a file in the fp.py data format.
--outfname OUTFILE
Output file name for out4fp. [default: data.pmd.fenth]
--print-level IPRINT
Print level in pmd. [default: 0]
"""
import os,sys
from datetime import datetime
from docopt import docopt
import numpy as np
import nappy
__author__ = "RYO KOBAYASHI"
__version__ = "rev210809"
def get_unit_comp(nsys):
    """
    Get an unit formula from arbitrary composition.

    Input:
      nsys: NAPSystem

    Output:
      gcd: integer
        Greatest common divider.
      unum_sp: list of integer
        List of number of species that is the unit composition.
    """
    num_species = nsys.natm_per_species()
    gcd = np.gcd.reduce(num_species)
    # Floor division keeps the counts integral: every count is an exact
    # multiple of the GCD, and true division (/) would silently return
    # floats despite the docstring promising integers.
    unum_sp = [ n//gcd for n in num_species ]
    return gcd,unum_sp
def get_reactant_coeffs(reactants,product):
    """
    Get coefficients for reactant structures by solving the least-squares
    problem min |Ax-b|^2, where A holds per-species atom counts of the
    reactants and b those of the product.
    This is not available for the case the number of reactants is greater than the number of species.

    Input:
      reactants: list of NAPSystems
      product: NAPSystem

    Output:
      coeffs: numpy array of coeffcients
    """
    specorder = product.specorder
    b_vec = np.array([ float(bi) for bi in product.natm_per_species()])
    #...Construct A_mat: A[isp,ir] = number of atoms of species isp in reactant ir
    nsp = len(specorder)
    nreact = len(reactants)
    A_mat = np.zeros((nsp,nreact))
    for ir,reactant in enumerate(reactants):
        specorder_ir = reactant.specorder
        natm_per_sp = reactant.natm_per_species()
        for isp,sp in enumerate(specorder):
            if sp not in specorder_ir:
                A_mat[isp,ir] = 0.0
            else:
                sp_in_spir = specorder_ir.index(sp)
                A_mat[isp,ir] = float(natm_per_sp[sp_in_spir])
    print(' A_mat = ',A_mat)
    print(' b_vec = ',b_vec)
    #...A_mat is generally rectangular, so solve min |Ax-b|^2 with lstsq.
    #...For full column rank this equals the normal-equations solution
    #...x = (A^T A)^{-1} A^T b, but it does not crash when A^T A is singular.
    x = np.linalg.lstsq(A_mat, b_vec, rcond=None)[0]
    #...For check
    Ax = np.dot(A_mat,x)
    if len(Ax) != len(b_vec):
        raise ValueError('len(Ax) != len(b_vec)')
    wrong = False
    for i in range(len(b_vec)):
        if abs(Ax[i] -b_vec[i]) > 0.01:
            wrong = True
    if wrong:
        print(' WARNING: Exact solution was not obtained.')
        print(' Result maybe wrong: i,Ax[i],b[i].')
        for i in range(len(b_vec)):
            print(' {0:2d} {1:5.1f} {2:5.1f}'.format(i,Ax[i],b_vec[i]))
    else:
        print(' Ax=b is satisfied, which means the exact number relationship between LHS and RHS is found.')
    return x
def calc_formation_enthalpy(ergs_react,erg_prod,coeffs):
    """
    Compute the formation enthalpy from the product energy and the
    coefficient-weighted reactant energies:

        dH = sum_i coeffs[i]*ergs_react[i] - erg_prod
    """
    if len(ergs_react) != len(coeffs):
        raise ValueError('len(ergs_react) != len(coeffs)')
    weighted_sum = sum(a * e for a, e in zip(coeffs, ergs_react))
    return weighted_sum - erg_prod
def get_pmd_done(nsys,nstp=1000,dt=-2.0,print_level=0):
    """
    Run a damped relaxation MD on *nsys* with pmd and return the PMD object.

    The relaxed structure is also written out as pmdfin_<formula>.
    """
    print(' Relaxing the system: ',nsys.get_chemical_formula())
    sim = nappy.pmd.PMD(nsys)
    sim.load_inpmd()
    # Variable-cell 'vc-Berendsen' stress control relaxing towards zero stress.
    sim.set_params(stress_control='vc-Berendsen', pressure_target=0.0,
                   stress_target=[[0.,0.,0.],[0.,0.,0.],[0.,0.,0.]],
                   stress_relax_time=50.0, print_level=print_level)
    # Damped dynamics (ifdmp=1, dmp=0.99) serves as the structural relaxation.
    sim.run(nstp=nstp,dt=dt,ifdmp=1,dmp=0.99)
    relaxed = sim.get_system()
    nappy.io.write(relaxed,fname="pmdfin_{0:s}".format(relaxed.get_chemical_formula()))
    return sim
def write_fenth_out4fp(fname,dH,vol):
"""
Write out formation enthalpy and volume per atom in fp.py general data format.
Parameters:
-----------
fname : string
Name of the output file.
dH : float
Formation enthalpy per atom.
vol : float
Volume per atom.
"""
with open(fname,'w') as f:
cmd = ' '.join(s for s in sys.argv)
f.write('# Output at {0:s} from,\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
f.write('# {0:s}\n'.format(cmd))
#...Num of data, weight for the data
f.write(' {0:6d} {1:7.3f}\n'.format(2, 1.0))
f.write(' {0:8.3f} {1:8.3f}\n'.format(dH,vol))
return None
def main(args):
    """
    Drive the formation-enthalpy workflow from parsed docopt *args*.

    Reads the product and reactant structures, determines stoichiometric
    coefficients, obtains energies (via relaxation MD unless supplied on
    the command line), then prints and optionally writes the result.
    """
    product = args['--product']
    if product == 'None':
        raise ValueError('A product must be given via --product option.')
    reactants = [ x for x in args['--reactants'].split(',') ]
    if reactants[0] == 'None':
        raise ValueError('At least one reactants must be given via --reactants option.')
    dry = args['--dry']
    nstp = int(args['--nstp'])
    dt = float(args['--dt'])
    out4fp = args['--out4fp']
    erg_prod = args['--erg-prod']
    ergs_react = args['--ergs-react']
    iprint = int(args['--print-level'])
    # Energies stay the string 'None' when not provided; that sentinel is
    # checked again below to decide whether to run relaxation MD.
    if erg_prod != 'None':
        erg_prod = float(erg_prod)
    if ergs_react != 'None':
        ergs_react = [ float(x) for x in ergs_react.split(',') ]
        if len(ergs_react) != len(reactants):
            raise ValueError('Number of files and --ergs-react are not consistent with --reactants.')
    print(' Working directory: ',os.getcwd())
    # File names are replaced by the loaded structure objects from here on.
    product = nappy.io.read(product)
    reactants = [ nappy.io.read(f) for f in reactants ]
    print(' Product: ',product.get_chemical_formula())
    print(' Reactants: ',end='')
    for r in reactants:
        print(r.get_chemical_formula()+', ',end='')
    print('')
    #...Compute coefficients of reactants
    coeffs = get_reactant_coeffs(reactants,product)
    print(' Coefficients, x_vec: ',)
    for i,r in enumerate(reactants):
        print(' {0:<12s} = {1:>5.2f}'.format(r.get_chemical_formula(),coeffs[i]))
    sys.stdout.flush()
    if dry:
        return None
    if erg_prod == 'None': # Compute relaxation and get potential energies of given structures.
        pmd_prod = get_pmd_done(product,nstp=nstp,dt=dt,print_level=iprint)
        erg_prod = pmd_prod.result['epot']
        product = pmd_prod.get_system()
    else: # Energy per atom is given
        # Scale the per-atom energy up to the whole cell.
        erg_prod *= len(product)
    if ergs_react == 'None':
        pmds_react = [ get_pmd_done(r,nstp=nstp,dt=dt,print_level=iprint)
                       for r in reactants ]
        ergs_react = [ p.result['epot'] for p in pmds_react ]
        reactants = [ p.get_system() for p in pmds_react ]
    else:
        ergs_react = [ e*len(r) for e,r in zip(ergs_react,reactants) ]
    print(' E of product, {0:s} = {1:.3f}'.format(product.get_chemical_formula(),erg_prod))
    print(' Es of reactants:')
    for i in range(len(ergs_react)):
        r = reactants[i]
        print(' {0:<12s} = {1:>8.3f}'.format(r.get_chemical_formula(),ergs_react[i]))
    #...Get formation enthalpy
    dH = calc_formation_enthalpy(ergs_react,erg_prod,coeffs)
    # Number of formula units in the product cell.
    gcd = np.gcd.reduce(product.natm_per_species())
    print(' Formation enthalpy per formula unit:')
    print(' dH = -1*[ {0:.2f} '.format(erg_prod),end='')
    for i,r in enumerate(reactants):
        print('-{0:.2f}*({1:.2f}) '.format(coeffs[i],ergs_react[i]),end='')
    print(']/{0:d}'.format(gcd))
    print(' = -1*[ {0:.2f} '.format(erg_prod),end='')
    for i,r in enumerate(reactants):
        print('-({0:.2f}) '.format(coeffs[i]*ergs_react[i]),end='')
    print(']/{0:d}'.format(gcd))
    print(' = {0:.2f} (eV/f.u.)'.format(dH/gcd))
    print(' Formation enthalpy per atom:')
    print(' = {0:.3f} (eV/atom)'.format(dH/len(product)))
    print(' at volume per atom:')
    vol = product.get_volume()/len(product)
    print(' = {0:.3f} (Ang^3/atom)'.format(vol))
    if out4fp:
        outfname = args['--outfname']
        write_fenth_out4fp(outfname,dH/len(product),vol)
        print(' Wrote {0:s}'.format(outfname))
    return None
if __name__ == "__main__":
    # The module docstring doubles as the docopt CLI specification.
    args = docopt(__doc__)
    main(args)
|
ryokbys/nap
|
nappy/fenthalpy.py
|
Python
|
mit
| 9,251
|
# -*- coding:Utf-8 -*-
from tastypie import fields as base_fields
from tastypie_mongoengine import fields
from core.api.utils import VosaeResource
from vosae_settings.models.core_settings import StorageQuotasSettings, CoreSettings
from vosae_settings.api.doc import HELP_TEXT
# Public API of this module. NOTE(review): StorageQuotasSettingsResource is
# not exported; it is referenced only by its dotted path from
# CoreSettingsResource -- confirm this omission is intentional.
__all__ = (
    'CoreSettingsResource',
)
class StorageQuotasSettingsResource(VosaeResource):
    """Read-only API resource for the StorageQuotasSettings document."""
    # Both fields are readonly: quota figures are reported, never set via API.
    allocated_space = base_fields.IntegerField(
        attribute='allocated_space',
        readonly=True,
        help_text=HELP_TEXT['storage_quotas_settings']['allocated_space']
    )
    used_space = base_fields.IntegerField(
        attribute='used_space',
        readonly=True,
        help_text=HELP_TEXT['storage_quotas_settings']['used_space']
    )

    class Meta:
        object_class = StorageQuotasSettings
class CoreSettingsResource(VosaeResource):
    """API resource for CoreSettings, embedding the storage quota figures."""
    # Embedded resource is referenced by dotted path to avoid an import cycle
    # at class-definition time -- TODO confirm against project convention.
    quotas = fields.EmbeddedDocumentField(
        embedded='vosae_settings.api.resources.core_settings.StorageQuotasSettingsResource',
        attribute='quotas',
        readonly=True,
        help_text=HELP_TEXT['core_settings']['quotas']
    )

    class Meta:
        object_class = CoreSettings
|
Naeka/vosae-app
|
www/vosae_settings/api/resources/core_settings.py
|
Python
|
agpl-3.0
| 1,145
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Classic first program: emit a greeting to stdout.
GREETING = "Hello world!"
print(GREETING)
|
djrlj694/Python-Demo
|
hello_world.py
|
Python
|
unlicense
| 65
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from oslo_config import cfg
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.stacks as stacks
from heat.api.openstack.v1.views import stacks_view
from heat.common import context
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
from heat.common import urlfetch
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
class InstantiationDataTest(common.HeatTestCase):
    """Unit tests for stacks.InstantiationData request-body parsing."""
    def test_parse_error_success(self):
        with stacks.InstantiationData.parse_error_check('Garbage'):
            pass
    def test_parse_error(self):
        def generate_error():
            with stacks.InstantiationData.parse_error_check('Garbage'):
                raise ValueError
        self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
    def test_parse_error_message(self):
        # make sure the parser error gets through to the caller.
        # The template's indentation is deliberately inconsistent so the
        # YAML parser raises.
        bad_temp = '''
heat_template_version: '2013-05-23'
parameters:
  KeyName:
     type: string
    description: bla
'''
        def generate_error():
            with stacks.InstantiationData.parse_error_check('foo'):
                template_format.parse(bad_temp)
        parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
        self.assertIn('foo', str(parse_ex))
    def test_stack_name(self):
        body = {'stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertEqual('wibble', data.stack_name())
    def test_stack_name_missing(self):
        body = {'not the stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.stack_name)
    def test_template_inline(self):
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(template, data.template())
    def test_template_string_json(self):
        template = ('{"heat_template_version": "2013-05-23",'
                    '"foo": "bar", "blarg": "wibble"}')
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(json.loads(template), data.template())
    def test_template_string_yaml(self):
        template = '''HeatTemplateFormatVersion: 2012-12-12
foo: bar
blarg: wibble
'''
        parsed = {u'HeatTemplateFormatVersion': u'2012-12-12',
                  u'blarg': u'wibble',
                  u'foo': u'bar'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(parsed, data.template())
    def test_template_int(self):
        # A bare scalar is not a valid template document.
        template = '42'
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.template)
    def test_template_url(self):
        template = {'heat_template_version': '2013-05-23',
                    'foo': 'bar',
                    'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template_url': url}
        data = stacks.InstantiationData(body)
        mock_get = self.patchobject(urlfetch, 'get',
                                    return_value=json.dumps(template))
        self.assertEqual(template, data.template())
        mock_get.assert_called_once_with(url)
    def test_template_priority(self):
        # An inline template wins over template_url; the URL is not fetched.
        template = {'foo': 'bar', 'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template': template, 'template_url': url}
        data = stacks.InstantiationData(body)
        mock_get = self.patchobject(urlfetch, 'get')
        self.assertEqual(template, data.template())
        mock_get.assert_not_called()
    def test_template_missing(self):
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the template': template}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.template)
    def test_template_exceeds_max_template_size(self):
        # Shrink the configured limit so a small template trips it.
        cfg.CONF.set_override('max_template_size', 10)
        template = json.dumps(['a'] * cfg.CONF.max_template_size)
        body = {'template': template}
        data = stacks.InstantiationData(body)
        error = self.assertRaises(heat_exc.RequestLimitExceeded,
                                  data.template)
        msg = ('Request limit exceeded: Template size (%(actual_len)s '
               'bytes) exceeds maximum allowed size (%(limit)s bytes).') % {
                   'actual_len': len(str(template)),
                   'limit': cfg.CONF.max_template_size}
        self.assertEqual(msg, str(error))
    def test_parameters(self):
        params = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'parameters': params,
                'encrypted_param_names': [],
                'parameter_defaults': {},
                'event_sinks': [],
                'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(body, data.environment())
    def test_environment_only_params(self):
        env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
        body = {'environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual(env, data.environment())
    def test_environment_with_env_files(self):
        env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
        body = {'environment': env, 'environment_files': ['env.yaml']}
        expect = {'parameters': {},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'event_sinks': [],
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())
    def test_environment_and_parameters(self):
        body = {'parameters': {'foo': 'bar'},
                'environment': {'parameters': {'blarg': 'wibble'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'event_sinks': [],
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())
    def test_parameters_override_environment(self):
        # This tests that the cli parameters will override
        # any parameters in the environment.
        body = {'parameters': {'foo': 'bar',
                               'tester': 'Yes'},
                'environment': {'parameters': {'blarg': 'wibble',
                                               'tester': 'fail'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar',
                                 'tester': 'Yes'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'event_sinks': [],
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())
    def test_environment_empty_params(self):
        env = {'parameters': None}
        body = {'environment': env}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.environment)
    def test_environment_bad_format(self):
        env = {'somethingnotsupported': {'blarg': 'wibble'}}
        body = {'environment': json.dumps(env)}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.environment)
    def test_environment_missing(self):
        env = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual({'parameters': {}, 'encrypted_param_names': [],
                          'parameter_defaults': {}, 'resource_registry': {},
                          'event_sinks': []},
                         data.environment())
    def test_args(self):
        # Everything except the known top-level keys ends up in args().
        body = {
            'parameters': {},
            'environment': {},
            'stack_name': 'foo',
            'template': {},
            'template_url': 'http://example.com/',
            'timeout_mins': 60,
        }
        data = stacks.InstantiationData(body)
        self.assertEqual({'timeout_mins': 60}, data.args())
@mock.patch.object(policy.Enforcer, 'enforce')
class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
"""Tests the API class StackController.
Tests the API class which acts as the WSGI controller,
the endpoint processing API requests after they are routed
"""
def setUp(self):
super(StackControllerTest, self).setUp()
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8004
cfgopts = DummyConfig()
self.controller = stacks.StackController(options=cfgopts)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {},
u'outputs': [],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.index(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'stack_status': u'CREATE_COMPLETE'
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.33')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_bogus_pagination_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
'sort_keys': 'fake sort keys',
'marker': 'fake marker',
'sort_dir': 'fake sort dir',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(12, len(engine_args))
self.assertIn('limit', engine_args)
self.assertIn('sort_keys', engine_args)
self.assertIn('marker', engine_args)
self.assertIn('sort_dir', engine_args)
self.assertIn('filters', engine_args)
self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'limit': 'not-an-int'}
req = self._get('/stacks', params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
str(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_bogus_filter_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'id': 'fake id',
'status': 'fake status',
'name': 'fake name',
'action': 'fake action',
'username': 'fake username',
'tenant': 'fake tenant',
'owner_id': 'fake owner-id',
'stack_name': 'fake stack name',
'stack_identity': 'fake identity',
'creation_time': 'create timestamp',
'updated_time': 'update timestamp',
'deletion_time': 'deletion timestamp',
'notification_topics': 'fake topic',
'description': 'fake description',
'template_description': 'fake description',
'parameters': 'fake params',
'outputs': 'fake outputs',
'stack_action': 'fake action',
'stack_status': 'fake status',
'stack_status_reason': 'fake status reason',
'capabilities': 'fake capabilities',
'disable_rollback': 'fake value',
'timeout_mins': 'fake timeout',
'stack_owner': 'fake owner',
'parent': 'fake parent',
'stack_user_project_id': 'fake project id',
'tags': 'fake tags',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(16, len(filters))
for key in ('id', 'status', 'name', 'action', 'username', 'tenant',
'owner_id', 'stack_name', 'stack_action', 'stack_status',
'stack_status_reason', 'disable_rollback', 'timeout_mins',
'stack_owner', 'parent', 'stack_user_project_id'):
self.assertIn(key, filters)
for key in ('stack_identity', 'creation_time', 'updated_time',
'deletion_time', 'notification_topics', 'description',
'template_description', 'parameters', 'outputs',
'capabilities', 'tags', 'balrog'):
self.assertNotIn(key, filters)
def test_index_returns_stack_count_if_with_count_is_true(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock(return_value=0)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
def test_index_doesnt_return_stack_count_if_with_count_is_false(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'false'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock()
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
self.assertFalse(engine.count_stacks.called)
def test_index_with_count_is_invalid(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'invalid_value'}
req = self._get('/stacks', params=params)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req, tenant_id=self.tenant)
excepted = ('Unrecognized value "invalid_value" for "with_count", '
'acceptable values are: true, false')
self.assertIn(excepted, str(exc))
@mock.patch.object(rpc_client.EngineClient, 'count_stacks')
def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
mock_count_stacks.side_effect = AttributeError("Should not exist")
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
def test_index_enforces_global_index_if_global_tenant(self, mock_enforce):
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
is_registered_policy=True,
context=self.context,
target={"project_id": self.tenant}
)
def test_global_index_uses_admin_context(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
mock_admin_ctxt = self.patchobject(context, 'get_admin_context')
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY)
self.assertEqual(1, mock_admin_ctxt.call_count)
def test_index_with_admin_context(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
view_collection_mock = self.patchobject(stacks_view, 'collection')
req = self._get('/stacks')
req.context.is_admin = True
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY)
view_collection_mock.assert_called_once_with(mock.ANY,
stacks=mock.ANY,
count=mock.ANY,
include_project=True)
def test_global_index_show_deleted_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=False)
def test_global_index_show_deleted_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True)
def test_global_index_show_nested_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_nested=False)
def test_global_index_show_nested_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_nested=True)
def test_global_index_show_hidden_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_hidden': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_hidden=True)
def test_global_index_show_hidden_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_hidden': 'false'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_hidden=False)
def test_index_show_deleted_True_with_count_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True',
'with_count': 'false'}
req = self._get('/stacks', params=params)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True)
self.assertFalse(rpc_client.count_stacks.called)
def test_index_show_deleted_True_with_count_True(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock(return_value=0)
params = {'show_deleted': 'True',
'with_count': 'True'}
req = self._get('/stacks', params=params)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True)
rpc_client.count_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_detail(self, mock_call, mock_enforce):
        """The detail view reshapes the engine response for the API.

        The engine reports stack_action='CREATE' and stack_status='COMPLETE'
        separately; the API response merges them into
        stack_status='CREATE_COMPLETE' and replaces stack_identity with an
        'id' field plus self links.
        """
        self._mock_enforce_setup(mock_enforce, 'detail', True)
        req = self._get('/stacks/detail')
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        engine_resp = [
            {
                u'stack_identity': dict(identity),
                u'updated_time': u'2012-07-09T09:13:11Z',
                u'template_description': u'blah',
                u'description': u'blah',
                u'stack_status_reason': u'Stack successfully created',
                u'creation_time': u'2012-07-09T09:12:45Z',
                u'stack_name': identity.stack_name,
                u'stack_action': u'CREATE',
                u'stack_status': u'COMPLETE',
                u'parameters': {'foo': 'bar'},
                u'outputs': ['key', 'value'],
                u'notification_topics': [],
                u'capabilities': [],
                u'disable_rollback': True,
                u'timeout_mins': 60,
            }
        ]
        mock_call.return_value = engine_resp
        result = self.controller.detail(req, tenant_id=identity.tenant)
        expected = {
            'stacks': [
                {
                    'links': [{"href": self._url(identity),
                               "rel": "self"}],
                    'id': '1',
                    u'updated_time': u'2012-07-09T09:13:11Z',
                    u'template_description': u'blah',
                    u'description': u'blah',
                    u'stack_status_reason': u'Stack successfully created',
                    u'creation_time': u'2012-07-09T09:12:45Z',
                    u'stack_name': identity.stack_name,
                    u'stack_status': u'CREATE_COMPLETE',
                    u'parameters': {'foo': 'bar'},
                    u'outputs': ['key', 'value'],
                    u'notification_topics': [],
                    u'capabilities': [],
                    u'disable_rollback': True,
                    u'timeout_mins': 60,
                }
            ]
        }
        self.assertEqual(expected, result)
        # detail is expected to request an unfiltered, unpaginated listing.
        default_args = {'limit': None, 'sort_keys': None, 'marker': None,
                        'sort_dir': None, 'filters': None,
                        'show_deleted': False, 'show_nested': False,
                        'show_hidden': False, 'tags': None,
                        'tags_any': None, 'not_tags': None,
                        'not_tags_any': None}
        mock_call.assert_called_once_with(
            req.context, ('list_stacks', default_args), version='1.33')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_aterr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(AttributeError())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.33')
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/stacks')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_interr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(Exception())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(500, resp.json['code'])
self.assertEqual('Exception', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.33')
    def test_create(self, mock_enforce):
        """POST /stacks issues a fully-populated create_stack RPC (v1.36)."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'environment_files': ['foo.yaml'],
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        # The API response exposes only the id and self link of the new stack.
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': ['foo.yaml'],
              'files_container': None,
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
    def test_create_with_tags(self, mock_enforce):
        """A comma-separated 'tags' string is split into a list in args."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
    def test_adopt(self, mock_enforce):
        """Stack adopt: adopt_stack_data supplies the template.

        The request sends template=None plus serialized adopt data; the
        resulting create_stack RPC carries the template embedded in the adopt
        data and forwards adopt_stack_data unchanged in args.
        """
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {
            "heat_template_version": "2013-05-23",
            "parameters": {"app_dbx": {"type": "string"}},
            "resources": {"res1": {"type": "GenericResourceType"}}}
        parameters = {"app_dbx": "test"}
        adopt_data = {
            "status": "COMPLETE",
            "name": "rtrove1",
            "parameters": parameters,
            "template": template,
            "action": "CREATE",
            "id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
            "resources": {"res1": {
                "status": "COMPLETE",
                "name": "database_password",
                "resource_id": "yBpuUROjfGQ2gKOD",
                "action": "CREATE",
                "type": "GenericResourceType",
                "metadata": {}}}}
        body = {'template': None,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'timeout_mins': 30,
                'adopt_stack_data': str(adopt_data)}
        req = self._post('/stacks', json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        response = self.controller.create(req,
                                          tenant_id=identity.tenant,
                                          body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, response)
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30,
                       'adopt_stack_data': str(adopt_data)},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
def test_adopt_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': {},
'timeout_mins': 'not-an-int',
'adopt_stack_data': 'does not matter'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
str(ex))
mock_call.assert_not_called()
def test_adopt_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
parameters = {"app_dbx": "test"}
adopt_data = ["Test"]
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.status_code)
self.assertEqual('400 Bad Request', resp.status)
self.assertIn('Invalid adopt data', resp.text)
    def test_create_with_files(self, mock_enforce):
        """A 'files' map in the request body is forwarded to the engine."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'files': {'my.yaml': 'This is the file contents.'},
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        result = self.controller.create(req,
                                        tenant_id=identity.tenant,
                                        body=body)
        expected = {'stack':
                    {'id': '1',
                     'links': [{'href': self._url(identity), 'rel': 'self'}]}}
        self.assertEqual(expected, result)
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {'my.yaml': 'This is the file contents.'},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
    def test_create_err_rpcerr(self, mock_enforce):
        """Three distinct remote errors on create each map to HTTP 400.

        The policy mock is armed for three calls; the engine mock raises a
        different remote error on each successive request and the fault
        middleware translates all of them to 400 with the error type name.
        """
        self._mock_enforce_setup(mock_enforce, 'create', True, 3)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        unknown_parameter = heat_exc.UnknownUserParameter(key='a')
        missing_parameter = heat_exc.UserParameterMissing(key='a')
        mock_call = self.patchobject(
            rpc_client.EngineClient, 'call',
            side_effect=[
                tools.to_remote_error(AttributeError()),
                tools.to_remote_error(unknown_parameter),
                tools.to_remote_error(missing_parameter),
            ])
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('AttributeError', resp.json['error']['type'])
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('UnknownUserParameter', resp.json['error']['type'])
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('UserParameterMissing', resp.json['error']['type'])
        mock_call.assert_called_with(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
        self.assertEqual(3, mock_call.call_count)
    def test_create_err_existing(self, mock_enforce):
        """Creating a stack whose name exists maps StackExists to 409."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        error = heat_exc.StackExists(stack_name='s')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(409, resp.json['code'])
        self.assertEqual('StackExists', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
def test_create_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 'not-an-int'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
str(ex))
mock_call.assert_not_called()
def test_create_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', False)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_create_err_engine(self, mock_enforce):
        """An engine-side StackValidationFailed maps to HTTP 400."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        error = heat_exc.StackValidationFailed(message='')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('StackValidationFailed', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None,
              'template_id': None}),
            version='1.36'
        )
    # NOTE(review): 'reqest' in the method name is a typo for 'request';
    # renaming would change the test's identity so it is left as-is.
    def test_create_err_stack_bad_reqest(self, mock_enforce):
        """HTTP exceptions disguised by the controller become JSON faults.

        With debug enabled the fault app converts an HTTPExceptionDisguise
        into a regular JSON error response that includes a traceback.
        """
        cfg.CONF.set_override('debug', True)
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        error = heat_exc.HTTPExceptionDisguise(webob.exc.HTTPBadRequest())
        self.controller.create = mock.MagicMock(side_effect=error)
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create, req, body)
        # When HTTP disguised exceptions reach the fault app, they are
        # converted into regular responses, just like non-HTTP exceptions
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
        self.assertIsNotNone(resp.json['error']['traceback'])
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_stack(self, mock_format, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'preview', True)
body = {'stack_name': 'foo', 'template': {}, 'parameters': {}}
req = self._post('/stacks/preview', json.dumps(body))
mock_call.return_value = {}
mock_format.return_value = 'formatted_stack'
result = self.controller.preview(req, tenant_id=self.tenant, body=body)
self.assertEqual({'stack': 'formatted_stack'}, result)
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_with_tags_timeout(self, mock_format, mock_call,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'preview', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._post('/stacks/preview', json.dumps(body))
mock_call.return_value = {}
mock_format.return_value = 'formatted_stack_preview'
response = self.controller.preview(req,
tenant_id=identity.tenant,
body=body)
rpc_client.EngineClient.call.assert_called_once_with(
req.context,
('preview_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'event_sinks': [],
'resource_registry': {}},
'files': {},
'environment_files': None,
'files_container': None,
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}}),
version='1.36'
)
self.assertEqual({'stack': 'formatted_stack_preview'}, response)
    def test_preview_update_stack(self, mock_enforce):
        """PUT .../preview issues preview_update_stack and returns changes."""
        self._mock_enforce_setup(mock_enforce, 'preview_update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s/preview' %
                        identity, json.dumps(body))
        resource_changes = {'updated': [],
                            'deleted': [],
                            'unchanged': [],
                            'added': [],
                            'replaced': []}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=resource_changes)
        result = self.controller.preview_update(req, tenant_id=identity.tenant,
                                                stack_name=identity.stack_name,
                                                stack_id=identity.stack_id,
                                                body=body)
        self.assertEqual({'resource_changes': resource_changes}, result)
        mock_call.assert_called_once_with(
            req.context,
            ('preview_update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30}}),
            version='1.36'
        )
    def test_preview_update_stack_patch(self, mock_enforce):
        """PATCH .../preview sets the PARAM_EXISTING flag in the RPC args.

        template=None plus PARAM_EXISTING tells the engine to reuse the
        existing stack template for the preview.
        """
        self._mock_enforce_setup(mock_enforce, 'preview_update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': None,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s/preview' %
                          identity, json.dumps(body))
        resource_changes = {'updated': [],
                            'deleted': [],
                            'unchanged': [],
                            'added': [],
                            'replaced': []}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=resource_changes)
        result = self.controller.preview_update_patch(
            req, tenant_id=identity.tenant, stack_name=identity.stack_name,
            stack_id=identity.stack_id, body=body)
        self.assertEqual({'resource_changes': resource_changes}, result)
        mock_call.assert_called_once_with(
            req.context,
            ('preview_update_stack',
             {'stack_identity': dict(identity),
              'template': None,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30}}),
            version='1.36'
        )
    # NOTE(review): the decorator-injected mock_call is immediately rebound
    # by patchobject below, making the decorator redundant — looks safe to
    # drop along with the parameter, but confirm before changing.
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_update_immutable_parameter(self, mock_call, mock_enforce):
        """Updating an immutable parameter maps the remote error to 400."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'param1': u'bar'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' %
                        identity, json.dumps(body))
        error = heat_exc.ImmutableParameterModified(keys='param1')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.update,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('ImmutableParameterModified',
                         resp.json['error']['type'])
        self.assertIn("The following parameters are immutable",
                      str(resp.json['error']['message']))
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {u'parameters': parameters,
                         u'encrypted_param_names': [],
                         u'parameter_defaults': {},
                         u'event_sinks': [],
                         u'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
def test_lookup(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s' % identity)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=dict(identity))
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name)
self.assertEqual(self._url(identity), found.location)
mock_call.assert_called_once_with(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
)
def test_lookup_arn(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks%s' % identity.arn_url_path())
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup,
req, tenant_id=identity.tenant, stack_name=identity.arn())
self.assertEqual(self._url(identity), found.location)
def test_lookup_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
error = heat_exc.EntityNotFound(entity='Stack', name='a')
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=tools.to_remote_error(error))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(404, resp.json['code'])
self.assertEqual('EntityNotFound', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context,
('identify_stack', {'stack_name': stack_name})
)
def test_lookup_err_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
def test_lookup_resource(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s/resources' % identity)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=dict(identity))
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name,
path='resources')
self.assertEqual(self._url(identity) + '/resources',
found.location)
mock_call.assert_called_once_with(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
)
def test_lookup_resource_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
error = heat_exc.EntityNotFound(entity='Stack', name='a')
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=tools.to_remote_error(error))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(404, resp.json['code'])
self.assertEqual('EntityNotFound', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context,
('identify_stack', {'stack_name': stack_name})
)
def test_lookup_resource_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
def test_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity,
params={'resolve_outputs': True})
parameters = {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'}
outputs = [{u'output_key': u'WebsiteURL',
u'description': u'URL for Wordpress wiki',
u'output_value': u'http://10.0.0.8/wordpress'}]
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': True,
u'timeout_mins':60,
u'capabilities': [],
}
]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
response = self.controller.show(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
expected = {
'stack': {
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '6',
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'capabilities': [],
u'notification_topics': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
}
self.assertEqual(expected, response)
mock_call.assert_called_once_with(
req.context,
('show_stack', {'stack_identity': dict(identity),
'resolve_outputs': True}),
version='1.20'
)
def test_show_without_resolve_outputs(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity,
params={'resolve_outputs': False})
parameters = {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'}
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': True,
u'timeout_mins':60,
u'capabilities': [],
}
]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
response = self.controller.show(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
expected = {
'stack': {
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '6',
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'capabilities': [],
u'notification_topics': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
}
self.assertEqual(expected, response)
mock_call.assert_called_once_with(
req.context,
('show_stack', {'stack_identity': dict(identity),
'resolve_outputs': False}),
version='1.20'
)
    def test_show_notfound(self, mock_enforce):
        """Show of a nonexistent stack maps EntityNotFound to HTTP 404."""
        self._mock_enforce_setup(mock_enforce, 'show', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.EntityNotFound(entity='Stack', name='a')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.show,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('show_stack', {'stack_identity': dict(identity),
                            'resolve_outputs': True}),
            version='1.20'
        )
    # The former test_show_invalidtenant for stacks is now handled by the
    # more generic sRBAC policy tests.
def test_deprecated_show_invalidtenant(self, mock_enforce):
identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_get_template(self, mock_enforce):
        """The template action returns the engine's get_template payload."""
        self._mock_enforce_setup(mock_enforce, 'template', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        template = {u'Foo': u'bar'}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=template)
        response = self.controller.template(req, tenant_id=identity.tenant,
                                            stack_name=identity.stack_name,
                                            stack_id=identity.stack_id)
        self.assertEqual(template, response)
        mock_call.assert_called_once_with(
            req.context,
            ('get_template', {'stack_identity': dict(identity)})
        )
    def test_get_environment(self, mock_enforce):
        """The environment action calls get_environment (RPC version 1.28)."""
        self._mock_enforce_setup(mock_enforce, 'environment', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        env = {'parameters': {'Foo': 'bar'}}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=env)
        response = self.controller.environment(req, tenant_id=identity.tenant,
                                               stack_name=identity.stack_name,
                                               stack_id=identity.stack_id)
        self.assertEqual(env, response)
        mock_call.assert_called_once_with(
            req.context,
            ('get_environment', {'stack_identity': dict(identity)},),
            version='1.28',
        )
    def test_get_files(self, mock_enforce):
        """The files action calls get_files (RPC version 1.32)."""
        self._mock_enforce_setup(mock_enforce, 'files', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        files = {'foo.yaml': 'i am yaml'}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=files)
        response = self.controller.files(req, tenant_id=identity.tenant,
                                         stack_name=identity.stack_name,
                                         stack_id=identity.stack_id)
        self.assertEqual(files, response)
        mock_call.assert_called_once_with(
            req.context,
            ('get_files', {'stack_identity': dict(identity)},),
            version='1.32',
        )
def test_get_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s/template'
% identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_get_template_err_notfound(self, mock_enforce):
        """Template fetch for a missing stack maps EntityNotFound to 404."""
        self._mock_enforce_setup(mock_enforce, 'template', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.EntityNotFound(entity='Stack', name='a')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.template,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('get_template', {'stack_identity': dict(identity)})
        )
    def test_update(self, mock_enforce):
        """PUT update issues update_stack (RPC 1.36) and returns 202.

        The RPC payload wraps the user parameters in the full environment
        structure (encrypted_param_names, parameter_defaults, event_sinks,
        resource_registry) and passes timeout_mins through in args.
        """
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_tags(self, mock_enforce):
        """A comma-separated 'tags' string is split into a list in args."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
              'template_id': None}),
            version='1.36'
        )
    def test_update_bad_name(self, mock_enforce):
        """Update of a nonexistent stack maps EntityNotFound to HTTP 404."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        error = heat_exc.EntityNotFound(entity='Stack', name='a')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.update,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id,
                                             body=body)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {u'parameters': parameters,
                         u'encrypted_param_names': [],
                         u'parameter_defaults': {},
                         u'event_sinks': [],
                         u'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_timeout_not_int(self, mock_enforce):
        """A non-integer timeout_mins is rejected with 400 before any RPC."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 'not-int'}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.update, req,
                               tenant_id=identity.tenant,
                               stack_name=identity.stack_name,
                               stack_id=identity.stack_id,
                               body=body)
        self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                         str(ex))
        # Validation fails client-side, so the engine is never called.
        self.assertFalse(mock_call.called)
def test_update_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', False)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_update_with_existing_template(self, mock_enforce):
        """PATCH with template=None sets PARAM_EXISTING to reuse the template."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        body = {'template': None,
                'parameters': {},
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': None,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_existing_parameters(self, mock_enforce):
        """PATCH with empty parameters keeps existing ones via PARAM_EXISTING."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        body = {'template': template,
                'parameters': {},
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_existing_parameters_with_tags(self, mock_enforce):
        """PATCH with tags splits them into a list alongside PARAM_EXISTING."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        body = {'template': template,
                'parameters': {},
                'files': {},
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30,
                       'tags': ['tag1', 'tag2']},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_patched_existing_parameters(self, mock_enforce):
        """PATCH with new parameters merges them under PARAM_EXISTING."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_patch_timeout_not_int(self, mock_enforce):
        """PATCH with a non-integer timeout_mins fails with 400, no RPC."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 'not-int'}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.update_patch, req,
                               tenant_id=identity.tenant,
                               stack_name=identity.stack_name,
                               stack_id=identity.stack_id,
                               body=body)
        self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
                         str(ex))
        # Validation fails client-side, so the engine is never called.
        self.assertFalse(mock_call.called)
    def test_update_with_existing_and_default_parameters(
            self, mock_enforce):
        """PATCH with clear_parameters forwards them in args for reset."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
        body = {'template': template,
                'parameters': {},
                'clear_parameters': clear_params,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'clear_parameters': clear_params,
                       'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_update_with_patched_and_default_parameters(
            self, mock_enforce):
        """PATCH with both new and cleared parameters forwards both sets."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
        body = {'template': template,
                'parameters': parameters,
                'clear_parameters': clear_params,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=dict(identity))
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'args': {rpc_api.PARAM_EXISTING: True,
                       'clear_parameters': clear_params,
                       'timeout_mins': 30},
              'template_id': None}),
            version='1.36'
        )
    def test_delete(self, mock_enforce):
        """A successful delete issues delete_stack and returns 204."""
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        # Engine returns None when delete successful
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=None)
        self.assertRaises(webob.exc.HTTPNoContent,
                          self.controller.delete,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id)
        mock_call.assert_called_once_with(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        )
def test_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_export(self, mock_enforce):
        """Export returns the engine's export_stack JSON (RPC 1.22)."""
        self._mock_enforce_setup(mock_enforce, 'export', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s/export' %
                        identity)
        # Engine returns json data
        expected = {"name": "test", "id": "123"}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=expected)
        ret = self.controller.export(req,
                                     tenant_id=identity.tenant,
                                     stack_name=identity.stack_name,
                                     stack_id=identity.stack_id)
        self.assertEqual(expected, ret)
        mock_call.assert_called_once_with(
            req.context,
            ('export_stack', {'stack_identity': dict(identity)}),
            version='1.22'
        )
    def test_abandon(self, mock_enforce):
        """Abandon returns the engine's abandon_stack JSON payload."""
        self._mock_enforce_setup(mock_enforce, 'abandon', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        # Engine returns json data on abandon completion
        expected = {"name": "test", "id": "123"}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=expected)
        ret = self.controller.abandon(req,
                                      tenant_id=identity.tenant,
                                      stack_name=identity.stack_name,
                                      stack_id=identity.stack_id)
        self.assertEqual(expected, ret)
        mock_call.assert_called_once_with(
            req.context,
            ('abandon_stack', {'stack_identity': dict(identity)})
        )
def test_abandon_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.abandon,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_delete_bad_name(self, mock_enforce):
        """Delete of a nonexistent stack maps EntityNotFound to HTTP 404."""
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.EntityNotFound(entity='Stack', name='a')
        # Engine returns None when delete successful
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.delete,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        )
    def test_validate_template(self, mock_enforce):
        """Validate passes the template through validate_template (RPC 1.36)."""
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        engine_response = {
            u'Description': u'blah',
            u'Parameters': [
                {
                    u'NoEcho': u'false',
                    u'ParameterKey': u'InstanceType',
                    u'Description': u'Instance type'
                }
            ]
        }
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        response = self.controller.validate_template(req,
                                                     tenant_id=self.tenant,
                                                     body=body)
        self.assertEqual(engine_response, response)
        mock_call.assert_called_once_with(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'show_nested': False,
              'ignorable_errors': None}),
            version='1.36'
        )
    def test_validate_template_error(self, mock_enforce):
        """An engine 'Error' result from validation raises HTTP 400."""
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value={'Error': 'fubar'})
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_template,
                          req, tenant_id=self.tenant, body=body)
        mock_call.assert_called_once_with(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'event_sinks': [],
                         'resource_registry': {}},
              'files': {},
              'environment_files': None,
              'files_container': None,
              'show_nested': False,
              'ignorable_errors': None}),
            version='1.36'
        )
def test_validate_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', False)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_list_resource_types(self, mock_enforce):
        """Resource types are wrapped as {'resource_types': [...]}  (RPC 1.30)."""
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        engine_response = ['AWS::EC2::Instance',
                           'AWS::EC2::EIP',
                           'AWS::EC2::EIPAssociation']
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        response = self.controller.list_resource_types(req,
                                                       tenant_id=self.tenant)
        self.assertEqual({'resource_types': engine_response}, response)
        mock_call.assert_called_once_with(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None,
                 'with_description': False
             }),
            version="1.30"
        )
    def test_list_resource_types_error(self, mock_enforce):
        """EntityNotFound from list_resource_types maps to HTTP 404."""
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        error = heat_exc.EntityNotFound(entity='Resource Type', name='')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(
            fault.FaultWrapper,
            self.controller.list_resource_types,
            req, tenant_id=self.tenant)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None,
                 'with_description': False
             }),
            version="1.30"
        )
def test_list_resource_types_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', False)
req = self._get('/resource_types')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', str(resp))
    def test_list_outputs(self, mock_enforce):
        """Outputs are wrapped as {'outputs': [...]} via list_outputs (1.19)."""
        self._mock_enforce_setup(mock_enforce, 'list_outputs', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        outputs = [
            {'output_key': 'key1', 'description': 'description'},
            {'output_key': 'key2', 'description': 'description1'}
        ]
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=outputs)
        response = self.controller.list_outputs(req, tenant_id=identity.tenant,
                                                stack_name=identity.stack_name,
                                                stack_id=identity.stack_id)
        self.assertEqual({'outputs': outputs}, response)
        mock_call.assert_called_once_with(
            req.context,
            ('list_outputs', {'stack_identity': dict(identity)}),
            version='1.19'
        )
    def test_show_output(self, mock_enforce):
        """A single output is wrapped as {'output': ...} via show_output."""
        self._mock_enforce_setup(mock_enforce, 'show_output', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s/key' % identity)
        output = {'output_key': 'key',
                  'output_value': 'val',
                  'description': 'description'}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=output)
        response = self.controller.show_output(req, tenant_id=identity.tenant,
                                               stack_name=identity.stack_name,
                                               stack_id=identity.stack_id,
                                               output_key='key')
        self.assertEqual({'output': output}, response)
        mock_call.assert_called_once_with(
            req.context,
            ('show_output', {'output_key': 'key',
                             'stack_identity': dict(identity)}),
            version='1.19'
        )
    def test_list_template_versions(self, mock_enforce):
        """Template versions are wrapped as {'template_versions': [...]}."""
        self._mock_enforce_setup(mock_enforce, 'list_template_versions', True)
        req = self._get('/template_versions')
        engine_response = [
            {'version': 'heat_template_version.2013-05-23', 'type': 'hot'},
            {'version': 'AWSTemplateFormatVersion.2010-09-09', 'type': 'cfn'}]
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        response = self.controller.list_template_versions(
            req, tenant_id=self.tenant)
        self.assertEqual({'template_versions': engine_response}, response)
        mock_call.assert_called_once_with(
            req.context, ('list_template_versions', {}),
            version="1.11"
        )
    def _test_list_template_functions(self, mock_enforce, req, engine_response,
                                      with_condition=False):
        """Shared driver asserting the list_template_functions RPC (1.35).

        :param req: prepared request for /template_versions/t1/functions
        :param engine_response: canned RPC result to return from the mock
        :param with_condition: expected with_condition flag in the RPC call
        """
        self._mock_enforce_setup(mock_enforce, 'list_template_functions', True)
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        response = self.controller.list_template_functions(
            req, tenant_id=self.tenant, template_version='t1')
        self.assertEqual({'template_functions': engine_response}, response)
        mock_call.assert_called_once_with(
            req.context, (
                'list_template_functions',
                {'template_version': 't1', 'with_condition': with_condition}),
            version="1.35"
        )
    def test_list_template_functions(self, mock_enforce):
        """Default listing excludes condition functions (with_condition=False)."""
        req = self._get('/template_versions/t1/functions')
        engine_response = [
            {'functions': 'func1', 'description': 'desc1'},
        ]
        self._test_list_template_functions(mock_enforce, req, engine_response)
def test_list_template_funcs_includes_condition_funcs(self, mock_enforce):
        """?with_condition_func=true forwards with_condition=True over RPC."""
        params = {'with_condition_func': 'true'}
        req = self._get('/template_versions/t1/functions', params=params)
        engine_response = [
            {'functions': 'func1', 'description': 'desc1'},
            {'functions': 'condition_func', 'description': 'desc2'}
        ]
        self._test_list_template_functions(mock_enforce, req, engine_response,
                                           with_condition=True)
def test_resource_schema(self, mock_enforce):
        """Schema of an existing resource type is returned verbatim (RPC v1.30)."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/ResourceWithProps')
        type_name = 'ResourceWithProps'
        engine_response = {
            'resource_type': type_name,
            'properties': {
                'Foo': {'type': 'string', 'required': False},
            },
            'attributes': {
                'foo': {'description': 'A generic attribute'},
                'Foo': {'description': 'Another generic attribute'},
            },
            'support_status': {
                'status': 'SUPPORTED',
                'version': None,
                'message': None,
            },
        }
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        response = self.controller.resource_schema(req,
                                                   tenant_id=self.tenant,
                                                   type_name=type_name)
        self.assertEqual(engine_response, response)
        mock_call.assert_called_once_with(
            req.context,
            ('resource_schema', {'type_name': type_name,
                                 'with_description': False}),
            version='1.30'
        )
def test_resource_schema_nonexist(self, mock_enforce):
        """An unknown resource type maps to a 404 EntityNotFound fault."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/BogusResourceType')
        type_name = 'BogusResourceType'
        error = heat_exc.EntityNotFound(entity='Resource Type',
                                        name='BogusResourceType')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.resource_schema,
                                             req, tenant_id=self.tenant,
                                             type_name=type_name)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('resource_schema', {'type_name': type_name,
                                 'with_description': False}),
            version='1.30'
        )
def test_resource_schema_faulty_template(self, mock_enforce):
        """InvalidGlobalResource from the engine maps to a 500 fault."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/FaultyTemplate')
        type_name = 'FaultyTemplate'
        error = heat_exc.InvalidGlobalResource(type_name='FaultyTemplate')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.resource_schema,
                                             req, tenant_id=self.tenant,
                                             type_name=type_name)
        self.assertEqual(500, resp.json['code'])
        self.assertEqual('InvalidGlobalResource', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('resource_schema', {'type_name': type_name,
                                 'with_description': False}),
            version='1.30'
        )
def test_resource_schema_err_denied_policy(self, mock_enforce):
        """Policy denial yields 403 Forbidden before any engine call."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', False)
        req = self._get('/resource_types/BogusResourceType')
        type_name = 'BogusResourceType'
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.resource_schema,
                                             req, tenant_id=self.tenant,
                                             type_name=type_name)
        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))
def test_generate_template(self, mock_enforce):
        """Template generation defaults to template_type 'cfn' (RPC v1.9)."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/TEST_TYPE/template')
        engine_response = {'Type': 'TEST_TYPE'}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_response)
        self.controller.generate_template(req, tenant_id=self.tenant,
                                          type_name='TEST_TYPE')
        mock_call.assert_called_once_with(
            req.context,
            ('generate_template', {'type_name': 'TEST_TYPE',
                                   'template_type': 'cfn'}),
            version='1.9'
        )
def test_generate_template_invalid_template_type(self, mock_enforce):
        """An unsupported template_type raises HTTPBadRequest with no RPC call."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        params = {'template_type': 'invalid'}
        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
        req = self._get('/resource_types/TEST_TYPE/template',
                        params=params)
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.generate_template,
                               req, tenant_id=self.tenant,
                               type_name='TEST_TYPE')
        self.assertIn('Template type is not supported: Invalid template '
                      'type "invalid", valid types are: cfn, hot.',
                      str(ex))
        # Validation happens client-side; the engine is never reached.
        self.assertFalse(mock_call.called)
def test_generate_template_not_found(self, mock_enforce):
        """Unknown resource type maps to a 404 EntityNotFound fault."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/NOT_FOUND/template')
        error = heat_exc.EntityNotFound(entity='Resource Type', name='a')
        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     side_effect=tools.to_remote_error(error))
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.generate_template,
                                             req, tenant_id=self.tenant,
                                             type_name='NOT_FOUND')
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('EntityNotFound', resp.json['error']['type'])
        mock_call.assert_called_once_with(
            req.context,
            ('generate_template', {'type_name': 'NOT_FOUND',
                                   'template_type': 'cfn'}),
            version='1.9'
        )
def test_generate_template_err_denied_policy(self, mock_enforce):
        """Policy denial yields 403 Forbidden before any engine call."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', False)
        req = self._get('/resource_types/NOT_FOUND/template')
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.generate_template,
                                             req, tenant_id=self.tenant,
                                             type_name='blah')
        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))
class StackSerializerTest(common.HeatTestCase):
# NOTE(review): method indentation appears to have been lost in this chunk
# (defs sit at column 0 under the class); code preserved as found.
def setUp(self):
        super(StackSerializerTest, self).setUp()
        self.serializer = stacks.StackSerializer()
def test_serialize_create(self):
        """create() sets 201, Location from the 'self' link, and JSON type."""
        result = {'stack':
                  {'id': '1',
                   'links': [{'href': 'location', "rel": "self"}]}}
        response = webob.Response()
        response = self.serializer.create(response, result)
        self.assertEqual(201, response.status_int)
        self.assertEqual('location', response.headers['Location'])
        self.assertEqual('application/json', response.headers['Content-Type'])
|
openstack/heat
|
heat/tests/api/openstack_v1/test_stacks.py
|
Python
|
apache-2.0
| 119,840
|
# Generated driver: run the sklearn2sql generic regression test, training an
# MLPRegressor on the RandomReg_500 dataset and generating Oracle SQL.
from sklearn2sql_heroku.tests.regression import generic as reg_gen

reg_gen.test_model("MLPRegressor" , "RandomReg_500" , "oracle")
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/RandomReg_500/ws_RandomReg_500_MLPRegressor_oracle_code_gen.py
|
Python
|
bsd-3-clause
| 133
|
### This program intends to list all reference in NodeExpandList.csv;
### Author: Ye Gao
### Date: 2017-11-8
### NOTE(review): Python 2 code (print statements; csv.reader over a file
### opened in 'rb' mode).
import csv
import os
import re
import scrapy
# Root path of the paper being processed, one path per run.
file = open('RootPath.dat', 'r')
path = (file.read()).replace("\n", "") # read path from path.dat;
file.close()
# Local HTML file holding the paper's reference list.
LocalPath = 'file://' + path + 'rl.html'
# Load the node table; the first row is the CSV header.
file = open('NodeExpandList.csv', 'rb')
reader = csv.reader(file)
NodeList = list(reader)
file.close()
FirstRow = NodeList.pop(0)
# Column 1 of each row is the node's path; find this run's row so we can
# read which journal/publisher format (column 6) the reference page uses.
NodePathList = []
for element in NodeList:
    NodePathList.append(element[1])
index = NodePathList.index(path)
journal = NodeList[index][6]
class QuotesSpider(scrapy.Spider):
    """Scrape the local reference-list page (rl.html) and write one
    numbered reference per line to ReferenceList.csv, creating a
    directory per reference.  The parsing strategy is selected by the
    module-level ``journal`` value read from NodeExpandList.csv.
    NOTE(review): commas/semicolons inside references are replaced by '|'
    so they survive the CSV output format.
    """
    name = "ListReference"
    start_urls = [LocalPath]
    def parse(self, response):
        ReferenceList = []
        # ACM DL / ResearchGate: references are fragments inside a table;
        # numeric fragments are the reference numbers used to re-join them.
        if (journal == "ACMDL") or (journal == "RGate"):
            ReferencePiece = []
            for reference in response.xpath('//table[@border="0"]//tr[@valign="top"]/td/div//text()').extract():
                reference = (reference.strip()).replace("\n","")
                if reference != "" and reference != "]" and reference != "[doi>" and reference[0:3] !="10.":
                    ReferencePiece.append(reference)
            # Count how many consecutive reference numbers appear.
            CounterReference = 0
            for element in ReferencePiece:
                if element.isdigit():
                    if int(element) == CounterReference + 1:
                        CounterReference += 1
            print CounterReference
            # Walk the fragments backwards, gluing text onto the current
            # entry until its leading number is reached.
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            ReferenceEntry = ""
            for element in ReferencePiece:
                if ReferencePiece[counter].isdigit() == False:
                    ReferenceEntry = str(ReferencePiece[counter].encode('ascii', 'ignore')) + ' ' + ReferenceEntry
                else:
                    if int(ReferencePiece[counter]) != CounterReference:
                        ReferenceEntry = str(ReferencePiece[counter].encode('ascii', 'ignore')) + ReferenceEntry
                    else:
                        ReferenceCombine.append((ReferenceEntry.replace(",","|")).replace(";","|"))
                        ReferenceEntry = ""
                        CounterReference = CounterReference - 1
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # Elsevier: references live in dl.bib-section elements.
        elif journal == "Elsevier":
            ReferencePiece = []
            for reference in response.xpath('//div/dl[@class="bib-section"]//text()').extract():
                reference = (reference.strip()).replace("\n","")
                if reference != "" and reference != "]" and reference != "[doi>" and reference[0:3] !="10.":
                    ReferencePiece.append(reference)
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            temp = ""
            for element in ReferencePiece:
                if (((ReferencePiece[counter]).replace('[', '')).replace(']', '')).isdigit() == False:
                    temp = str(ReferencePiece[counter].encode('ascii', 'ignore')) + temp
                else:
                    ReferenceCombine.append((temp.replace(",","|")).replace(";","|"))
                    temp = ""
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # IEEE: parse the raw HTML body with regexes instead of xpath.
        elif journal == "IEEE":
            with open(path+'rl.html', 'r') as myfile:
                data=myfile.read()
            # extract content from body;
            temp_1 = re.split(r"<body>", data)
            temp_2 = re.split(r"</body>", temp_1[1])
            body = temp_2[0]
            # NOTE(review): the replace() calls below look like HTML-entity
            # unescapes (e.g. '&amp;' -> '&') whose source strings were
            # themselves unescaped at some point; as written some are no-ops.
            notag = ((body.replace(""","")).replace("<em>","")).replace("</em>","") # remove tags;
            symbol = (notag.replace("&", "&")).replace("'", "'") # replace utf-8 symbols;
            extract = ((((symbol.replace("\n","")).replace("\r","")).replace("\t","")).replace(",","|")).replace(";","|")
            ReferenceList = re.split(r"</br></br>", extract)
            ReferenceList.pop() # delete last empty element;
            print ReferenceList
        # if journal is kuleuven;
        elif journal == "kuleuven":
            with open((response.url).replace("file://", ""), 'r') as myfile:
                data=myfile.read()
            temp_1 = ((data.replace(",", "|")).replace(";", "|")).replace("\n", " ")
            ReferenceList = re.split(r"<br>", temp_1)
            ReferenceList.pop() # delete last empty element;
        # if journal is Springer;
        elif journal == "Springer":
            ReferencePiece = []
            for reference in response.xpath('//div[@class="CitationNumber"]/text() | //div[@class="CitationContent"]/text()').extract():
                reference = (reference.strip()).replace("\n","")
                ReferencePiece.append(((reference.encode('ascii', 'ignore')).replace(',','|')).replace(';','|'))
            print ReferencePiece
            ReferenceCombine = []
            counter = len(ReferencePiece) - 1
            temp = ""
            for element in ReferencePiece:
                if (ReferencePiece[counter].replace('[', '').replace(']', '')).isdigit() == False:
                    temp = str(ReferencePiece[counter].encode('ascii', 'ignore')) + temp
                else:
                    ReferenceCombine.append((temp.replace(",","|")).replace(";","|"))
                    temp = ""
                counter = counter - 1
            ReferenceList = ReferenceCombine[::-1]
        # if journal is Wiley;
        elif journal == "Wiley":
            ReferenceList = []
            ReferenceAll = ""
            # Strip known span/cite wrappers one at a time, then split the
            # concatenated text on the closing </cite> tags.
            for reference in response.xpath('//ul[@class="article-section__references-list"]/li/cite').extract():
                reference = ((reference.strip()).replace("\n","")).encode('ascii', 'ignore')
                reference = (reference.replace(",", "|")).replace(";", "")
                step_1 = re.sub("</span>", "", reference)
                step_2 = re.sub('<span class="pageLast">', '', step_1)
                step_3 = re.sub('<span class="pageFirst">', '', step_2)
                step_4 = re.sub('<span class="pubYear">', '', step_3)
                step_5 = re.sub('<span class="author">', '', step_4)
                step_6 = re.sub('<span class="vol">', '', step_5)
                step_7 = re.sub('<span class="citedIssue">', '', step_6)
                step_8 = re.sub('<span class="articleTitle">', '', step_7)
                step_9 = re.sub('<span class="journalTitle">', '', step_8)
                step_10 = re.sub('<cite id="cit..">', '', step_9)
                step_11 = re.sub('<cite id="cit.">', '', step_10)
                step_12 = re.sub('<em>', '', step_11)
                step_13 = re.sub('</em>', '', step_12)
                step_14 = re.sub('<span class="publisherLocation">', '', step_13)
                step_15 = re.sub('<span class="otherTitle">', '', step_14)
                step_16 = re.sub('<span class="bookTitle">', '', step_15)
                step_17 = re.sub('<cite id="cgf\d\d\d\d\d-cit-00\d\d">', '', step_16)
                step_18 = re.sub('<span class="chapterTitle">', '', step_17)
                ReferenceAll += step_18
            ReferenceList = re.split(r"</cite>", ReferenceAll)
            ReferenceList.pop() # delete last element;
            print ReferenceList
        # save reference list to csv file;
        print ReferenceList
        file = open('ReferenceList.csv','wb')
        for element in ReferenceList:
            # One directory per reference, named by its 1-based index.
            os.system("mkdir " + path + "/" + str(ReferenceList.index(element)+1))
            file.write(str(ReferenceList.index(element)+1))
            file.write(',')
            file.write(element)
            file.write('\n')
        file.close()
|
sortsimilar/Citation-Tree
|
listref.py
|
Python
|
apache-2.0
| 6,423
|
"""Allauth overrides"""
import pickle
import logging
from allauth.account.adapter import DefaultAccountAdapter
from django.template.loader import render_to_string
from readthedocs.core.utils import send_email
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
log = logging.getLogger(__name__)
class AccountAdapter(DefaultAccountAdapter):

    """Customize Allauth emails to match our current patterns."""

    def format_email_subject(self, subject):
        """Return the subject coerced to text, without Allauth's site prefix."""
        return force_text(subject)

    def send_mail(self, template_prefix, email, context):
        """Render and send an Allauth email through our own ``send_email``.

        :param template_prefix: template path prefix; ``<prefix>_subject.txt``
            and ``<prefix>_message.{txt,html}`` are rendered.
        :param email: recipient address.
        :param context: template context; values that can't be pickled are
            dropped before handing off to ``send_email``.
        """
        subject = render_to_string(
            '{0}_subject.txt'.format(template_prefix), context
        )
        # Collapse the rendered subject onto a single line.
        subject = " ".join(subject.splitlines()).strip()
        subject = self.format_email_subject(subject)

        # Allauth sends some additional data in the context, remove it if the
        # pieces can't be pickled.  Iterate over a snapshot of the keys:
        # deleting from the dict while iterating context.keys() directly
        # raises "dictionary changed size during iteration" on Python 3.
        removed_keys = []
        for key in list(context.keys()):
            try:
                _ = pickle.dumps(context[key])
            except (pickle.PickleError, TypeError):
                removed_keys.append(key)
                del context[key]
        if removed_keys:
            log.debug('Removed context we were unable to serialize: %s',
                      removed_keys)

        send_email(
            recipient=email,
            subject=subject,
            template='{0}_message.txt'.format(template_prefix),
            template_html='{0}_message.html'.format(template_prefix),
            context=context
        )
|
tddv/readthedocs.org
|
readthedocs/core/adapters.py
|
Python
|
mit
| 1,609
|
"""Logical relations between sequences of truth conditions.
https://en.wikipedia.org/wiki/Template:Logical_connectives_table_and_Hasse_diagram
https://commons.wikimedia.org/wiki/File:Logical_connectives_Hasse_diagram.svg
https://commons.wikimedia.org/wiki/File:Logical_connectives_table.svg
https://commons.wikimedia.org/wiki/File:Logic_matrix;_operations.svg
"""
from itertools import combinations, chain
__all__ = ['Relations']
class Relations(list):
    """Logical relations between items from their contingent truth condition sequences.
    >>> Relations(['+1', '-2 -3'], [(True, False, False), (True, False, False)])
    [<Equivalent('+1', '-2 -3')>]
    >>> Relations(['+1', '-1'], [(True, False, False), (False, True, True)])
    [<Complement('+1', '-1')>]
    >>> Relations(['+1', '+3'], [(True, False, False), (False, False, True)])
    [<Incompatible('+1', '+3')>]
    >>> Relations(['+1', '-3'], [(True, False, False), (True, True, False)])
    [<Implication('+1', '-3')>]
    >>> Relations(['-1', '-3'], [(False, True, True), (True, True, False)])
    [<Subcontrary('-1', '-3')>]
    >>> Relations(['+1', 'sg'], [(True, True, False, False), (True, False, True, False)])
    [<Orthogonal('+1', 'sg')>]
    >>> r = Relations(['Never', 'Always', 'Possibly', 'Maybe'],
    ...               [(False, False), (True, True), (True, False), (True, False)],
    ...               include_unary=True)
    >>> r  # doctest: +NORMALIZE_WHITESPACE
    [<Contradiction('Never')>, <Tautology('Always')>,
     <Contingency('Possibly')>, <Contingency('Maybe')>,
     <Equivalent('Possibly', 'Maybe')>]
    >>> print(r)  # noqa: W291
    Never    contradiction 
    Always   tautology     
    Possibly contingency   
    Maybe    contingency   
    Possibly equivalent   Maybe
    >>> print(r[0])
    Never contradiction
    >>> print(r[-1])
    Possibly equivalent Maybe
    """
    def __init__(self, items, booleans,
                 include_unary: bool = False) -> None:
        """Filter out items with tautological or contradictory booleans."""
        # One unary relation per item; Relation() dispatches on the pattern.
        unary = [Relation(i, None, bools)
                 for i, bools in zip(items, booleans)]
        # Only contingent items can stand in a binary relation.
        combos = combinations(((u.left, u.bools)
                               for u in unary if u.__class__ is Contingency), 2)
        binary = (Relation(l, r, zip(lbools, rbools))
                  for (l, lbools), (r, rbools) in combos)
        members = chain(unary, binary) if include_unary else binary
        super().__init__(members)
        # Stable display order defined by each relation class's `order`.
        self.sort(key=lambda r: r.order)
    def __str__(self) -> str:
        return self.tostring(exclude_orthogonal=True)
    def tostring(self, exclude_orthogonal: bool = False) -> str:
        """Render one aligned 'left kind right' line per relation."""
        # Column width derived from the longest left-hand label.
        tmpl = '%%-%ds %%-12s %%s' % max(len(str(r.left)) for r in self)
        if exclude_orthogonal:
            # Rebind the local name to a filtered generator over self.
            self = (r for r in self if r.__class__ is not Orthogonal)
        return '\n'.join(tmpl % (r.left, r.kind, r.right) for r in self)
class RelationMeta(type):
    """Build and retrieve concrete ``Relation`` subclasses from docstring tables."""
    # Maps frozenset-of-truth-patterns -> generated Relation subclass.
    __map = {}
    def __init__(self, name, bases, dct) -> None:  # noqa: N804
        # Only process classes that declare `binary` (Unary/Binary below);
        # the abstract Relation base is skipped.
        if 'binary' not in dct:
            return
        # The table follows the first blank line of the class docstring.
        table = self.__doc__.strip().partition('\n\n')[2].strip().splitlines()
        symbols = {'T': True, 'F': False}
        if self.binary:
            def get_prop(fg):
                return tuple(symbols[f] for f in fg.strip())
        else:
            def get_prop(fg):
                return symbols[fg.strip()]
        # Header row: the truth-value combinations each column stands for.
        properties = [get_prop(fg) for fg in table[0].strip('|').split('|')]
        # Body rows: (name, symbol, order) triple plus one flag per column.
        obj_flags = [(obj.split(), [bool(p.strip()) for p in props.split('|')])
                     for obj, props in (l.strip('|').partition('|')[::2] for l in table[1:])]
        for index, ((name, symbol, order), symbols) in enumerate(obj_flags):
            pattern = frozenset(p for p, f in zip(properties, symbols) if f)
            ns = {'index': index, 'order': int(order),
                  'kind': name.lower(), 'symbol': symbol, 'pattern': pattern}
            cls = type(name, (self,), ns)
            # Publish the generated class at module level and register it.
            globals()[cls.__name__] = self.__map[pattern] = cls
            __all__.append(cls.__name__)
    def __call__(self, left, right, pairs):  # noqa: N804
        # Dispatch to the concrete subclass matching the truth pattern.
        self = self.__map[frozenset(pairs)]
        if not self.binary:
            right = pairs
        elif self is Replication:
            # Normalize A <- B into B -> A.
            self = Implication
            left, right = right, left
        return super().__call__(left, right)
# Abstract base: RelationMeta skips it (no `binary` attribute); concrete
# subclasses are generated from the docstring tables of its subclasses, and
# Relation(...) dispatches on the frozenset of observed truth-value pairs.
class Relation(metaclass=RelationMeta):
    """Logical characteristics of truth condition sequences."""
# NOTE: the docstring table below is load-bearing -- RelationMeta parses it
# (after the first blank line) to generate the Contingency/Contradiction/
# Tautology classes.  Do not reformat it.
class Unary(Relation):
    """Logical property of a single truth condition sequence.

    |T|F|
    Contingency ~ 0|X|X|
    Contradiction t -2| |X|
    Tautology f -1|X| |
    """
    binary = False
    # Unary relations have no right-hand item.
    right = ''
    def __init__(self, left, bools) -> None:
        self.left = left
        self.bools = bools
    def __str__(self) -> str:
        return f'{self.left} {self.kind}'
    def __repr__(self) -> str:
        return f'<{self.__class__.__name__}({self.left!r})>'
# NOTE: the docstring table below is load-bearing -- RelationMeta parses it
# (after the first blank line) to generate Orthogonal/Subcontrary/Implication/
# Replication/Equivalent/Incompatible/Complement.  Do not reformat it.
class Binary(Relation):
    """Logical relation between two contingent truth condition sequences.

    |TT|TF|FT|FF|
    Orthogonal ~ 7| X| X| X| X|
    Subcontrary v 6| X| X| X| |
    Implication -> 4| X| | X| X|
    Replication <- 5| X| X| | X|
    Equivalent <-> 1| X| | | X|
    Incompatible ! 3| | X| X| X|
    Complement >-< 2| | X| X| |
    """
    binary = True
    def __init__(self, left, right) -> None:
        self.left = left
        self.right = right
    def __str__(self) -> str:
        return f'{self.left} {self.kind} {self.right}'
    def __repr__(self) -> str:
        return f'<{self.__class__.__name__}({self.left!r}, {self.right!r})>'
|
xflr6/concepts
|
concepts/junctors.py
|
Python
|
mit
| 5,798
|
#!/usr/bin/env python
#Quickly clean my music, videos and images into a different directory.
#QuickClean.py
#Version 0.03
import glob
import sys
import os
import shutil
import argparse


def _collect(patterns):
    """Return files in the current directory matching any of the glob patterns."""
    matches = []
    for pattern in patterns:
        matches.extend(glob.glob(pattern))
    return matches


def _relocate(files, destination):
    """Copy each file into *destination* (created lazily), then delete it.

    Copy-then-remove is used rather than shutil.move because shutil's copy
    method silently overwrites an existing file at the destination.  The
    destination directory is only created when there is at least one file
    to move, matching the original behaviour.
    """
    for path in files:
        if not os.path.exists(destination):
            os.mkdir(destination)
        shutil.copy(path, destination)
        os.remove(path)


parser = argparse.ArgumentParser(description='A quick way to clean out your cluttered folders.')
parser.add_argument('-s','--source', help='Directory you want to clean out. If no other argumnets are provided, will create relevant directories under source directory', required=True)
parser.add_argument('-m','--music', help='Directory you want to copy music files to. If absolute path is not provided, will create directory inside the source directory', default='music', required=False)
parser.add_argument('-p','--pictures', help='Directory you want to copy picture files to. If absolute path is not provided, will create directory inside the source directory', default='pictures', required=False)
parser.add_argument('-v','--videos', help='Directory you want to copy video files to. If absolute path is not provided, will create directory inside the source directory', default='videos', required=False)
parser.add_argument('-d','--documents', help='Directory you want to copy document files to. If absolute path is not provided, will create directory inside the source directory', default='documents',required=False)
args = parser.parse_args()

os.chdir(args.source)

# File-extension patterns for each category, processed in the original
# order: music, videos, pictures, documents.
_relocate(_collect(["*.mp3", "*.flac", "*.aac"]), args.music)
_relocate(_collect(["*.avi", "*.mp4", "*.flv", "*.mkv", "*.mov"]), args.videos)
_relocate(_collect(["*.png", "*.jpg", "*.bmp", "*.gif"]), args.pictures)
_relocate(_collect(["*.pdf", "*.PDF", "*.xls", "*.xlsx", "*.pptx", "*.docx",
                    "*.m", "*.ppt", "*.doc"]), args.documents)
|
shayekharjan/QuickClean.py
|
QuickClean.py
|
Python
|
mit
| 2,560
|
#!/usr/bin/env python
from unittest import TestCase
from fundamentals.recursion.hanoi.tower import Tower
class TestTower(TestCase):
    def test_adding(self):
        """Valid pushes grow the tower; an invalid push leaves it unchanged."""
        t = Tower(0, "a")
        t.add(3)
        t.add(2)
        self.assertEqual(2, t.size())
        t.pop()
        t.add(1)
        self.assertEqual(2, t.size())
        # BUG FIX: the original called self.add(5), which raised
        # AttributeError on the TestCase itself, so the invalid-push path
        # on the Tower was never exercised.  Attempt the push on the tower
        # and tolerate a rejection raised by Tower.add.
        try:
            t.add(5)
        except Exception:
            pass
        # Whether rejected by exception or ignored, the size must not change.
        self.assertEqual(2, t.size())
|
davjohnst/fundamentals
|
tests/recursion/hanoi/test_tower.py
|
Python
|
apache-2.0
| 445
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import time
import poplib
import frappe
from frappe.utils import extract_email_id, convert_utc_to_user_timezone, now, cint
from frappe.utils.scheduler import log
# A single message is larger than the configured max_email_size.
class EmailSizeExceededError(frappe.ValidationError): pass
# Cumulative POP3 I/O time exceeded the configured pop_timeout.
class EmailTimeoutError(frappe.ValidationError): pass
# Combined size of fetched messages exceeded the per-run total budget.
class TotalSizeExceededError(frappe.ValidationError): pass
class IncomingMail:
	"""
	Single incoming email object. Extracts, text / html and attachments from the email.
	NOTE(review): Python 2 code (uses the `unicode` builtin and implicit
	str decoding).
	"""
	def __init__(self, content):
		# Parse the raw RFC-2822 message and eagerly extract bodies,
		# subject, sender and date.
		import email, email.utils
		import datetime
		self.mail = email.message_from_string(content)
		self.text_content = ''
		self.html_content = ''
		self.attachments = []
		self.parse()
		self.set_content_and_type()
		self.set_subject()
		self.from_email = extract_email_id(self.mail["From"])
		self.from_real_name = email.utils.parseaddr(self.mail["From"])[0]
		if self.mail["Date"]:
			# Convert the Date header (with tz offset) to the user's timezone.
			utc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail["Date"]))
			utc_dt = datetime.datetime.utcfromtimestamp(utc)
			self.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')
		else:
			self.date = now()
	def parse(self):
		"""Walk all MIME parts and accumulate bodies/attachments."""
		for part in self.mail.walk():
			self.process_part(part)
	def set_subject(self):
		"""Decode the (possibly RFC-2047 encoded) Subject header."""
		import email.header
		_subject = email.header.decode_header(self.mail.get("Subject", "No Subject"))
		self.subject = _subject[0][0] or ""
		if _subject[0][1]:
			self.subject = self.subject.decode(_subject[0][1])
		else:
			# assume that the encoding is utf-8
			self.subject = self.subject.decode("utf-8")
		if not self.subject:
			self.subject = "No Subject"
	def set_content_and_type(self):
		"""Prefer the plain-text body; fall back to HTML."""
		# NOTE(review): the '[Blank Email]' default is always overwritten
		# by one of the branches below, so it can never survive -- possibly
		# unintended.  TODO confirm.
		self.content, self.content_type = '[Blank Email]', 'text/plain'
		if self.text_content:
			self.content, self.content_type = self.text_content, 'text/plain'
		else:
			self.content, self.content_type = self.html_content, 'text/html'
	def process_part(self, part):
		"""Route one MIME part to text/html accumulation or attachments."""
		content_type = part.get_content_type()
		charset = part.get_content_charset()
		if not charset: charset = self.get_charset(part)
		if content_type == 'text/plain':
			self.text_content += self.get_payload(part, charset)
		if content_type == 'text/html':
			self.html_content += self.get_payload(part, charset)
		# Any part carrying a filename is treated as an attachment.
		if part.get_filename():
			self.get_attachment(part, charset)
	def get_text_content(self):
		return self.text_content or self.html_content
	def get_charset(self, part):
		"""Fall back to chardet detection when the part declares no charset."""
		charset = part.get_content_charset()
		if not charset:
			import chardet
			charset = chardet.detect(str(part))['encoding']
		return charset
	def get_payload(self, part, charset):
		try:
			# Decode transfer encoding, then the declared/declared-guessed
			# charset, ignoring undecodable bytes (Python 2 `unicode`).
			return unicode(part.get_payload(decode=True),str(charset),"ignore")
		except LookupError:
			# Unknown charset name: return the raw payload.
			return part.get_payload()
	def get_attachment(self, part, charset):
		self.attachments.append({
			'content-type': part.get_content_type(),
			'filename': part.get_filename(),
			'content': part.get_payload(decode=True),
		})
	def save_attachments_in_doc(self, doc):
		"""Attach extracted files to *doc*, skipping oversize/duplicate files."""
		from frappe.utils.file_manager import save_file, MaxFileSizeReachedError
		for attachment in self.attachments:
			try:
				fid = save_file(attachment['filename'], attachment['content'],
					doc.doctype, doc.name)
			except MaxFileSizeReachedError:
				# WARNING: bypass max file size exception
				pass
			except frappe.DuplicateEntryError:
				# same file attached twice??
				pass
	def get_thread_id(self):
		"""Return the first bracketed token of the subject, e.g. '[ABC/123]'."""
		import re
		l = re.findall('(?<=\[)[\w/-]+', self.subject)
		return l and l[0] or None
class POP3Mailbox:
	"""Fetch and process messages from a POP3 mailbox.
	Subclasses override setup(), check_mails() and process_message().
	NOTE(review): Python 2 code (uses xrange).
	"""
	def __init__(self, args=None):
		self.setup(args)
		self.get_messages()
	def setup(self, args=None):
		# overrride
		self.settings = args or frappe._dict()
	def check_mails(self):
		# overrride
		return True
	def process_message(self, mail):
		# overrride
		pass
	def connect(self):
		"""Open a (timeout-aware) POP3 or POP3-SSL connection and log in."""
		if cint(self.settings.use_ssl):
			self.pop = Timed_POP3_SSL(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
		else:
			self.pop = Timed_POP3(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
		self.pop.user(self.settings.username)
		self.pop.pass_(self.settings.password)
	def get_messages(self):
		"""Pull up to 20 messages, enforcing per-message and total size caps."""
		if not self.check_mails():
			return # nothing to do
		frappe.db.commit()
		self.connect()
		try:
			# track if errors arised
			self.errors = False
			pop_list = self.pop.list()[1]
			num = num_copy = len(pop_list)
			# WARNING: Hard coded max no. of messages to be popped
			if num > 20: num = 20
			# size limits
			self.total_size = 0
			self.max_email_size = cint(frappe.local.conf.get("max_email_size"))
			self.max_total_size = 5 * self.max_email_size
			for i, pop_meta in enumerate(pop_list):
				# do not pull more than NUM emails
				if (i+1) > num:
					break
				try:
					self.retrieve_message(pop_meta, i+1)
				except (TotalSizeExceededError, EmailTimeoutError):
					break
			# WARNING: Mark as read - message number 101 onwards from the pop list
			# This is to avoid having too many messages entering the system
			num = num_copy
			if num > 100 and not self.errors:
				for m in xrange(101, num+1):
					self.pop.dele(m)
		finally:
			# no matter the exception, pop should quit if connected
			self.pop.quit()
	def retrieve_message(self, pop_meta, msg_num):
		"""Fetch one message, process it, and delete it from the server."""
		incoming_mail = None
		try:
			self.validate_pop(pop_meta)
			msg = self.pop.retr(msg_num)
			incoming_mail = IncomingMail(b'\n'.join(msg[1]))
			frappe.db.begin()
			self.process_message(incoming_mail)
			frappe.db.commit()
		except (TotalSizeExceededError, EmailTimeoutError):
			# propagate this error to break the loop
			raise
		except:
			# log performs rollback and logs error in scheduler log
			log("receive.get_messages", self.make_error_msg(msg_num, incoming_mail))
			self.errors = True
			frappe.db.rollback()
			# Delete even on failure so a poison message cannot wedge the queue.
			self.pop.dele(msg_num)
		else:
			self.pop.dele(msg_num)
	def validate_pop(self, pop_meta):
		# throttle based on email size
		if not self.max_email_size:
			return
		m, size = pop_meta.split()
		size = cint(size)
		if size < self.max_email_size:
			self.total_size += size
			if self.total_size > self.max_total_size:
				raise TotalSizeExceededError
		else:
			raise EmailSizeExceededError
	def make_error_msg(self, msg_num, incoming_mail):
		"""Build a log message, fetching just the headers if retrieval failed."""
		error_msg = "Error in retrieving email."
		if not incoming_mail:
			try:
				# retrieve headers
				incoming_mail = IncomingMail(b'\n'.join(self.pop.top(msg_num, 5)[1]))
			except:
				pass
		if incoming_mail:
			error_msg += "\nDate: {date}\nFrom: {from_email}\nSubject: {subject}\n".format(
				date=incoming_mail.date, from_email=incoming_mail.from_email, subject=incoming_mail.subject)
		return error_msg
class TimerMixin(object):
	"""Mixin adding a cumulative timeout to poplib connection classes.

	Time spent in _getline is accumulated across the connection; once it
	passes `timeout` seconds, EmailTimeoutError is raised.  Subclasses set
	`_super` to the concrete poplib base they wrap.
	"""
	def __init__(self, *args, **kwargs):
		self.timeout = kwargs.pop('timeout', 0.0)
		self.elapsed_time = 0.0
		self._super.__init__(self, *args, **kwargs)
		if self.timeout:
			# set per operation timeout to one-fifth of total pop timeout
			self.sock.settimeout(self.timeout / 5.0)
	def _getline(self, *args, **kwargs):
		"""Delegate to the poplib base, charging the elapsed time budget."""
		start_time = time.time()
		ret = self._super._getline(self, *args, **kwargs)
		self.elapsed_time += time.time() - start_time
		if self.timeout and self.elapsed_time > self.timeout:
			raise EmailTimeoutError
		return ret
	def quit(self, *args, **kwargs):
		"""Reset the time budget when the connection is closed."""
		self.elapsed_time = 0.0
		return self._super.quit(self, *args, **kwargs)
# Concrete POP3 variants with the cumulative-timeout behaviour mixed in;
# `_super` names the poplib base that TimerMixin explicitly delegates to.
class Timed_POP3(TimerMixin, poplib.POP3):
	_super = poplib.POP3
class Timed_POP3_SSL(TimerMixin, poplib.POP3_SSL):
	_super = poplib.POP3_SSL
|
geo-poland/frappe
|
frappe/utils/email_lib/receive.py
|
Python
|
mit
| 7,591
|
# -*- coding: utf-8 -*-
import datetime
import httplib as http
import time
import furl
import itsdangerous
import jwe
import jwt
import mock
from django.utils import timezone
from framework.auth import cas, signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from modularodm import Q
from nose.tools import * # noqa
from osf_tests import factories
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import (AuthUserFactory, ProjectFactory,
RegistrationFactory)
from website import settings
from addons.base import views
from addons.github.exceptions import ApiError
from addons.github.models import GithubFolder, GithubFile, GithubFileNode
from addons.github.tests.factories import GitHubAccountFactory
from osf.models import Session, MetaSchema
from osf.models import files as file_models
from osf.models.files import BaseFileNode, TrashedFileNode
from website.project import new_private_link
from website.project.model import ensure_schemas
from website.project.views.node import _view_project as serialize_node
from website.util import api_url_for, rubeus
class SetEnvironMiddleware(object):
    """WSGI middleware that injects fixed keys into every request environ."""

    def __init__(self, app, **kwargs):
        self.app = app
        self.kwargs = kwargs

    def __call__(self, environ, start_response):
        """Merge the stored overrides into *environ*, then delegate to the app."""
        for key, value in self.kwargs.items():
            environ[key] = value
        return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
def setUp(self):
        """Create a user/project pair, a signed session cookie, a configured
        github addon, and the JWE key used to wrap waterbutler payloads."""
        super(TestAddonAuth, self).setUp()
        self.user = AuthUserFactory()
        self.auth_obj = Auth(user=self.user)
        self.node = ProjectFactory(creator=self.user)
        self.session = Session(data={'auth_user_id': self.user._id})
        self.session.save()
        self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
        self.configure_addon()
        self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
def configure_addon(self):
        """Wire up a github addon on both the user and the node, sharing one
        external (oauth) account and granting the node access to it."""
        self.user.add_addon('github')
        self.user_addon = self.user.get_addon('github')
        self.oauth_settings = GitHubAccountFactory(display_name='john')
        self.oauth_settings.save()
        self.user.external_accounts.add(self.oauth_settings)
        self.user.save()
        self.node.add_addon('github', self.auth_obj)
        self.node_addon = self.node.get_addon('github')
        self.node_addon.user = 'john'
        self.node_addon.repo = 'youre-my-best-friend'
        self.node_addon.user_settings = self.user_addon
        self.node_addon.external_account = self.oauth_settings
        self.node_addon.save()
        self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
        self.user_addon.save()
def build_url(self, **kwargs):
        """Return a get_auth URL whose payload is a JWT (action/nid/provider
        plus any overrides) wrapped in JWE, as waterbutler would send it."""
        options = {'payload': jwe.encrypt(jwt.encode({'data': dict(dict(
            action='download',
            nid=self.node._id,
            provider=self.node_addon.config.short_name), **kwargs),
            'exp': timezone.now() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
        }, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), self.JWE_KEY)}
        return api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url()
res = self.app.get(url, auth=self.user.auth)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_missing_args(self):
url = self.build_url(cookie=None)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_bad_cookie(self):
url = self.build_url(cookie=self.cookie)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 200)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_cookie(self):
url = self.build_url(cookie=self.cookie[::-1])
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_missing_addon(self):
url = self.build_url(provider='queenhub')
res = self.app.get(url, expect_errors=True, auth=self.user.auth)
assert_equal(res.status_code, 400)
@mock.patch('addons.base.views.cas.get_client')
def test_auth_bad_bearer_token(self, mock_cas_client):
mock_cas_client.return_value = mock.Mock(profile=mock.Mock(return_value=cas.CasResponse(authenticated=False)))
url = self.build_url()
res = self.app.get(url, headers={'Authorization': 'Bearer invalid_access_token'}, expect_errors=True)
assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
    """Tests for the signed ``create_waterbutler_log`` webhook endpoint.

    Fixes: ``test_add_log_no_addon`` previously reloaded ``self.node`` but
    asserted on the locally created ``node`` — it now reloads the node it
    asserts on. ``build_payload`` uses ``items()`` instead of the
    Python-2-only ``iteritems()`` (equivalent on Python 2, forward-compatible).
    """

    def setUp(self):
        super(TestAddonLogs, self).setUp()
        self.user = AuthUserFactory()
        self.auth_obj = Auth(user=self.user)
        self.node = ProjectFactory(creator=self.user)
        self.session = Session(data={'auth_user_id': self.user._id})
        self.session.save()
        self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
        self.configure_addon()

    def configure_addon(self):
        # Wire up the github addon on both the user and the node, and grant
        # the node access to the user's external (oauth) account.
        self.user.add_addon('github')
        self.user_addon = self.user.get_addon('github')
        self.oauth_settings = GitHubAccountFactory(display_name='john')
        self.oauth_settings.save()
        self.user.external_accounts.add(self.oauth_settings)
        self.user.save()
        self.node.add_addon('github', self.auth_obj)
        self.node_addon = self.node.get_addon('github')
        self.node_addon.user = 'john'
        self.node_addon.repo = 'youre-my-best-friend'
        self.node_addon.user_settings = self.user_addon
        self.node_addon.external_account = self.oauth_settings
        self.node_addon.save()
        self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
        self.user_addon.save()

    def build_payload(self, metadata, **kwargs):
        """Build a signed waterbutler callback payload.

        Keyword overrides replace the defaults; keys whose value is ``None``
        are dropped entirely so tests can simulate missing fields.
        """
        options = dict(
            auth={'id': self.user._id},
            action='create',
            provider=self.node_addon.config.short_name,
            metadata=metadata,
            time=time.time() + 1000,
        )
        options.update(kwargs)
        options = {
            key: value
            for key, value in options.items()
            if value is not None
        }
        message, signature = signing.default_signer.sign_payload(options)
        return {
            'payload': message,
            'signature': signature,
        }

    @mock.patch('website.notifications.events.files.FileAdded.perform')
    def test_add_log(self, mock_perform):
        # A well-formed 'create' callback adds exactly one log entry.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path})
        nlogs = self.node.logs.count()
        self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
        self.node.reload()
        assert_equal(self.node.logs.count(), nlogs + 1)
        # Mocking perform so that the payload need not be exact.
        assert_true(mock_perform.called, 'perform not called')

    def test_add_log_missing_args(self):
        # Missing 'auth' key -> 400 and no log added.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, auth=None)
        nlogs = self.node.logs.count()
        res = self.app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(self.node.logs.count(), nlogs)

    def test_add_log_no_user(self):
        # 'auth' present but with a null user id -> 400 and no log added.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, auth={'id': None})
        nlogs = self.node.logs.count()
        res = self.app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(self.node.logs.count(), nlogs)

    def test_add_log_no_addon(self):
        # Target node has no github addon configured -> 400 and no log added.
        path = 'pizza'
        node = ProjectFactory(creator=self.user)
        url = node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path})
        nlogs = node.logs.count()
        res = self.app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        # Reload the node under test (the original mistakenly reloaded
        # ``self.node``) before re-checking its log count.
        node.reload()
        assert_equal(node.logs.count(), nlogs)

    def test_add_log_bad_action(self):
        # Unknown action name -> 400 and no log added.
        path = 'pizza'
        url = self.node.api_url_for('create_waterbutler_log')
        payload = self.build_payload(metadata={'path': path}, action='dance')
        nlogs = self.node.logs.count()
        res = self.app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'},
            expect_errors=True,
        )
        assert_equal(res.status_code, 400)
        self.node.reload()
        assert_equal(self.node.logs.count(), nlogs)

    def test_action_file_rename(self):
        url = self.node.api_url_for('create_waterbutler_log')
        # NOTE(review): source carries name 'new.txt' and destination
        # 'old.txt', which reads inverted for a rename — confirm against the
        # webhook's expectations before changing the fixture.
        payload = self.build_payload(
            action='rename',
            metadata={
                'path': 'foo',
            },
            source={
                'materialized': 'foo',
                'provider': 'github',
                'node': {'_id': self.node._id},
                'name': 'new.txt',
                'kind': 'file',
            },
            destination={
                'path': 'foo',
                'materialized': 'foo',
                'provider': 'github',
                'node': {'_id': self.node._id},
                'name': 'old.txt',
                'kind': 'file',
            },
        )
        self.app.put_json(
            url,
            payload,
            headers={'Content-Type': 'application/json'}
        )
        self.node.reload()
        assert_equal(
            self.node.logs.latest().action,
            'github_addon_file_renamed',
        )
class TestCheckAuth(OsfTestCase):
    """Permission checks performed by ``views.check_access``."""

    def setUp(self):
        super(TestCheckAuth, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)

    def test_has_permission(self):
        # The project creator may upload.
        assert_true(views.check_access(self.node, Auth(user=self.user), 'upload', None))

    def test_not_has_permission_read_public(self):
        # Anonymous download is allowed once the node is public.
        self.node.is_public = True
        self.node.save()
        views.check_access(self.node, Auth(), 'download', None)

    def test_not_has_permission_read_has_link(self):
        # A valid private link grants anonymous download access.
        link = new_private_link('red-special', self.user, [self.node], anonymous=False)
        views.check_access(self.node, Auth(private_key=link.key), 'download', None)

    def test_not_has_permission_logged_in(self):
        # A logged-in non-contributor is forbidden (403).
        stranger = AuthUserFactory()
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.node, Auth(user=stranger), 'download', None)
        assert_equal(exc_info.exception.code, 403)

    def test_not_has_permission_not_logged_in(self):
        # Anonymous access to a private node is unauthorized (401).
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.node, Auth(), 'download', None)
        assert_equal(exc_info.exception.code, 401)

    def test_has_permission_on_parent_node_copyto_pass_if_registration(self):
        comp_admin = AuthUserFactory()
        ProjectFactory(creator=comp_admin, parent=self.node)
        registration = RegistrationFactory(project=self.node)
        component_registration = registration._nodes.first()
        assert_false(component_registration.has_permission(self.user, 'write'))
        # 'copyto' on a registration component succeeds via the parent.
        assert_true(views.check_access(component_registration, Auth(user=self.user), 'copyto', None))

    def test_has_permission_on_parent_node_metadata_pass_if_registration(self):
        comp_admin = AuthUserFactory()
        component = ProjectFactory(creator=comp_admin, parent=self.node, is_public=False)
        component_registration = RegistrationFactory(project=component, creator=comp_admin)
        assert_false(component_registration.has_permission(self.user, 'read'))
        # 'metadata' on a registration component succeeds via the parent.
        assert_true(views.check_access(component_registration, Auth(user=self.user), 'metadata', None))

    def test_has_permission_on_parent_node_copyto_fail_if_not_registration(self):
        comp_admin = AuthUserFactory()
        component = ProjectFactory(creator=comp_admin, parent=self.node)
        assert_false(component.has_permission(self.user, 'write'))
        # For a non-registration, parent permissions do not allow 'copyto'.
        with assert_raises(HTTPError):
            views.check_access(component, Auth(user=self.user), 'copyto', None)

    def test_has_permission_on_parent_node_copyfrom(self):
        comp_admin = AuthUserFactory()
        component = ProjectFactory(creator=comp_admin, is_public=False, parent=self.node)
        assert_false(component.has_permission(self.user, 'write'))
        # 'copyfrom' is granted through the parent even on non-registrations.
        assert_true(views.check_access(component, Auth(user=self.user), 'copyfrom', None))
class TestCheckPreregAuth(OsfTestCase):
    """``check_access`` behavior for Prereg Challenge administrators."""

    def setUp(self):
        super(TestCheckPreregAuth, self).setUp()
        ensure_schemas()
        # A user flagged as a prereg admin via the system tag.
        self.prereg_challenge_admin_user = AuthUserFactory()
        self.prereg_challenge_admin_user.add_system_tag(settings.PREREG_ADMIN_TAG)
        self.prereg_challenge_admin_user.save()
        prereg_schema = MetaSchema.find_one(
            Q('name', 'eq', 'Prereg Challenge') &
            Q('schema_version', 'eq', 2)
        )
        self.user = AuthUserFactory()
        self.node = factories.ProjectFactory(creator=self.user)
        # Draft registration branched from an unrelated parent project.
        self.parent = factories.ProjectFactory()
        self.child = factories.NodeFactory(parent=self.parent)
        self.draft_registration = factories.DraftRegistrationFactory(
            initiator=self.user,
            registration_schema=prereg_schema,
            branched_from=self.parent
        )

    def test_has_permission_download_prereg_challenge_admin(self):
        # Prereg admins may download from a node with a prereg draft.
        res = views.check_access(self.draft_registration.branched_from,
            Auth(user=self.prereg_challenge_admin_user), 'download', None)
        assert_true(res)

    def test_has_permission_download_on_component_prereg_challenge_admin(self):
        # The prereg-admin download exception extends to components.
        try:
            res = views.check_access(self.draft_registration.branched_from._nodes.first(),
                Auth(user=self.prereg_challenge_admin_user), 'download', None)
        except Exception:
            self.fail()
        assert_true(res)

    def test_has_permission_download_not_prereg_challenge_admin(self):
        # A regular user gets 403 on the same node.
        new_user = AuthUserFactory()
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.draft_registration.branched_from,
                Auth(user=new_user), 'download', None)
            assert_equal(exc_info.exception.code, http.FORBIDDEN)

    def test_has_permission_download_prereg_challenge_admin_not_draft(self):
        # Prereg admins get no special access to nodes without a draft.
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.node,
                Auth(user=self.prereg_challenge_admin_user), 'download', None)
            assert_equal(exc_info.exception.code, http.FORBIDDEN)

    def test_has_permission_write_prereg_challenge_admin(self):
        # The exception is read-only: 'write' is still forbidden.
        with assert_raises(HTTPError) as exc_info:
            views.check_access(self.draft_registration.branched_from,
                Auth(user=self.prereg_challenge_admin_user), 'write', None)
            assert_equal(exc_info.exception.code, http.FORBIDDEN)
class TestCheckOAuth(OsfTestCase):
    """``check_access`` behavior when an OAuth (CAS) response is supplied."""

    def setUp(self):
        super(TestCheckOAuth, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)

    def test_has_permission_private_not_authenticated(self):
        # An unauthenticated CAS response on a private node -> 403.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=False)
        assert_false(component.has_permission(self.user, 'write'))
        with assert_raises(HTTPError) as exc_info:
            views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_equal(exc_info.exception.code, 403)

    def test_has_permission_private_no_scope_forbidden(self):
        # An authenticated token carrying no scopes -> 403 on a private node.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {}})
        assert_false(component.has_permission(self.user, 'write'))
        with assert_raises(HTTPError) as exc_info:
            views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_equal(exc_info.exception.code, 403)

    def test_has_permission_public_irrelevant_scope_allowed(self):
        # On a public node, download succeeds regardless of token scope.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=True, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {'osf.users.all_read'}})
        assert_false(component.has_permission(self.user, 'write'))
        res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_true(res)

    def test_has_permission_private_irrelevant_scope_forbidden(self):
        # The same irrelevant scope is rejected on a private node -> 403.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {'osf.users.all_read'}})
        assert_false(component.has_permission(self.user, 'write'))
        with assert_raises(HTTPError) as exc_info:
            views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_equal(exc_info.exception.code, 403)

    def test_has_permission_decommissioned_scope_no_error(self):
        # An unknown/retired scope alongside a valid read scope is ignored.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {
                                       'decommissioned.scope+write',
                                       'osf.nodes.data_read',
                                   }})
        assert_false(component.has_permission(self.user, 'write'))
        res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_true(res)

    def test_has_permission_write_scope_read_action(self):
        # A write scope implies read: download is allowed.
        component_admin = AuthUserFactory()
        component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {'osf.nodes.data_write'}})
        assert_false(component.has_permission(self.user, 'write'))
        res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
        assert_true(res)

    def test_has_permission_read_scope_write_action_forbidden(self):
        # A read-only scope cannot authorize 'upload' even for a contributor.
        component = ProjectFactory(creator=self.user, is_public=False, parent=self.node)
        cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
                                   attributes={'accessTokenScope': {'osf.nodes.data_read'}})
        assert_true(component.has_permission(self.user, 'write'))
        with assert_raises(HTTPError) as exc_info:
            views.check_access(component, Auth(user=self.user), 'upload', cas_resp)
        assert_equal(exc_info.exception.code, 403)
def assert_urls_equal(url1, url2):
    """Assert two URLs are equivalent, ignoring scheme/host/port and the
    ordering of query parameters."""
    parsed1 = furl.furl(url1)
    parsed2 = furl.furl(url2)
    for attr in ('scheme', 'host', 'port'):
        setattr(parsed1, attr, None)
        setattr(parsed2, attr, None)
    # furl keeps query params ordered, so compare them as plain dicts first,
    # then compare the rest of the URL with the queries stripped.
    assert_equal(dict(parsed1.args), dict(parsed2.args))
    parsed1.args = {}
    parsed2.args = {}
    assert_equal(parsed1, parsed2)
def mock_touch(self, bearer, version=None, revision=None, **kwargs):
    """Stand-in for ``GithubFileNode.touch`` that serves file versions from
    ``self.versions``.

    Returns the requested 1-indexed version, ``None`` when the version is
    out of range / unparsable / unavailable, or a fresh ``FileVersion``
    when no version was requested.
    """
    if not version:
        # No explicit version requested: hand back a fresh, unsaved version.
        return file_models.FileVersion()
    if not self.versions:
        return None
    try:
        # Versions are 1-indexed from the caller's perspective.
        return self.versions[int(version) - 1]
    except (IndexError, ValueError):
        return None
@mock.patch('addons.github.models.GithubFileNode.touch', mock_touch)
@mock.patch('addons.github.models.GitHubClient.repo', mock.Mock(side_effect=ApiError))
class TestAddonFileViews(OsfTestCase):
    """Tests for ``addon_view_or_download_file`` and related file views.

    The class-level patches replace ``touch`` with the local ``mock_touch``
    helper and make the GitHub API client raise, so no network calls occur.
    """

    def setUp(self):
        super(TestAddonFileViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        # Wire up the github addon on user and project with an oauth grant.
        self.user.add_addon('github')
        self.project.add_addon('github', auth=Auth(self.user))
        self.user_addon = self.user.get_addon('github')
        self.node_addon = self.project.get_addon('github')
        self.oauth = GitHubAccountFactory()
        self.oauth.save()
        self.user.external_accounts.add(self.oauth)
        self.user.save()
        self.node_addon.user_settings = self.user_addon
        self.node_addon.external_account = self.oauth
        self.node_addon.repo = 'Truth'
        self.node_addon.user = 'E'
        self.node_addon.save()
        self.user_addon.oauth_grants[self.project._id] = {self.oauth._id: []}
        self.user_addon.save()

    def get_test_file(self):
        # A saved GithubFile at /test/Test with a single version.
        version = file_models.FileVersion(identifier='1')
        version.save()
        ret = GithubFile(
            name='Test',
            node=self.project,
            path='/test/Test',
            materialized_path='/test/Test',
        )
        ret.save()
        ret.versions.add(version)
        return ret

    def get_second_test_file(self):
        # A second, distinct GithubFile at /test/Test2.
        version = file_models.FileVersion(identifier='1')
        version.save()
        ret = GithubFile(
            name='Test2',
            node=self.project,
            path='/test/Test2',
            materialized_path='/test/Test2',
        )
        ret.save()
        ret.versions.add(version)
        return ret

    def get_mako_return(self):
        # Minimal serialized-node context matching what the file-view mako
        # template expects, so mocked views can render without errors.
        ret = serialize_node(self.project, Auth(self.user), primary=True)
        ret.update({
            'error': '',
            'provider': '',
            'file_path': '',
            'sharejs_uuid': '',
            'private': '',
            'urls': {
                'files': '',
                'render': '',
                'sharejs': '',
                'mfr': '',
                'gravatar': '',
                'external': '',
                'archived_from': '',
            },
            'size': '',
            'extra': '',
            'file_name': '',
            'materialized_path': '',
            'file_id': '',
        })
        ret.update(rubeus.collect_addon_assets(self.project))
        return ret

    def test_redirects_to_guid(self):
        # Path-based URLs redirect (302) to the file's canonical GUID URL.
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=file_node.path.strip('/'),
                provider='github'
            ),
            auth=self.user.auth
        )
        assert_equals(resp.status_code, 302)
        assert_equals(resp.location, 'http://localhost:80/{}/'.format(guid._id))

    def test_action_download_redirects_to_download(self):
        # ?action=download redirects to the waterbutler download URL.
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        resp = self.app.get('/{}/?action=download'.format(guid._id), auth=self.user.auth)
        assert_equals(resp.status_code, 302)
        location = furl.furl(resp.location)
        assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, version=''))

    def test_action_download_redirects_to_download_with_version(self):
        # ?revision=N is forwarded to the waterbutler download URL.
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        resp = self.app.get('/{}/?action=download&revision=1'.format(guid._id), auth=self.user.auth)
        assert_equals(resp.status_code, 302)
        location = furl.furl(resp.location)
        # Note: 'version' is added by us, and all other url params are forwarded as well.
        assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, revision=1, version=''))

    @mock.patch('addons.base.views.addon_view_file')
    def test_action_view_calls_view_file(self, mock_view_file):
        # ?action=view dispatches to addon_view_file with (auth, node, file, version).
        self.user.reload()
        self.project.reload()
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        mock_view_file.return_value = self.get_mako_return()
        self.app.get('/{}/?action=view'.format(guid._id), auth=self.user.auth)
        args, kwargs = mock_view_file.call_args
        assert_equals(kwargs, {})
        assert_equals(args[0].user._id, self.user._id)
        assert_equals(args[1], self.project)
        assert_equals(args[2], file_node)
        assert_true(isinstance(args[3], file_node.touch(None).__class__))

    @mock.patch('addons.base.views.addon_view_file')
    def test_no_action_calls_view_file(self, mock_view_file):
        # No action param defaults to the view behavior.
        self.user.reload()
        self.project.reload()
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        mock_view_file.return_value = self.get_mako_return()
        self.app.get('/{}/'.format(guid._id), auth=self.user.auth)
        args, kwargs = mock_view_file.call_args
        assert_equals(kwargs, {})
        assert_equals(args[0].user._id, self.user._id)
        assert_equals(args[1], self.project)
        assert_equals(args[2], file_node)
        assert_true(isinstance(args[3], file_node.touch(None).__class__))

    def test_download_create_guid(self):
        # Hitting the path-based URL lazily creates a GUID for the file.
        file_node = self.get_test_file()
        assert_is(file_node.get_guid(), None)
        self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=file_node.path.strip('/'),
                provider='github',
            ),
            auth=self.user.auth
        )
        assert_true(file_node.get_guid())

    def test_view_file_does_not_delete_file_when_requesting_invalid_version(self):
        # Requesting a bogus ?version must not trash the underlying file node.
        with mock.patch('addons.github.models.NodeSettings.is_private',
                        new_callable=mock.PropertyMock) as mock_is_private:
            mock_is_private.return_value = False
            file_node = self.get_test_file()
            assert_is(file_node.get_guid(), None)
            url = self.project.web_url_for(
                'addon_view_or_download_file',
                path=file_node.path.strip('/'),
                provider='github',
            )
            # First view generated GUID
            self.app.get(url, auth=self.user.auth)
            self.app.get(url + '?version=invalid', auth=self.user.auth, expect_errors=True)
            assert_is_not_none(BaseFileNode.load(file_node._id))
            assert_is_none(TrashedFileNode.load(file_node._id))

    def test_unauthorized_addons_raise(self):
        # An addon with no user_settings (deauthorized) -> 401.
        path = 'cloudfiles'
        self.node_addon.user_settings = None
        self.node_addon.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 401)

    def test_nonstorage_addons_raise(self):
        # Addons that are not storage providers (e.g. wiki) -> 400.
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path='sillywiki',
                provider='wiki',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 400)

    def test_head_returns_url(self):
        # HEAD requests respond with a redirect to the waterbutler URL.
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        resp = self.app.head('/{}/'.format(guid._id), auth=self.user.auth)
        location = furl.furl(resp.location)
        assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, version=''))

    def test_head_returns_url_with_version(self):
        # HEAD forwards revision and arbitrary query params.
        file_node = self.get_test_file()
        guid = file_node.get_guid(create=True)
        resp = self.app.head('/{}/?revision=1&foo=bar'.format(guid._id), auth=self.user.auth)
        location = furl.furl(resp.location)
        # Note: 'version' is added by us, and all other url params are forwarded as well.
        assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, revision=1, version='', foo='bar'))

    def test_nonexistent_addons_raise(self):
        # Provider addon deleted from the project -> 400.
        path = 'cloudfiles'
        self.project.delete_addon('github', Auth(self.user))
        self.project.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 400)

    def test_unauth_addons_raise(self):
        # Same as test_unauthorized_addons_raise: deauthorized addon -> 401.
        path = 'cloudfiles'
        self.node_addon.user_settings = None
        self.node_addon.save()
        resp = self.app.get(
            self.project.web_url_for(
                'addon_view_or_download_file',
                path=path,
                provider='github',
                action='download'
            ),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equals(resp.status_code, 401)

    def test_delete_action_creates_trashed_file_node(self):
        # A file_removed event moves the file node to TrashedFileNode.
        file_node = self.get_test_file()
        payload = {
            'provider': file_node.provider,
            'metadata': {
                'path': '/test/Test',
                'materialized': '/test/Test'
            }
        }
        views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
        assert_false(GithubFileNode.load(file_node._id))
        assert_true(TrashedFileNode.load(file_node._id))

    def test_delete_action_for_folder_deletes_subfolders_and_creates_trashed_file_nodes(self):
        # Deleting a folder recursively trashes its contents.
        file_node = self.get_test_file()
        subfolder = GithubFolder(
            name='folder',
            node=self.project,
            path='/test/folder/',
            materialized_path='/test/folder/',
        )
        subfolder.save()
        payload = {
            'provider': file_node.provider,
            'metadata': {
                'path': '/test/',
                'materialized': '/test/'
            }
        }
        views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
        assert_false(GithubFileNode.load(subfolder._id))
        assert_true(TrashedFileNode.load(file_node._id))

    @mock.patch('website.archiver.tasks.archive')
    def test_archived_from_url(self, mock_archive):
        # A registered file with copied_from exposes an archived-from URL.
        file_node = self.get_test_file()
        second_file_node = self.get_second_test_file()
        file_node.copied_from = second_file_node
        registered_node = self.project.register_node(
            schema=get_default_metaschema(),
            auth=Auth(self.user),
            data=None,
        )
        archived_from_url = views.get_archived_from_url(registered_node, file_node)
        view_url = self.project.web_url_for('addon_view_or_download_file', provider=file_node.provider, path=file_node.copied_from._id)
        assert_true(archived_from_url)
        assert_urls_equal(archived_from_url, view_url)

    @mock.patch('website.archiver.tasks.archive')
    def test_archived_from_url_without_copied_from(self, mock_archive):
        # Without copied_from there is no archived-from URL.
        file_node = self.get_test_file()
        registered_node = self.project.register_node(
            schema=get_default_metaschema(),
            auth=Auth(self.user),
            data=None,
        )
        archived_from_url = views.get_archived_from_url(registered_node, file_node)
        assert_false(archived_from_url)

    @mock.patch('website.archiver.tasks.archive')
    def test_copied_from_id_trashed(self, mock_archive):
        # Deleting the source file clears copied_from on the trashed node.
        file_node = self.get_test_file()
        second_file_node = self.get_second_test_file()
        file_node.copied_from = second_file_node
        self.project.register_node(
            schema=get_default_metaschema(),
            auth=Auth(self.user),
            data=None,
        )
        trashed_node = second_file_node.delete()
        assert_false(trashed_node.copied_from)

    @mock.patch('website.archiver.tasks.archive')
    def test_missing_modified_date_in_file_data(self, mock_archive):
        # update() tolerates a null 'modified' and records the data verbatim.
        file_node = self.get_test_file()
        file_data = {
            'name': 'Test File Update',
            'materialized': file_node.materialized_path,
            'modified': None
        }
        file_node.update(revision=None, data=file_data)
        assert_equal(len(file_node.history), 1)
        assert_equal(file_node.history[0], file_data)
class TestLegacyViews(OsfTestCase):
    """Legacy osffiles/file URLs should 301-redirect to the unified
    ``addon_view_or_download_file`` route."""

    def setUp(self):
        super(TestLegacyViews, self).setUp()
        self.path = 'mercury.png'
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.node_addon = self.project.get_addon('osfstorage')
        file_record = self.node_addon.get_root().append_file(self.path)
        self.expected_path = file_record._id
        self.node_addon.save()
        file_record.save()

    def _assert_301(self, url, **expected_kwargs):
        # Fetch the legacy URL and verify it permanently redirects to the
        # canonical addon_view_or_download_file route built from the kwargs.
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 301)
        expected_url = self.project.web_url_for(
            'addon_view_or_download_file', **expected_kwargs)
        assert_urls_equal(res.location, expected_url)

    def test_view_file_redirect(self):
        url = '/{0}/osffiles/{1}/'.format(self.project._id, self.path)
        self._assert_301(url, action='view', path=self.expected_path,
                         provider='osfstorage')

    def test_download_file_redirect(self):
        url = '/{0}/osffiles/{1}/download/'.format(self.project._id, self.path)
        self._assert_301(url, path=self.expected_path, action='download',
                         provider='osfstorage')

    def test_download_file_version_redirect(self):
        url = '/{0}/osffiles/{1}/version/3/download/'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, version=3, path=self.expected_path,
                         action='download', provider='osfstorage')

    def test_api_download_file_redirect(self):
        url = '/api/v1/project/{0}/osffiles/{1}/'.format(self.project._id, self.path)
        self._assert_301(url, path=self.expected_path, action='download',
                         provider='osfstorage')

    def test_api_download_file_version_redirect(self):
        url = '/api/v1/project/{0}/osffiles/{1}/version/3/'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, version=3, path=self.expected_path,
                         action='download', provider='osfstorage')

    def test_no_provider_name(self):
        url = '/{0}/files/{1}'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, action='view', path=self.expected_path,
                         provider='osfstorage')

    def test_action_as_param(self):
        url = '/{}/osfstorage/files/{}/?action=download'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, path=self.expected_path, action='download',
                         provider='osfstorage')

    def test_other_addon_redirect(self):
        # Non-osfstorage providers keep the original path (no GUID lookup).
        url = '/project/{0}/mycooladdon/files/{1}/'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, action='view', path=self.path,
                         provider='mycooladdon')

    def test_other_addon_redirect_download(self):
        url = '/project/{0}/mycooladdon/files/{1}/download/'.format(
            self.project._id,
            self.path,
        )
        self._assert_301(url, path=self.path, action='download',
                         provider='mycooladdon')
|
cwisecarver/osf.io
|
tests/test_addons.py
|
Python
|
apache-2.0
| 40,302
|
from .common import Common
from .vocabulary import ThreatDescriptor as td
from .vocabulary import ThreatExchange as t
class ThreatDescriptor(Common):
    """pytx object wrapper for the ThreatExchange threat-descriptors API.

    Endpoint URLs and the field lists used when requesting descriptor data.
    ``_default_fields`` matches ``_fields`` minus the two privacy fields
    (PRIVACY_MEMBERS and PRIVACY_TYPE).
    """

    # Endpoint roots for listing, detail, and related lookups.
    _URL = t.URL + t.VERSION + t.THREAT_DESCRIPTORS
    _DETAILS = t.URL + t.VERSION
    _RELATED = t.URL + t.VERSION

    # Every field the API can return for a descriptor.
    _fields = [
        td.ADDED_ON,
        td.CONFIDENCE,
        td.DESCRIPTION,
        td.EXPIRED_ON,
        td.FIRST_ACTIVE,
        td.ID,
        td.INDICATOR,
        td.LAST_ACTIVE,
        td.LAST_UPDATED,
        td.METADATA,
        td.MY_REACTIONS,
        td.OWNER,
        td.PRECISION,
        td.PRIVACY_MEMBERS,
        td.PRIVACY_TYPE,
        td.RAW_INDICATOR,
        td.REVIEW_STATUS,
        td.SEVERITY,
        td.SHARE_LEVEL,
        td.SOURCE_URI,
        td.STATUS,
        td.TAGS,
        td.TYPE,
    ]

    # Fields requested by default (privacy fields omitted).
    _default_fields = [
        td.ADDED_ON,
        td.CONFIDENCE,
        td.DESCRIPTION,
        td.EXPIRED_ON,
        td.FIRST_ACTIVE,
        td.ID,
        td.INDICATOR,
        td.LAST_ACTIVE,
        td.LAST_UPDATED,
        td.METADATA,
        td.MY_REACTIONS,
        td.OWNER,
        td.PRECISION,
        td.RAW_INDICATOR,
        td.REVIEW_STATUS,
        td.SEVERITY,
        td.SHARE_LEVEL,
        td.SOURCE_URI,
        td.STATUS,
        td.TAGS,
        td.TYPE,
    ]

    # This object type exposes no connections or uniqueness constraints.
    _connections = [
    ]
    _unique = [
    ]
|
mgoffin/ThreatExchange
|
pytx/pytx/threat_descriptor.py
|
Python
|
bsd-3-clause
| 1,351
|
import urlparse
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_str
import jinja2
from jingo import register
from tower import ugettext as _, ugettext_lazy as _lazy
import mkt
from access import acl
from amo.helpers import impala_breadcrumbs
from mkt.developers.helpers import mkt_page_title
from mkt.reviewers.utils import (AppsReviewing, clean_sort_param,
create_sort_link, device_queue_search)
@register.function
@jinja2.contextfunction
def reviewers_breadcrumbs(context, queue=None, items=None):
    """
    Wrapper around ``impala_breadcrumbs`` that prefixes the reviewer-tools
    crumb trail.

    **queue**
        Explicit queue type to set.
    **items**
        list of [(url, label)] to be inserted after Add-on.
    """
    trail = [(reverse('reviewers.home'), _('Reviewer Tools'))]
    if queue:
        labels = {'pending': _('Apps'),
                  'rereview': _('Re-reviews'),
                  'updates': _('Updates'),
                  'escalated': _('Escalations'),
                  'device': _('Device'),
                  'moderated': _('Moderated Reviews'),
                  'reviewing': _('Reviewing'),
                  'region': _('Regional Queues')}
        # The queue crumb links back to its listing only when further crumbs
        # follow; otherwise it is the end of the trail and carries no URL.
        queue_url = reverse('reviewers.apps.queue_%s' % queue) if items else None
        trail.append((queue_url, labels[queue]))
    if items:
        trail.extend(items)
    return impala_breadcrumbs(context, trail, add_default=True)
@register.function
@jinja2.contextfunction
def reviewers_page_title(context, title=None, addon=None):
    """Build the reviewer-tools page title, optionally scoped to an add-on."""
    if addon:
        full_title = u'%s | %s' % (title, addon.name)
    else:
        section = _lazy('Reviewer Tools')
        # With no explicit title, fall back to the bare section name.
        full_title = u'%s | %s' % (title, section) if title else section
    return mkt_page_title(context, full_title)
@register.function
@jinja2.contextfunction
def queue_tabnav(context):
    """
    Returns tuple of tab navigation for the queue pages.
    Each tuple contains three elements: (url, tab_code, tab_text)
    """
    request = context['request']
    counts = context['queue_counts']
    apps_reviewing = AppsReviewing(request).get_apps()
    # Apps.
    if acl.action_allowed(request, 'Apps', 'Review'):
        # NOTE(review): each count is passed both as a second positional
        # argument to _() and again via .format(); tower's ugettext treats
        # a second argument as message context, so the extra argument
        # looks unintended -- confirm against tower's API.
        rv = [
            (reverse('reviewers.apps.queue_pending'), 'pending',
             _('Apps ({0})', counts['pending']).format(counts['pending'])),
            (reverse('reviewers.apps.queue_rereview'), 'rereview',
             _('Re-reviews ({0})', counts['rereview']).format(
                 counts['rereview'])),
            (reverse('reviewers.apps.queue_updates'), 'updates',
             _('Updates ({0})', counts['updates']).format(counts['updates'])),
        ]
        if acl.action_allowed(request, 'Apps', 'ReviewEscalated'):
            rv.append((reverse('reviewers.apps.queue_escalated'), 'escalated',
                       _('Escalations ({0})', counts['escalated']).format(
                           counts['escalated'])))
        rv.extend([
            (reverse('reviewers.apps.queue_moderated'), 'moderated',
             _('Moderated Reviews ({0})', counts['moderated'])
             .format(counts['moderated'])),
            (reverse('reviewers.apps.apps_reviewing'), 'reviewing',
             _('Reviewing ({0})').format(len(apps_reviewing))),
        ])
        if acl.action_allowed(request, 'Apps', 'ReviewRegionCN'):
            url_ = reverse('reviewers.apps.queue_region',
                           args=[mkt.regions.CN.slug])
            rv.append((url_, 'region',
                       _('China ({0})').format(counts['region_cn'])))
    else:
        rv = []
    # The device tab is appended only when the request carries a 'pro'
    # query parameter.
    if 'pro' in request.GET:
        device_srch = device_queue_search(request)
        rv.append((reverse('reviewers.apps.queue_device'), 'device',
                   _('Device ({0})').format(device_srch.count()),))
    return rv
@register.function
@jinja2.contextfunction
def logs_tabnav(context):
    """
    Returns tuple of tab navigation for the log pages.
    Each tuple contains three elements: (named url, tab_code, tab_text)
    """
    return [('reviewers.apps.logs', 'apps', _('Reviews'))]
@register.function
@jinja2.contextfunction
def sort_link(context, pretty_name, sort_field):
    """Get table header sort links.
    pretty_name -- name displayed on table header
    sort_field -- name of get parameter, referenced to in views
    """
    request = context['request']
    current_sort, current_order = clean_sort_param(request)
    # Carry over every search/filter GET parameter except the sort keys.
    query_pairs = urlparse.parse_qsl(smart_str(request.META['QUERY_STRING']))
    get_params = [pair for pair in query_pairs
                  if pair[0] not in ('sort', 'order')]
    return create_sort_link(pretty_name, sort_field, get_params,
                            current_sort, current_order)
|
jinankjain/zamboni
|
mkt/reviewers/helpers.py
|
Python
|
bsd-3-clause
| 4,876
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Import produce and instatiate Objects using produce.Produce"""
import produce
# Module-level Produce instances exercised by the grading tests.
TOMATO = produce.Produce()
EGGPLANT = produce.Produce(arrival=1311210802)  # arrival given as a Unix timestamp
TOMATO_ARRIVAL = TOMATO.arrival
EGGPLANT_EXPIRES = EGGPLANT.get_expiration()
|
ModestoCabrera/is210-week-11-warmup
|
task_01.py
|
Python
|
mpl-2.0
| 279
|
import json
import os.path
from .. import config
DATA_DIR = os.path.dirname(__file__) + "/data/test_config"
EXPECTED_CONFIG = DATA_DIR + "/expected.json"
def test_load_config():
    """The config loaded from DATA_DIR must match the checked-in expectation."""
    with open(EXPECTED_CONFIG, "r") as expected_fh:
        expected = json.load(expected_fh)
    loaded = config.load_config(DATA_DIR)
    # To regenerate the expected file:
    # open(EXPECTED_CONFIG, "w").write(json.dumps(loaded, indent=4))
    assert expected == loaded
|
wiki-ai/editquality
|
editquality/tests/test_config.py
|
Python
|
mit
| 476
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
    # Default to the project's settings unless the environment overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
MadeInHaus/django-social
|
example/SocialExample/manage.py
|
Python
|
mit
| 242
|
'''
Implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "a*") → true
isMatch("aa", ".*") → true
isMatch("ab", ".*") → true
isMatch("aab", "c*a*b") → true
'''
class Solution(object):
    """Naive exponential-time regex matcher (kept for reference; TLE on LeetCode)."""
    # Time Limit Exceeded!!!
    def isMatch_timeout(self, s, p):
        """Return True if pattern p ('.' and '*' supported) matches all of s.

        BUG FIX: the recursive calls previously invoked ``self.isMatch``,
        a method this class does not define, raising AttributeError.

        :type s: str
        :type p: str
        :rtype: bool
        """
        # An empty pattern matches only an empty string.
        if not p:
            return not s
        # An empty string can still be matched by patterns like "a*".
        if not s and p:
            if 1 < len(p) and p[1] == "*":
                return self.isMatch_timeout(s, p[2:])
            else:
                return False
        # When the second character of the pattern is "*"
        if 1 < len(p) and p[1] == "*":
            # When the first character matches, "x*" may consume one, many,
            # or zero leading characters.
            if s[0] == p[0] or p[0] == ".":
                return self.isMatch_timeout(s[1:], p) or \
                       self.isMatch_timeout(s[1:], p[2:]) or \
                       self.isMatch_timeout(s, p[2:])
            # Otherwise "x*" can only match the empty string: skip it.
            else:
                return self.isMatch_timeout(s, p[2:])
        else:
            if s[0] == p[0] or p[0] == ".":
                return self.isMatch_timeout(s[1:], p[1:])
            else:
                return False
class Solution(object):
    """Bottom-up dynamic programming regex matcher for '.' and '*'."""
    def isMatch(self, s, p):
        """
        :type s: str
        :type p: str
        :rtype: bool
        """
        rows, cols = len(s), len(p)
        # table[i][j] answers: does s[i:] match p[j:]?
        table = [[False] * (cols + 1) for _ in range(rows + 1)]
        # Empty string vs empty pattern.
        table[rows][cols] = True
        # Bottom row: the empty string can still be matched by "a*b*"-style
        # patterns.
        for j in reversed(range(cols)):
            if p[j] == "*":
                table[rows][j] = table[rows][j + 1]
            elif j + 1 < cols and p[j + 1] == "*":
                table[rows][j] = table[rows][j + 1]
            else:
                table[rows][j] = False
        for i in reversed(range(rows)):
            for j in reversed(range(cols)):
                if p[j] == "*":
                    # A star cell was already handled with its preceding char.
                    if j - 1 >= 0 and p[j - 1] != "*":
                        table[i][j] = table[i][j + 1]
                    else:
                        # Pattern starts with "*" or contains "**": reject.
                        return False
                elif j + 1 < cols and p[j + 1] == "*":
                    # "c*" may match zero characters, one character, or more.
                    if s[i] == p[j] or p[j] == ".":
                        table[i][j] = (table[i][j + 2] or table[i + 1][j] or
                                       table[i + 1][j + 2])
                    else:
                        # "c*" cannot consume s[i]; it must match nothing.
                        table[i][j] = table[i][j + 2]
                else:
                    if s[i] == p[j] or p[j] == ".":
                        table[i][j] = table[i + 1][j + 1]
                    else:
                        table[i][j] = False
        return table[0][0]
if __name__ == "__main__":
    # Smoke tests mirroring the examples from the problem statement.
    assert Solution().isMatch("aa", "a") == False
    assert Solution().isMatch("aa", "aa") == True
    assert Solution().isMatch("aaa", "aa") == False
    assert Solution().isMatch("aa", "a*") == True
    assert Solution().isMatch("aa", ".*") == True
    assert Solution().isMatch("ab", ".*") == True
    assert Solution().isMatch("aab", "c*a*b") == True
|
gavinfish/leetcode-share
|
python/010 Regular Expression Matching.py
|
Python
|
mit
| 4,127
|
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
# Parse -n (number of groups) / -f (output file) from the command line.
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    # BUG FIX: the getopt module has no usage() function, so the old error
    # path raised AttributeError; report the parse error instead.
    print(err)
    sys.exit(2)
n = 5  # default number of random groups to generate
f = "data/groups.json"  # default output path, relative to the project root
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* followed by a random tail of up to maxlen-1 characters.

    The tail is drawn from letters, digits and punctuation, with spaces
    weighted ten times heavier.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation + " "*10
    tail_length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(tail_length))
# One deliberately empty group plus n randomly-populated ones.
testdata = [Group(name="", header="", footer="")] + [
    Group(name=random_string("name", 20), header=random_string("header", 20), footer=random_string("footer", 20))
    for i in range(n)
]
# Resolve the output path relative to this script's parent directory.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
|
SherMary/python_training
|
generator/group.py
|
Python
|
apache-2.0
| 1,001
|
#! /usr/bin/env python
# -*- coding:Utf-8 -*-
from exercice_10_18 import voyelle
def compteVoyelles(chu):
    """Count the vowels present in the unicode string chu."""
    return sum(1 for caractere in chu if voyelle(caractere))
# Test:
if __name__ == '__main__':
    phrase ="Maître corbeau sur un arbre perché"
    nv = compteVoyelles(phrase.decode("Utf8"))  # Python 2: decode the byte string to unicode first
    print "La phrase", phrase, "compte", nv, "voyelles."
|
widowild/messcripts
|
exercice/python2/solutions/exercice_10_19.py
|
Python
|
gpl-3.0
| 451
|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import event_mass_edit
|
CLVsol/clvsol_odoo_addons
|
clv_event_history/wizard/__init__.py
|
Python
|
agpl-3.0
| 205
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import inspect
import os
import shutil
import tempfile
import unittest
import pytest
from datetime import date, datetime, timedelta
from babel import support
from babel.messages import Catalog
from babel.messages.mofile import write_mo
from babel._compat import BytesIO
@pytest.mark.usefixtures('os_environ')
class TranslationsTestCase(unittest.TestCase):
    """Exercise the gettext method variants on a merged Translations object.

    Two catalogs (domains 'messages' and 'messages1') are compiled to MO
    data in memory and merged, so the plain, u*, l*, d*, n* and p* method
    combinations can all be checked for both value and return type.
    """
    def setUp(self):
        """Build, compile and merge the two in-memory catalogs."""
        # Use a locale which won't fail to run the tests
        os.environ['LANG'] = 'en_US.UTF-8'
        messages1 = [
            ('foo', {'string': 'Voh'}),
            ('foo', {'string': 'VohCTX', 'context': 'foo'}),
            (('foo1', 'foos1'), {'string': ('Voh1', 'Vohs1')}),
            (('foo1', 'foos1'), {'string': ('VohCTX1', 'VohsCTX1'), 'context': 'foo'}),
        ]
        messages2 = [
            ('foo', {'string': 'VohD'}),
            ('foo', {'string': 'VohCTXD', 'context': 'foo'}),
            (('foo1', 'foos1'), {'string': ('VohD1', 'VohsD1')}),
            (('foo1', 'foos1'), {'string': ('VohCTXD1', 'VohsCTXD1'), 'context': 'foo'}),
        ]
        catalog1 = Catalog(locale='en_GB', domain='messages')
        catalog2 = Catalog(locale='en_GB', domain='messages1')
        for ids, kwargs in messages1:
            catalog1.add(ids, **kwargs)
        for ids, kwargs in messages2:
            catalog2.add(ids, **kwargs)
        catalog1_fp = BytesIO()
        catalog2_fp = BytesIO()
        write_mo(catalog1_fp, catalog1)
        catalog1_fp.seek(0)
        write_mo(catalog2_fp, catalog2)
        catalog2_fp.seek(0)
        translations1 = support.Translations(catalog1_fp)
        translations2 = support.Translations(catalog2_fp, domain='messages1')
        # merge=False keeps the second domain addressable via the d* methods.
        self.translations = translations1.add(translations2, merge=False)
    def assertEqualTypeToo(self, expected, result):
        """Assert equality and that both values also share the same type."""
        self.assertEqual(expected, result)
        assert type(expected) == type(result), "instance type's do not " + \
            'match: %r!=%r' % (type(expected), type(result))
    def test_pgettext(self):
        self.assertEqualTypeToo('Voh', self.translations.gettext('foo'))
        self.assertEqualTypeToo('VohCTX', self.translations.pgettext('foo',
                                                                     'foo'))
    def test_upgettext(self):
        self.assertEqualTypeToo(u'Voh', self.translations.ugettext('foo'))
        self.assertEqualTypeToo(u'VohCTX', self.translations.upgettext('foo',
                                                                       'foo'))
    def test_lpgettext(self):
        self.assertEqualTypeToo(b'Voh', self.translations.lgettext('foo'))
        self.assertEqualTypeToo(b'VohCTX', self.translations.lpgettext('foo',
                                                                       'foo'))
    def test_npgettext(self):
        self.assertEqualTypeToo('Voh1',
                                self.translations.ngettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo('Vohs1',
                                self.translations.ngettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo('VohCTX1',
                                self.translations.npgettext('foo', 'foo1',
                                                            'foos1', 1))
        self.assertEqualTypeToo('VohsCTX1',
                                self.translations.npgettext('foo', 'foo1',
                                                            'foos1', 2))
    def test_unpgettext(self):
        self.assertEqualTypeToo(u'Voh1',
                                self.translations.ungettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo(u'Vohs1',
                                self.translations.ungettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo(u'VohCTX1',
                                self.translations.unpgettext('foo', 'foo1',
                                                             'foos1', 1))
        self.assertEqualTypeToo(u'VohsCTX1',
                                self.translations.unpgettext('foo', 'foo1',
                                                             'foos1', 2))
    def test_lnpgettext(self):
        self.assertEqualTypeToo(b'Voh1',
                                self.translations.lngettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo(b'Vohs1',
                                self.translations.lngettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo(b'VohCTX1',
                                self.translations.lnpgettext('foo', 'foo1',
                                                             'foos1', 1))
        self.assertEqualTypeToo(b'VohsCTX1',
                                self.translations.lnpgettext('foo', 'foo1',
                                                             'foos1', 2))
    def test_dpgettext(self):
        self.assertEqualTypeToo(
            'VohD', self.translations.dgettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            'VohCTXD', self.translations.dpgettext('messages1', 'foo', 'foo'))
    def test_dupgettext(self):
        self.assertEqualTypeToo(
            u'VohD', self.translations.dugettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            u'VohCTXD', self.translations.dupgettext('messages1', 'foo', 'foo'))
    def test_ldpgettext(self):
        self.assertEqualTypeToo(
            b'VohD', self.translations.ldgettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            b'VohCTXD', self.translations.ldpgettext('messages1', 'foo', 'foo'))
    def test_dnpgettext(self):
        self.assertEqualTypeToo(
            'VohD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            'VohsD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            'VohCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
                                                     'foos1', 1))
        self.assertEqualTypeToo(
            'VohsCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
                                                      'foos1', 2))
    def test_dunpgettext(self):
        self.assertEqualTypeToo(
            u'VohD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            u'VohsD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            u'VohCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
                                                       'foos1', 1))
        self.assertEqualTypeToo(
            u'VohsCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
                                                        'foos1', 2))
    def test_ldnpgettext(self):
        self.assertEqualTypeToo(
            b'VohD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            b'VohsD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            b'VohCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
                                                       'foos1', 1))
        self.assertEqualTypeToo(
            b'VohsCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
                                                        'foos1', 2))
    def test_load(self):
        """Translations.load picks up a compiled MO file from a locale tree."""
        tempdir = tempfile.mkdtemp()
        try:
            messages_dir = os.path.join(tempdir, 'fr', 'LC_MESSAGES')
            os.makedirs(messages_dir)
            catalog = Catalog(locale='fr', domain='messages')
            catalog.add('foo', 'bar')
            with open(os.path.join(messages_dir, 'messages.mo'), 'wb') as f:
                write_mo(f, catalog)
            translations = support.Translations.load(tempdir, locales=('fr',), domain='messages')
            self.assertEqual('bar', translations.gettext('foo'))
        finally:
            shutil.rmtree(tempdir)
class NullTranslationsTestCase(unittest.TestCase):
    """Verify NullTranslations mirrors the full Translations gettext API."""
    def setUp(self):
        buf = BytesIO()
        write_mo(buf, Catalog(locale='de'))
        buf.seek(0)
        self.translations = support.Translations(fp=buf)
        self.null_translations = support.NullTranslations(fp=buf)
    def method_names(self):
        """All gettext-style attribute names found on the real Translations."""
        return [attr for attr in dir(self.translations) if 'gettext' in attr]
    def test_same_methods(self):
        for attr in self.method_names():
            if not hasattr(self.null_translations, attr):
                self.fail('NullTranslations does not provide method %r' % attr)
    def test_method_signature_compatibility(self):
        signature = inspect.getargspec
        for attr in self.method_names():
            real_method = getattr(self.translations, attr)
            null_method = getattr(self.null_translations, attr)
            self.assertEqual(signature(real_method), signature(null_method))
    def test_same_return_values(self):
        data = {
            'message': u'foo', 'domain': u'domain', 'context': 'tests',
            'singular': u'bar', 'plural': u'baz', 'num': 1,
            'msgid1': u'bar', 'msgid2': u'baz', 'n': 1,
        }
        for attr in self.method_names():
            real_method = getattr(self.translations, attr)
            null_method = getattr(self.null_translations, attr)
            # Feed each method the sample value matching its parameter names.
            argspec = inspect.getargspec(real_method)
            argnames = [arg for arg in argspec[0] if arg != 'self']
            values = [data[arg] for arg in argnames]
            self.assertEqual(real_method(*values), null_method(*values))
class LazyProxyTestCase(unittest.TestCase):
    """Behaviour of LazyProxy result caching."""
    def test_proxy_caches_result_of_function_call(self):
        self.counter = 0
        def bump():
            self.counter += 1
            return self.counter
        cached_proxy = support.LazyProxy(bump)
        # The second access must reuse the cached value, not call bump again.
        self.assertEqual(1, cached_proxy.value)
        self.assertEqual(1, cached_proxy.value)
    def test_can_disable_proxy_cache(self):
        self.counter = 0
        def bump():
            self.counter += 1
            return self.counter
        uncached_proxy = support.LazyProxy(bump, enable_cache=False)
        # With caching disabled every access re-invokes the callable.
        self.assertEqual(1, uncached_proxy.value)
        self.assertEqual(2, uncached_proxy.value)
def test_format_date():
    """A plain date renders in the en_US medium format."""
    formatter = support.Format('en_US')
    assert formatter.date(date(2007, 4, 1)) == 'Apr 1, 2007'
def test_format_datetime():
    """Datetimes are converted into the Format's tzinfo before rendering."""
    from pytz import timezone
    eastern = timezone('US/Eastern')
    formatter = support.Format('en_US', tzinfo=eastern)
    assert (formatter.datetime(datetime(2007, 4, 1, 15, 30))
            == 'Apr 1, 2007, 11:30:00 AM')
def test_format_time():
    """Times are converted into the Format's tzinfo before rendering."""
    from pytz import timezone
    eastern = timezone('US/Eastern')
    formatter = support.Format('en_US', tzinfo=eastern)
    assert formatter.time(datetime(2007, 4, 1, 15, 30)) == '11:30:00 AM'
def test_format_timedelta():
    """Timedeltas are rendered as a coarse human-readable unit."""
    formatter = support.Format('en_US')
    assert formatter.timedelta(timedelta(weeks=11)) == '3 months'
def test_format_number():
    """Integers get en_US thousands separators."""
    formatter = support.Format('en_US')
    assert formatter.number(1099) == '1,099'
def test_format_decimal():
    """Decimals are rounded to the locale's default precision."""
    formatter = support.Format('en_US')
    assert formatter.decimal(1.2345) == '1.234'
def test_format_percent():
    """Fractions render as en_US percentages."""
    formatter = support.Format('en_US')
    assert formatter.percent(0.34) == '34%'
def test_lazy_proxy():
    """LazyProxy behaves like the string its callable lazily produces."""
    def greeting(name='world'):
        return u'Hello, %s!' % name
    lazy_greeting = support.LazyProxy(greeting, name='Joe')
    assert str(lazy_greeting) == u'Hello, Joe!'
    assert u' ' + lazy_greeting == u' Hello, Joe!'
    assert u'(%s)' % lazy_greeting == u'(Hello, Joe!)'
    # Proxies compare (and therefore sort) by their realized values.
    proxies = [
        support.LazyProxy(greeting, 'world'),
        support.LazyProxy(greeting, 'Joe'),
        support.LazyProxy(greeting, 'universe'),
    ]
    proxies.sort()
    assert [str(proxy) for proxy in proxies] == [
        u'Hello, Joe!',
        u'Hello, universe!',
        u'Hello, world!',
    ]
|
hoosteeno/fjord
|
vendor/packages/Babel-2.1.1/tests/test_support.py
|
Python
|
bsd-3-clause
| 12,386
|
# Copyright (c) 2009, Andrew McNabb
from errno import EINTR
from copy import deepcopy
import os
import select
import signal
import sys
import datetime
import cPickle
from psshlib.askpass_server import PasswordServer
from psshlib import psshutil
from psshlib.ui import ProgressBar, ask_yes_or_no, clear_line, print_task_report, print_summary
from psshlib.exceptions import FatalError
from psshlib.output import Writer, SshTaskDatabase
READ_SIZE = 1 << 16
class Manager(object):
    """Executes tasks concurrently.
    Tasks are added with add_task() and executed in parallel with run().
    Returns a list of the exit statuses of the processes.
    Arguments:
        limit: Maximum number of commands running at once.
        timeout: Maximum allowed execution time in seconds.
    """
    def __init__(self, opts):
        """Unpack parsed command-line options and set up empty result buckets."""
        self.opts = opts
        self.limit = opts.par
        self.timeout = opts.timeout
        self.askpass = opts.askpass
        self.outdir = opts.outdir
        self.errdir = opts.errdir
        self.iomap = IOMap()
        self.taskcount = 0
        self.tasks = []
        self.running = []
        self.done = []
        self.succeeded = []
        self.ssh_failed = []
        self.cmd_failed = []
        self.killed = []
        self.askpass_socket = None
        self.progress_bar = opts.progress_bar
        self.test_cases = opts.test_cases
    def _setup_progress_bar(self):
        """ This should be called after ``self.tasks`` is populated
        """
        if self.progress_bar:
            self.progress_bar = ProgressBar(len(self.tasks))
    def _split_manager(self):
        """Run the first ``test_cases`` tasks in a sub-manager, pause for a
        yes/no confirmation, then run the remainder in a second sub-manager.
        Returns the (test, finish) manager pair."""
        # set up the test manager and add first n tasks
        new_opts = deepcopy(self.opts)
        new_opts.__dict__['test_cases'] = None # remove test_cases option, or there'll be a recursion error
        new_opts.__dict__['summary'] = None # don't print summary now, do it later
        test_man = self.__class__(new_opts)
        map(test_man.add_task, self.tasks[slice(0, self.test_cases)])
        psshutil.run_manager(test_man)
        test_man.tally_results()
        print
        while True:
            answer = ask_yes_or_no("Paused run. OK to continue").lower()
            if answer == 'y':
                break
            elif answer == 'n':
                sys.exit(0)
        print
        finish_man = self.__class__(new_opts)
        # add remaining tasks
        map(finish_man.add_task, self.tasks[slice(self.test_cases, None)])
        psshutil.run_manager(finish_man)
        return test_man, finish_man
    def run(self):
        """Processes tasks previously added with add_task."""
        self._setup_progress_bar()
        # Optionally split the run into a small confirmation batch plus the rest.
        if self.test_cases and self.test_cases < len(self.tasks):
            man1, man2 = self._split_manager()
            self.done = man1.done + man2.done
        else:
            self._run()
        self.tally_results()
        if self.opts.summary:
            print_summary(self.succeeded, self.ssh_failed, self.killed, self.cmd_failed)
        # Optionally write passed/failed host lists for re-running subsets.
        if self.opts.fork_hosts:
            failed_file = open(self.opts.fork_hosts + '.failed.lst', 'w')
            passed_file = open(self.opts.fork_hosts + '.passed.lst', 'w')
            for i in self.ssh_failed + self.killed + self.cmd_failed:
                failed_file.write(i.host + '\n')
            for i in self.succeeded:
                passed_file.write(i.host + '\n')
        return [task.exitstatus for task in self.done]
    def _run(self):
        """Drive the poll loop until all tasks finish, wiring up the output
        writer and the SIGCHLD handler; cleans up on keyboard interrupt."""
        try:
            if self.outdir or self.errdir:
                writer = Writer(self.outdir, self.errdir)
                writer.start()
            else:
                writer = None
            self._acquire_password()
            self.set_sigchld_handler()
            try:
                self.update_tasks(writer)
                wait = None
                while self.running or self.tasks:
                    # Opt for efficiency over subsecond timeout accuracy.
                    if wait is None or wait < 1:
                        wait = 1
                    self.iomap.poll(wait)
                    self.update_tasks(writer)
                    wait = self.check_timeout()
            except KeyboardInterrupt:
                # This exception handler tries to clean things up and prints
                # out a nice status message for each interrupted host.
                self.interrupted()
        except KeyboardInterrupt:
            # This exception handler doesn't print out any fancy status
            # information--it just stops.
            pass
        if writer:
            writer.signal_quit()
            writer.join()
    def _acquire_password(self):
        """Start the askpass server when password forwarding was requested."""
        if self.askpass:
            pass_server = PasswordServer()
            pass_server.start(self.iomap, self.limit)
            self.askpass_socket = pass_server.address
    def tally_results(self):
        """Bucket finished tasks by exit status: negative means killed by a
        signal, 255 an ssh failure, other nonzero a command failure."""
        for task in self.done:
            if task.exitstatus < 0:
                self.killed.append(task)
            elif task.exitstatus == 255:
                self.ssh_failed.append(task)
            elif task.exitstatus != 0:
                self.cmd_failed.append(task)
            else:
                self.succeeded.append(task)
    def clear_sigchld_handler(self):
        """Restore the default SIGCHLD disposition."""
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    def set_sigchld_handler(self):
        # TODO: find out whether set_wakeup_fd still works if the default
        # signal handler is used (I'm pretty sure it doesn't work if the
        # signal is ignored).
        signal.signal(signal.SIGCHLD, self.handle_sigchld)
        # This should keep reads and writes from getting EINTR.
        if hasattr(signal, 'siginterrupt'):
            signal.siginterrupt(signal.SIGCHLD, False)
    def handle_sigchld(self, number, frame):
        """Apparently we need a sigchld handler to make set_wakeup_fd work."""
        # Write to the signal pipe (only for Python <2.5, where the
        # set_wakeup_fd method doesn't exist).
        if self.iomap.wakeup_writefd:
            os.write(self.iomap.wakeup_writefd, '\0')
        for task in self.running:
            if task.proc:
                task.proc.poll()
        # Apparently some UNIX systems automatically resent the SIGCHLD
        # handler to SIG_DFL.  Reset it just in case.
        self.set_sigchld_handler()
    def add_task(self, task):
        """Adds a Task to be processed with run()."""
        self.tasks.append(task)
    def update_tasks(self, writer):
        """Reaps tasks and starts as many new ones as allowed."""
        # Mask signals to work around a Python bug:
        #   http://bugs.python.org/issue1068268
        # Since sigprocmask isn't in the stdlib, clear the SIGCHLD handler.
        # Since signals are masked, reap_tasks needs to be called once for
        # each loop.
        keep_running = True
        while keep_running:
            self.clear_sigchld_handler()
            self._start_tasks_once(writer)
            self.set_sigchld_handler()
            keep_running = self.reap_tasks()
    def _start_tasks_once(self, writer):
        """Starts tasks once.
        Due to http://bugs.python.org/issue1068268, signals must be masked
        when this method is called.
        """
        while 0 < len(self.tasks) and len(self.running) < self.limit:
            task = self.tasks.pop(0)
            self.running.append(task)
            task.start(self.taskcount, self.iomap, writer, self.askpass_socket)
            self.taskcount += 1
    def reap_tasks(self):
        """Checks to see if any tasks have terminated.
        After cleaning up, returns the number of tasks that finished.
        """
        still_running = []
        finished_count = 0
        for task in self.running:
            if task.running():
                still_running.append(task)
            else:
                self.finished(task)
                finished_count += 1
        self.running = still_running
        return finished_count
    def check_timeout(self):
        """Kills timed-out processes and returns the lowest time left."""
        if self.timeout <= 0:
            return None
        min_timeleft = None
        for task in self.running:
            timeleft = self.timeout - task.elapsed()
            if timeleft <= 0:
                task.timedout()
                continue
            if min_timeleft is None or timeleft < min_timeleft:
                min_timeleft = timeleft
        if min_timeleft is None:
            return 0
        else:
            return max(0, min_timeleft)
    def interrupted(self):
        """Cleans up after a keyboard interrupt."""
        for task in self.running:
            task.interrupted()
            self.finished(task)
        for task in self.tasks:
            task.cancel()
            self.finished(task)
    def finished(self, task):
        """Marks a task as complete and reports its status to stdout."""
        self.done.append(task)
        task.sequence = len(self.done)
        if self.progress_bar:
            self.progress_bar.tick()
        else:
            print_task_report(task)
class ScpManager(Manager):
    """Manager variant for scp tasks: any nonzero exit counts as an ssh failure."""
    def tally_results(self):
        """Sort finished tasks into killed / succeeded / ssh_failed buckets."""
        for finished_task in self.done:
            status = finished_task.exitstatus
            if status < 0:
                self.killed.append(finished_task)
            elif status == 0:
                self.succeeded.append(finished_task)
            else:
                self.ssh_failed.append(finished_task)
class SshManager(Manager):
    # Manager variant that can export finished-task data after the run.
    def run(self):
        """Run all tasks, then optionally export results to SQLite and/or a pickle file."""
        super(SshManager, self).run()
        if self.opts.sqlite_db:
            sys.stdout.write('Exporting to database "%s".\n' % self.opts.sqlite_db)
            db = SshTaskDatabase(self.opts.sqlite_db)
            # NOTE(review): relies on Python 2's eager map() for side effects.
            map(db.capture_data, self.done)
            db.close()
        if self.opts.pickle_file:
            sys.stdout.write('Exporting to pickle file "%s".\n' % self.opts.pickle_file)
            fd = open(self.opts.pickle_file, 'a')
            cPickle.dump(self, fd, cPickle.HIGHEST_PROTOCOL)
            fd.close()
            sys.stdout.write('\n')
    def __reduce__(self): # for pickling task data
        return (list, tuple(), None, (i.get_data() for i in self.done))
class IOMap(object):
    """A manager for file descriptors and their associated handlers.
    The poll method dispatches events to the appropriate handlers.
    """
    def __init__(self):
        """Set up the fd->handler maps and the SIGCHLD wakeup pipe."""
        self.readmap = {}
        self.writemap = {}
        # Setup the wakeup file descriptor to avoid hanging on lost signals.
        wakeup_readfd, wakeup_writefd = os.pipe()
        self.register_read(wakeup_readfd, self.wakeup_handler)
        # TODO: remove test when we stop supporting Python <2.5
        if hasattr(signal, 'set_wakeup_fd'):
            signal.set_wakeup_fd(wakeup_writefd)
            self.wakeup_writefd = None
        else:
            self.wakeup_writefd = wakeup_writefd
    def register_read(self, fd, handler):
        """Registers an IO handler for a file descriptor for reading."""
        self.readmap[fd] = handler
    def register_write(self, fd, handler):
        """Registers an IO handler for a file descriptor for writing."""
        self.writemap[fd] = handler
    def unregister(self, fd):
        """Unregisters the given file descriptor."""
        if fd in self.readmap:
            del self.readmap[fd]
        if fd in self.writemap:
            del self.writemap[fd]
    def poll(self, timeout=None):
        """Performs a poll and dispatches the resulting events."""
        if not self.readmap and not self.writemap:
            return
        rlist = list(self.readmap)
        wlist = list(self.writemap)
        try:
            rlist, wlist, _ = select.select(rlist, wlist, [], timeout)
        except select.error:
            # A signal may interrupt select(); treat EINTR as a no-op poll.
            _, e, _ = sys.exc_info()
            errno = e.args[0]
            if errno == EINTR:
                return
            else:
                raise
        for fd in rlist:
            handler = self.readmap[fd]
            handler(fd, self)
        for fd in wlist:
            handler = self.writemap[fd]
            handler(fd, self)
    def wakeup_handler(self, fd, iomap):
        """Handles read events on the signal wakeup pipe.
        This ensures that SIGCHLD signals aren't lost.
        """
        try:
            os.read(fd, READ_SIZE)
        except (OSError, IOError):
            _, e, _ = sys.exc_info()
            errno, message = e.args
            if errno != EINTR:
                sys.stderr.write('Fatal error reading from wakeup pipe: %s\n'
                                 % message)
                raise FatalError
|
jorik041/parallel-ssh
|
psshlib/manager.py
|
Python
|
bsd-3-clause
| 12,648
|
import pandas as pd
import datetime
import numpy as np
from sklearn import preprocessing, cross_validation, svm, linear_model
import matplotlib.pyplot as plt
from sklearn.learning_curve import learning_curve
from sklearn.feature_selection import RFE, RFECV
import os
os.system('clear')
"""Found at http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html"""
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot mean training and cross-validation scores versus training-set
    size, with one-standard-deviation bands, and return the pyplot module."""
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands: one standard deviation around each mean curve.
    plt.fill_between(sizes, train_mean - train_std, train_mean + train_std,
                     alpha=0.1, color="r")
    plt.fill_between(sizes, test_mean - test_std, test_mean + test_std,
                     alpha=0.1, color="g")
    plt.plot(sizes, train_mean, 'o-', color="r",
             label="Training score")
    plt.plot(sizes, test_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
"""This method is purely for checking that the logic works"""
def print_full(x):
    """Print every row of frame *x*, then restore the display limit.

    Uses ``pd.option_context`` so 'display.max_rows' is restored to the
    caller's previous value even if printing raises; the original
    set_option/reset_option pair leaked the setting on error and always
    reset to the library default.
    """
    with pd.option_context('display.max_rows', len(x)):
        print(x)
"""This method converts the calculated probability to a spread"""
def spread_conversion(x):
"""
http://www.bettingtalk.com/win-probability-percentage-point-spread-nfl-nba/
"""
home_dog = False
if x < 0.5:
x = 1.0 - x
home_dog = True
if x < .513:
return_val = -0.5
elif x < .525:
return_val = -1
elif x < .535:
return_val = -1.5
elif x < .545:
return_val = -2
elif x < .594:
return_val = -2.5
elif x < .643:
return_val = -3
elif x < .658:
return_val = -3.5
elif x < .673:
return_val = -4
elif x < .681:
return_val = -4.5
elif x < .69:
return_val = -5
elif x < .707:
return_val = -5.5
elif x < .724:
return_val = -6
elif x < .752:
return_val = -6.5
elif x < .781:
return_val = -7
elif x < .791:
return_val = -7.5
elif x < .802:
return_val = -8
elif x < .807:
return_val = -8.5
elif x < .811:
return_val = -9
elif x < .836:
return_val = -9.5
elif x < .86:
return_val = -10
elif x < .871:
return_val = -10.5
elif x < .882:
return_val = -11
elif x < .885:
return_val = -11.5
elif x < .887:
return_val = -12
elif x < .893:
return_val = -12.5
elif x < .9:
return_val = -13
elif x < .924:
return_val = -13.5
elif x < .949:
return_val = -14
elif x < .956:
return_val = -14.5
elif x < .963:
return_val = -15
elif x < .981:
return_val = -15.5
elif x < .998:
return_val = -16
else:
return_val = -16.5
if home_dog == True:
return float(-return_val)
else:
return float(return_val)
# --- Data loading and feature preparation -------------------------------
# Hyperparameter search grid for LogisticRegression's C, plus running
# best-score trackers used by the model-selection loop further below.
C_vec = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1.0, 3.0, 6.0, 10.0]
prob_val = 0
C_val = 0
# Read in csv table
df = pd.read_csv('nflscraper/command_results.csv')
# Calculate offensive efficiency (pass yards per attempt and rush yards per carry)
# The scraped pass totals are '-'-delimited strings, e.g. "20-30-250-2-1".
home_pass_stats = pd.DataFrame(df['hpass_tot'].str.split('-').tolist(),columns="completions attempts yards touchdowns interceptions".split())
away_pass_stats = pd.DataFrame(df['apass_tot'].str.split('-').tolist(),columns="completions attempts yards touchdowns interceptions".split())
# 'neg7' is how the scraper encoded a negative yardage value.
away_pass_stats.replace('neg7','-7',inplace=True)
home_pass_stats['yards'] = home_pass_stats['yards'].astype(float)
home_pass_stats['attempts'] = home_pass_stats['attempts'].astype(float)
away_pass_stats['yards'] = away_pass_stats.yards.astype(float)
away_pass_stats['attempts'] = away_pass_stats.attempts.astype(float)
df['home_pass'] = home_pass_stats['yards'] / home_pass_stats['attempts']
df['away_pass'] = away_pass_stats['yards'] / away_pass_stats['attempts']
del home_pass_stats, away_pass_stats
# NOTE(review): the next four astype() calls discard their result (astype
# is not in-place); they are effectively no-ops -- confirm whether the
# columns are already numeric or whether assignment was intended.
df['home_rush'].astype(float)
df['hrush_att'].astype(float)
df['away_rush'].astype(float)
df['arush_att'].astype(float)
df['home_rush'] = df['home_rush'] / df['hrush_att']
df['away_rush'] = df['away_rush'] / df['arush_att']
# Change the string date data in df to datetime format
df['game_date'] = pd.to_datetime(df['game_date'],format='%Y-%m-%d')
""" Create home time of possession differential """
# Convert objects to datetime values
df['home_poss'] = pd.to_datetime(df['home_poss'],format='%M:%S')
df['away_poss'] = pd.to_datetime(df['away_poss'],format='%M:%S')
# Convert datetime values to fractions of an hour
df['home_poss'] = df['home_poss'].dt.minute / 60.0 + df['home_poss'].dt.second / 3600.0
df['away_poss'] = df['away_poss'].dt.minute / 60.0 + df['away_poss'].dt.second / 3600.0
# Find total possession time (only really matters because games can go to overtime)
# And re-weight time of possession
df['total_poss'] = df['home_poss'] + df['away_poss']
df['home_poss'] = df['home_poss'] / df['total_poss']
df['away_poss'] = df['away_poss'] / df['total_poss']
df.drop('total_poss',axis = 1, inplace=True) # Delete total possession column as it's no longer needed
""" Set date after which you don't want to use data to train classifier """
cutoff_date = datetime.date(2016, 8, 1)
""" Creating prediction dataset """
# Create dataset to be used for prediction
predicting_set = df[df.game_date >= cutoff_date].set_index('game_date')
predicting_set = predicting_set.sort_index() # Sort ascending by index
predicting_set['week'] = np.nan # Create column indicating which game week the team is in
# Populating the week column of predicting_set
start_date = predicting_set.index.min().date() # Find the first game date
end_date = predicting_set.index.max().date() # Find the last game date
date_val = start_date # date_val is the value used to cycle through the datetime objects
week_val = 1 # week_val is the iterating value that tracks the "current" week
week_dict={} # Create an empty dictionary
# Cycle through the number of days
for _ in range((end_date - start_date).days+1):
    if date_val.weekday() == 1: # Check if the weekday value is a Tuesday
        week_val += 1 # Increment since I'm considering Tuesday to start a new week
    week_dict[date_val] = week_val # Update the dictionary value for the "current" date
    date_val += datetime.timedelta(days=1) # Increment date_val by one day
# week_dict[datetime.date(2016,2,7)] = 21 # This is manually done since there are two weeks between conf championships and the final game
# Update the week column in the dataframe with the dictionary values and then delete the dictionary and its components used to create it
predicting_set['week'].update(pd.Series(week_dict))
# 'Pick' (pick 'em) games become a zero spread.
predicting_set['vegasline'].replace('Pick','0',inplace=True)
del week_dict, start_date, end_date, date_val, week_val
# Manually call the names of the columns from the scraped data
home_columns = ['home_four','home_oyds','home_pass','home_pens','home_poss','home_rush','home_sack','home_score','home_team','home_third','home_turn','hpens_yds','hsack_yds','hpass_tot','hrush_att']
away_columns = ['apens_yds','asack_yds','away_four','away_oyds','away_pass','away_pens','away_poss','away_rush','away_sack','away_score','away_team','away_third','away_turn','apass_tot','arush_att']
# Create a mapping to combine home and away columns
home_cols = {'game week': 'game week', 'home_four': 'fourth down', 'home_oyds': 'total yards', 'home_pass': 'pass yards', 'home_pens': 'penalties', 'home_poss': 'possession', 'home_rush': 'rush yards',
             'home_sack': 'sacks', 'home_score': 'score', 'home_team': 'team', 'home_third': 'third down', 'home_turn': 'turnovers', 'hpens_yds': 'penalty yards', 'hsack_yds': 'sack yards',
             'vegasline': 'spread', 'overunder': 'total score'}
away_cols = {'game week': 'game week', 'away_four': 'fourth down', 'away_oyds': 'total yards', 'away_pass': 'pass yards', 'away_pens': 'penalties', 'away_poss': 'possession', 'away_rush': 'rush yards',
             'away_sack': 'sacks', 'away_score': 'score', 'away_team': 'team', 'away_third': 'third down', 'away_turn': 'turnovers', 'apens_yds': 'penalty yards', 'asack_yds': 'sack yards',
             'vegasline': 'spread', 'overunder': 'total score'}
# Create only home and away dataframes
away = predicting_set.drop(home_columns,axis=1)
home = predicting_set.drop(away_columns,axis=1)
away.drop('apass_tot',axis=1,inplace=True)
away.drop('arush_att',axis=1,inplace=True)
away.drop('away_oyds',axis=1,inplace=True)
home.drop('hpass_tot',axis=1,inplace=True)
home.drop('hrush_att',axis=1,inplace=True)
home.drop('home_oyds',axis=1,inplace=True)
# Create home and away scores which will be used to compare to the predicted value
away_score = predicting_set[['away_score','week']]
home_score = predicting_set[['home_score','week']]
# Remove all games not included in the prediction
away_score = away_score[away_score.week >= 4]
home_score = home_score[home_score.week >= 4]
# Drop the 'week' column as it is no longer needed
away_score.drop('week',axis=1,inplace=True)
home_score.drop('week',axis=1,inplace=True)
# Rename the columns in these dataframes removing the home and away modifier according to the mapping
away.rename(columns=away_cols,inplace=True)
home.rename(columns=home_cols,inplace=True)
# Sort the rows chronologically
# NOTE(review): axis=1 sorts *columns* alphabetically, not rows -- the two
# comments above/below do not match the code; confirm which was intended.
away.sort_index(axis=1,inplace=True)
home.sort_index(axis=1,inplace=True)
away_score.sort_index(inplace=True)
home_score.sort_index(inplace=True)
# Pull the actual spreads from the scraped data
spreads = home[home['week'] >= 4]
spreads = spreads['spread'].str.split().str[-1]
spreads = pd.to_numeric(spreads)
home.drop(['spread','total score'],axis=1,inplace=True)
away.drop(['spread','total score'],axis=1,inplace=True)
# Group both home and away stats into one dataframe
total_set = home.append(away)
# Create a list of the 32 teams
team_list = total_set['team'].unique()
# --- Running-average stats and matchup differentials --------------------
# This loop will pull in all data and calculate running averages
# NOTE(review): range(4,5) only runs week 4; widen the range to cover the
# full season if that is the intent.
for week in range(4,5):
    # Initialize a "temporary" dataframe for each loop
    weekly_stats = pd.DataFrame(columns=total_set.columns.values)
    # Iterate through each team in the list
    for team in team_list:
        # Pull data only for the team being addressed in each iteration
        mask_team = total_set[total_set['team'] == team]
        # Pull data only for the weeks of interest
        mask_week = mask_team[mask_team['week'] < week]
        # Append the "temporary" dataframe with this pulled data
        weekly_stats = weekly_stats.append(mask_week)
    # Calculate the mean of all data for each team
    weekly_stats = weekly_stats.groupby('team').mean()
    # Reset the week value to equal the week that will be used (for example, week 5 will have all averaged data from weeks 1-4)
    weekly_stats['week'] = week
    # The "total_stats" set is created from weekly_stats in order to have the same column values, and only needs to be done for the first week
    if week == 4:
        total_stats = weekly_stats
    # For all other weeks simply append the "temporary" values
    else:
        total_stats = total_stats.append(weekly_stats)
# Create the needed columns for the eventual prediction
matchup_columns = ['week','home_team','away_team', 'sack_diff', 'sack_ydiff', 'pens_diff', 'poss_diff', 'third_diff', 'turn_diff', 'pass_diff', 'rush_diff', 'fourth_diff']
# matchup_columns = ['week','home_team','away_team', 'poss_diff', 'third_diff', 'turn_diff', 'pass_diff', 'rush_diff']
# Create the DataFrame and pull in the 'week' 'home_team' and 'away_team columns
matchups = pd.DataFrame(columns = matchup_columns)
matchups[['week','home_team','away_team']] = predicting_set[['week','home_team','away_team']]
# Remove any results from the first four weeks
matchups = matchups[matchups.week >= 4]
# Create the actual differential values using averages from each week.
# Each *_diff is (home team's running average) - (away team's running
# average) for that stat, looked up by (team, week) in total_stats.
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; these
# assignments would need .loc/.iloc under pandas >= 1.0.
for row in range(len(matchups)):
    h_team = matchups.iloc[row]['home_team']
    a_team = matchups.iloc[row]['away_team']
    week = matchups.iloc[row]['week']
    # pass_diff
    h_pass = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['pass yards'].values[0]
    a_pass = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['pass yards'].values[0]
    matchups.ix[row,'pass_diff'] = h_pass - a_pass
    # rush_diff
    h_rush = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['rush yards'].values[0]
    a_rush = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['rush yards'].values[0]
    matchups.ix[row, 'rush_diff'] = h_rush - a_rush
    # sack_diff
    h_sack = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['sacks'].values[0]
    a_sack = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['sacks'].values[0]
    matchups.ix[row, 'sack_diff'] = h_sack - a_sack
    #sack_ydiff
    h_sack_yds = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['sack yards'].values[0]
    a_sack_yds = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['sack yards'].values[0]
    matchups.ix[row, 'sack_ydiff'] = h_sack_yds - a_sack_yds
    # pens_diff
    h_pen_yds = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['penalty yards'].values[0]
    a_pen_yds = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['penalty yards'].values[0]
    matchups.ix[row, 'pens_diff'] = h_pen_yds - a_pen_yds
    # poss_diff
    h_poss = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['possession'].values[0]
    a_poss = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['possession'].values[0]
    matchups.ix[row, 'poss_diff'] = h_poss - a_poss
    # third_diff
    h_third = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['third down'].values[0]
    a_third = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['third down'].values[0]
    matchups.ix[row, 'third_diff'] = h_third - a_third
    # fourth_diff
    h_fourth = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['fourth down'].values[0]
    a_fourth = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['fourth down'].values[0]
    matchups.ix[row, 'fourth_diff'] = h_fourth - a_fourth
    # turn_diff
    h_turn = total_stats[((total_stats.index.values == h_team) & (total_stats.week == week))]['turnovers'].values[0]
    a_turn = total_stats[((total_stats.index.values == a_team) & (total_stats.week == week))]['turnovers'].values[0]
    matchups.ix[row, 'turn_diff'] = h_turn - a_turn
""" Create the testing set for the algo creation """
# Remove the predicting set from the dataframe
df = df[df.game_date < cutoff_date]
# Fill NaNs with outlier values
df.fillna(-999999, inplace=True)
# Calculate time of possession differential
df['poss_diff'] = df['home_poss'] - df['away_poss']
# Calculate third down percentage differential
df['third_diff'] = df['home_third'] - df['away_third']
# Calculate third down percentage differential
df['fourth_diff'] = df['home_four'] - df['away_four']
# Calculate turnover differential
df['turn_diff'] = df['home_turn'] - df['away_turn']
# Calculate sack quantity differential
df['sack_diff'] = df['home_sack'] - df['away_sack']
# Calculate sack yards differential
df['sack_ydiff'] = df['hsack_yds'] - df['asack_yds']
# Calculate penalty yards differential
df['pens_diff'] = df['hpens_yds'] - df['apens_yds']
# Calculate passing yardage differential
df['pass_diff'] = df['home_pass'] - df['away_pass']
# Calculate rushing yardage differential
df['rush_diff'] = df['home_rush'] - df['away_rush']
# Create a sample set to pass into the machine learning algorithm
X = df[['sack_diff', 'sack_ydiff', 'pens_diff', 'poss_diff', 'third_diff', 'turn_diff', 'pass_diff', 'rush_diff', 'fourth_diff']].copy()
# X = df[['poss_diff', 'third_diff', 'turn_diff', 'pass_diff', 'rush_diff']].copy()
# Create results vector (a home win = 1, a home loss or tie = 0)
y = np.array(np.where(df['home_score'] > df['away_score'], 1, 0))
""" Train, test, and predict the algorithm """
# Scale the sample data
scaler = preprocessing.StandardScaler().fit(X)
X = scaler.transform(X)
# Delete the dataframe to clear memory
del df
# Split out training and testing data sets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
print matchups['home_team']
# Remove the 'week' 'home_team' and 'away_team' columns from matchups as they are not used in the algorithm
matchups.drop(['week', 'home_team', 'away_team'], axis=1, inplace=True)
'''You'll likely want to use the a pickled model from previous regression predicting 2015 results'''
for feat in range(1,len(matchups.columns)):
for c in C_vec:
# Create the classifier and check the score
# clf = LogisticRegression()
clf = linear_model.LogisticRegression(C=c,random_state=42)
selector = RFE(clf)
selector = selector.fit(X_train,y_train)
# Calculate probabilities using the predict_proba method for logistic regression
probabilities = selector.predict_proba(scaler.transform(matchups))
# Vectorize the spread_conversion function and apply the function to the probabilities result vector
vfunc = np.vectorize(spread_conversion)
predicted_spreads = np.apply_along_axis(vfunc,0,probabilities[:,0])
# If the actual line for the home team is lower than the predicted line then you would take the away team, otherwise take the home team
bet_vector = np.array(np.where(predicted_spreads > spreads,0,1))
# Create the actual result vector where a tie counts as a loss for the home team
game_result = np.array(np.where(home_score.ix[:,0] + predicted_spreads[:] > away_score.ix[:,0], 1, 0))
# Check to see where the bet_vector equals the actual game result with the spread included
result = np.array(np.where(bet_vector == game_result,1,0))
prob_result = float(np.sum(result)) / len(result)
# print 'Number of features =', feat, 'C =',c,' Percent correct =',prob_result
if prob_result > prob_val:
prob_val = prob_result
C_val = c
feat_val = feat
print 'Score =',selector.score(X_test,y_test)
# print prob_val, C_val, feat
clf = linear_model.LogisticRegression(C=C_val,random_state=42)
clf = clf.fit(X_train,y_train)
probabilities = clf.predict_proba(scaler.transform(matchups))
vfunc = np.vectorize(spread_conversion)
predicted_spreads = np.apply_along_axis(vfunc,0,probabilities[:,0])
bet_vector = np.array(np.where(predicted_spreads > spreads,0,1))
print spreads
print predicted_spreads
print bet_vector
|
JVP3122/Python-Machine-Learning-NFL-Game-Predictor
|
v1/simple_regression.py
|
Python
|
gpl-3.0
| 19,830
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-13 21:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; alters curso.tipo_evaluacion's choices.
    # Keep the content of applied migrations stable.

    dependencies = [
        ('gestioneide', '0038_pruebanivel'),
    ]

    operations = [
        migrations.AlterField(
            model_name='curso',
            name='tipo_evaluacion',
            field=models.DecimalField(choices=[(1, 'Trimestral'), (2, 'Elementary/Pre Intermediate'), (3, 'Intermediate'), (4, 'Upper/[Pre]First/Advance/Proficiency')], decimal_places=0, max_digits=1),
        ),
    ]
|
Etxea/gestioneide
|
gestioneide/migrations/0039_auto_20170213_2202.py
|
Python
|
gpl-3.0
| 606
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.conftest import as_admin
from tests.conftest import as_user
from tests.conftest import set_user
# Shared parametrize cases for the save_editor_data tests below:
# (query string params, JSON payload, expected status, expected JSON body).
SAVE_VARIANTS = [
    ({}, {}, 400, {"msg": "Please provide a valid CVE ID or Git commit link."}),
    ({"id": "CVE-1970-3000"}, {}, 404, {"msg": "Please create an entry first"}),
    ({"id": "CVE-1970-2000"}, {}, 404, {"msg": "Entry has no linked Git link!"}),
    ({"id": "CVE-1970-1000"}, {}, 200, {"msg": "Update successful."}),
    (
        {"id": "CVE-1970-1000"},
        [
            {
                "path": "/etc/passwd",
                "hash": "12345678",
                "name": "passwd",
                "comments": [
                    {
                        "row_from": 1,
                        "row_to": 10,
                        "text": "a comment",
                        "sort_pos": 0,
                    }
                ],
                "markers": [
                    {
                        "row_from": 1,
                        "row_to": 10,
                        "column_from": 1,
                        "column_to": 10,
                        "class": "vulnerableMarker",
                    }
                ],
            },
        ],
        200,
        {"msg": "Update successful."},
    ),
]
@pytest.mark.integration
@pytest.mark.parametrize("query, data, expected_code, expected_response", SAVE_VARIANTS)
def test_save_editor_data(client, query, data, expected_code, expected_response):
    """Unauthenticated clients are always rejected with 403.

    expected_code / expected_response from the shared variants are
    deliberately unused here: without authentication the endpoint never
    gets far enough to produce them.
    """
    resp = client.post("/api/save_editor_data", json=data, query_string=query)
    assert resp.status_code == 403
    assert "application/json" in resp.headers["Content-Type"]
    assert b"Forbidden" in resp.data
@pytest.mark.integration
@pytest.mark.parametrize("query, data, expected_code, expected_response", SAVE_VARIANTS)
def test_save_editor_data_as_admin(
    app, client, query, data, expected_code, expected_response
):
    """Admins reach the endpoint proper; each variant's status/body applies."""
    as_admin(client)
    resp = client.post("/api/save_editor_data", json=data, query_string=query)
    assert resp.status_code == expected_code
    assert "application/json" in resp.headers["Content-Type"]
    assert resp.json == expected_response
@pytest.mark.integration
@pytest.mark.parametrize("query, data, expected_code, expected_response", SAVE_VARIANTS)
def test_save_editor_data_as_user(
    app, client, query, data, expected_code, expected_response
):
    """Regular (non-admin) users are rejected with 403, like anonymous ones."""
    with set_user(app, as_user(client)):
        resp = client.post("/api/save_editor_data", json=data, query_string=query)
        assert resp.status_code == 403
        assert "application/json" in resp.headers["Content-Type"]
        assert b"Forbidden" in resp.data
|
google/vulncode-db
|
tests/app_tests/api/test_routes.py
|
Python
|
apache-2.0
| 3,216
|
"""
This component provides HA sensor support for Ring Door Bell/Chimes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ring/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import ATTRIBUTION, DATA_RING, DEFAULT_ENTITY_NAMESPACE
DEPENDENCIES = ['ring']
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
# Sensor types: Name, category, units, icon, kind
SENSOR_TYPES = {
'battery': [
'Battery', ['doorbell', 'stickup_cams'], '%', 'battery-50', None],
'last_activity': [
'Last Activity', ['doorbell', 'stickup_cams'], None, 'history', None],
'last_ding': [
'Last Ding', ['doorbell'], None, 'history', 'ding'],
'last_motion': [
'Last Motion', ['doorbell', 'stickup_cams'], None,
'history', 'motion'],
'volume': [
'Volume', ['chime', 'doorbell', 'stickup_cams'], None,
'bell-ring', None],
'wifi_signal_category': [
'WiFi Signal Category', ['chime', 'doorbell', 'stickup_cams'], None,
'wifi', None],
'wifi_signal_strength': [
'WiFi Signal Strength', ['chime', 'doorbell', 'stickup_cams'], 'dBm',
'wifi', None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE):
cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a sensor for a Ring device."""
    ring = hass.data[DATA_RING]

    # Accessing each device collection performs I/O; keep the original
    # chimes -> doorbells -> stickup_cams order so entity order is stable.
    device_groups = (
        (ring.chimes, 'chime'),
        (ring.doorbells, 'doorbell'),
        (ring.stickup_cams, 'stickup_cams'),
    )

    sensors = []
    for devices, category in device_groups:
        for device in devices:
            for sensor_type in config[CONF_MONITORED_CONDITIONS]:
                # SENSOR_TYPES[type][1] lists the device categories that
                # support this sensor type.
                if category in SENSOR_TYPES[sensor_type][1]:
                    sensors.append(RingSensor(hass, device, sensor_type))

    add_entities(sensors, True)
    return True
class RingSensor(Entity):
    """A sensor implementation for Ring device."""

    def __init__(self, hass, data, sensor_type):
        """Initialize a sensor for Ring device.

        data is the ring_doorbell device object; sensor_type is one of the
        SENSOR_TYPES keys.
        """
        super(RingSensor, self).__init__()
        self._sensor_type = sensor_type
        self._data = data
        self._extra = None  # latest history event dict for last_* sensors
        self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[3])
        self._kind = SENSOR_TYPES.get(self._sensor_type)[4]
        self._name = "{0} {1}".format(
            self._data.name, SENSOR_TYPES.get(self._sensor_type)[0])
        self._state = None
        self._tz = str(hass.config.time_zone)
        self._unique_id = '{}-{}'.format(self._data.id, self._sensor_type)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attrs = {}
        attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
        attrs['device_id'] = self._data.id
        attrs['firmware'] = self._data.firmware
        attrs['kind'] = self._data.kind
        attrs['timezone'] = self._data.timezone
        attrs['type'] = self._data.family
        attrs['wifi_name'] = self._data.wifi_name
        # Event details are only populated for the last_* sensor types.
        if self._extra and self._sensor_type.startswith('last_'):
            attrs['created_at'] = self._extra['created_at']
            attrs['answered'] = self._extra['answered']
            attrs['recording_status'] = self._extra['recording']['status']
            attrs['category'] = self._extra['kind']
        return attrs

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        if self._sensor_type == 'battery' and self._state is not None:
            return icon_for_battery_level(battery_level=int(self._state),
                                          charging=False)
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the units of measurement."""
        return SENSOR_TYPES.get(self._sensor_type)[2]

    def update(self):
        """Get the latest data and updates the state."""
        _LOGGER.debug("Pulling data from %s sensor", self._name)
        self._data.update()
        if self._sensor_type == 'volume':
            self._state = self._data.volume
        if self._sensor_type == 'battery':
            self._state = self._data.battery_life
        if self._sensor_type.startswith('last_'):
            history = self._data.history(limit=5,
                                         timezone=self._tz,
                                         kind=self._kind,
                                         enforce_limit=True)
            if history:
                # Most recent event; state is its HH:MM timestamp.
                self._extra = history[0]
                created_at = self._extra['created_at']
                self._state = '{0:0>2}:{1:0>2}'.format(
                    created_at.hour, created_at.minute)
        if self._sensor_type == 'wifi_signal_category':
            self._state = self._data.wifi_signal_category
        if self._sensor_type == 'wifi_signal_strength':
            self._state = self._data.wifi_signal_strength
|
jamespcole/home-assistant
|
homeassistant/components/ring/sensor.py
|
Python
|
apache-2.0
| 6,215
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
import ipaddress
from st2common.log import logging
LOG = logging.getLogger(__name__)
__all__ = ["is_ipv4", "is_ipv6", "split_host_port"]
BRACKET_PATTERN = r"^\[.*\]" # IPv6 bracket pattern to specify port
COMPILED_BRACKET_PATTERN = re.compile(BRACKET_PATTERN)
HOST_ONLY_IN_BRACKET = r"^\[.*\]$"
COMPILED_HOST_ONLY_IN_BRACKET_PATTERN = re.compile(HOST_ONLY_IN_BRACKET)
def is_ipv6(ip_str):
    """
    Validate whether given string is IPv6.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddress.ip_address(ip_str)
        return addr.version == 6
    except (ValueError, TypeError):
        # ip_address() raises ValueError for malformed addresses (and
        # TypeError-ish failures for non-string junk); a bare ``except``
        # here would also swallow KeyboardInterrupt/SystemExit.
        return False
def is_ipv4(ip_str):
    """
    Validate whether given string is IPv4.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddress.ip_address(ip_str)
        return addr.version == 4
    except (ValueError, TypeError):
        # Narrowed from a bare ``except`` so that interpreter-level
        # exceptions (KeyboardInterrupt, SystemExit) are not masked.
        return False
def split_host_port(host_str):
    """
    Split host_str into host and port.

    Can handle IPv4, IPv6, hostname inside or outside brackets.

    Note: If you want to specify a port with IPv6, you definitely
    should enclose IP address within [].

    :param host_str: Host port string.
    :type host_str: ``str``

    :return: Hostname (string), port (int) tuple. Raises exception on invalid port.
    :rtype: ``tuple`` of ``str`` and ``int``
    """
    hostname = host_str
    port = None

    # If it's simple IPv6 or IPv4 address, return here.
    if is_ipv6(host_str) or is_ipv4(host_str):
        return (hostname, port)

    # Check if it's square bracket style, e.g. "[::1]:80" or "[::1]".
    match = COMPILED_BRACKET_PATTERN.match(host_str)
    if match:
        LOG.debug("Square bracket style.")

        # Check if square bracket style no port.
        match = COMPILED_HOST_ONLY_IN_BRACKET_PATTERN.match(host_str)
        if match:
            hostname = match.group().strip("[]")
            return (hostname, port)

        hostname, separator, port = hostname.rpartition(":")
        LOG.debug("host_str: %s, hostname: %s port: %s" % (host_str, hostname, port))
        try:
            # Only the int() conversion can legitimately fail here; the
            # previous bare ``except`` also masked unrelated errors and
            # interpreter exits.
            port = int(port)
        except ValueError:
            raise Exception("Invalid port %s specified." % port)
        hostname = hostname.strip("[]")
        return (hostname, port)
    else:
        LOG.debug("Non-bracket address. host_str: %s" % host_str)
        if ":" in host_str:
            LOG.debug("Non-bracket with port.")
            hostname, separator, port = hostname.rpartition(":")
            try:
                port = int(port)
            except ValueError:
                raise Exception("Invalid port %s specified." % port)
            return (hostname, port)

    return (hostname, port)
|
StackStorm/st2
|
st2common/st2common/util/ip_utils.py
|
Python
|
apache-2.0
| 3,449
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
def serialize_ip_network_group(group):
    """Serialize group to JSON-like object"""
    group_id = group.id
    return dict(
        id=group_id,
        name=group.name,
        identifier='IPNetworkGroup:{}'.format(group_id),
        _type='IPNetworkGroup',
    )
|
mvidalgarcia/indico
|
indico/modules/networks/util.py
|
Python
|
mit
| 506
|
from django.core.urlresolvers import reverse
from django.test import SimpleTestCase
class URLEndpointTestCase(SimpleTestCase):
    """Smoke tests: each named URL resolves and returns the expected status."""

    def test_get_root_view(self):
        url = reverse('homepage')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

    def test_get_editor_view(self):
        url = reverse('editor')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

    def test_get_publishing_view(self):
        # GET is not allowed on the upload endpoint (405 Method Not Allowed).
        url = reverse('upload_article')
        response = self.client.get(url)
        self.assertEqual(405, response.status_code)

    def test_post_publishing_all_required_fields(self):
        # NOTE(review): a well-formed POST expecting 500 suggests the view
        # depends on a backing service that is absent in tests -- confirm.
        url = reverse('upload_article')
        response = self.client.post(url, data={
            'article': 'test content',
            'title': 'test title',
            'description': 'test description'
        })
        self.assertEqual(500, response.status_code)

    def test_post_publishing_missing_article(self):
        url = reverse('upload_article')
        response = self.client.post(url, data={
            'title': 'test title',
            'description': 'test description'
        })
        self.assertEqual(400, response.status_code)

    def test_post_publishing_empty_article(self):
        url = reverse('upload_article')
        response = self.client.post(url, data={
            'article': '',
            'title': 'test title',
            'description': 'test description'
        })
        self.assertEqual(500, response.status_code)

    def test_get_article_list_view(self):
        url = reverse('articles')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

    def test_get_article_detail_view(self):
        url = reverse('article', args=(1,))
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

    def test_get_search_view(self):
        # NOTE(review): reverses 'articles', duplicating
        # test_get_article_list_view -- likely a copy-paste slip; confirm
        # the intended URL name for the search view.
        url = reverse('articles')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

    def test_get_about_view(self):
        # NOTE(review): also reverses 'articles' -- same suspected
        # copy-paste slip as test_get_search_view.
        url = reverse('articles')
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
|
microserv/frontend
|
editor_backend/editor_backend/tests.py
|
Python
|
mit
| 2,241
|
# coding=utf-8
# Copyright 2021 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Lookup Embedding module."""
import typing
from typing import Dict
from ehr_prediction_modeling.embeddings import input_embedding_base
from ehr_prediction_modeling.models.nets import sparse_lookup
import tensorflow.compat.v1 as tf
if typing.TYPE_CHECKING:
from ehr_prediction_modeling import configdict
class BasicEmbeddingLookup(input_embedding_base.InputEmbeddingBase):
    """The class that uses the base embedding lookup."""

    def __init__(self, encoder_config: "configdict.ConfigDict",
                 emb_dim_dict: Dict[str, int]):
        """Initialize the embedding base object.

        Args:
          encoder_config: config providing ndim_dict, nact_dict,
            sparse_combine, identity_lookup_features and (optionally)
            sparse_lookup_dropout_prob.
          emb_dim_dict: mapping of feature type to embedding dimension.
        """
        super().__init__(encoder_config, emb_dim_dict)
        # Keep all lookup variables under a single scope named after this
        # encoder so checkpoints stay organized per embedding module.
        with tf.variable_scope(None, default_name=self._name) as variable_scope:
            self.variable_scope = variable_scope
            # Initialize all the weights.
            self._initialize_weights()

    def _initialize_weights(self):
        """Initialize the weights for embedding computations."""
        # One SparseLookupEncoder per feature type; _all_features,
        # _encoders, _embed_dim_dict and _is_training are provided by
        # InputEmbeddingBase (assumed -- defined outside this file).
        for feat_type in self._all_features:
            self._encoders[feat_type] = sparse_lookup.SparseLookupEncoder(
                ndim_emb=self._embed_dim_dict[feat_type],
                ndim_input=self._config.ndim_dict[feat_type],
                n_act=self._config.nact_dict[feat_type],
                sparse_lookup_dropout_prob=self._config.get(
                    "sparse_lookup_dropout_prob", 0.0),
                dropout_is_training=self._is_training,
                sparse_combine=self._config.sparse_combine,
                name="lookup_embedding_" + feat_type,
                identity_lookup=feat_type in self._config.identity_lookup_features)
|
google/ehr-predictions
|
ehr_prediction_modeling/embeddings/basic_embedding_lookup.py
|
Python
|
apache-2.0
| 2,155
|
import itertools
import sys
def solve(text):
    """Return the Advent of Code 2017 day 2 (part two) checksum.

    For each row of whitespace-separated integers, find the single pair in
    which one value evenly divides the other and add the quotient to the
    running checksum.

    Args:
        text: the puzzle input, one row of numbers per line.

    Returns:
        The integer checksum (0 for empty input).
    """
    checksum = 0
    for line in text.split('\n'):
        values = [int(field) for field in line.split()]
        for a, b in itertools.product(values, repeat=2):
            if a == b:
                # Every value divides itself; only distinct pairs count.
                continue
            if a % b == 0:
                checksum += a // b
                break  # the puzzle guarantees one divisible pair per row
    return checksum


if __name__ == "__main__":
    # Same CLI behavior as before: read the file named on the command line
    # and print the checksum. The original top-level code shadowed the
    # `input` builtin; wrapping it in solve() + a main guard fixes that and
    # makes the logic importable/testable.
    with open(sys.argv[1]) as handle:
        print(solve(handle.read()))
|
mofr/advent-of-code
|
2017/day02.py
|
Python
|
apache-2.0
| 407
|
#!/usr/bin/env python3
# rFactor .scn/.gen file manipulation tool
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import sys
import rfactortools
if __name__ == "__main__":
    # Command-line front end: collect .veh files (expanding directories),
    # parse them, and print either a tree or a flat info listing.
    parser = argparse.ArgumentParser(description='rFactor .veh/.gen processor')
    parser.add_argument('FILE', action='store', type=str, nargs='+',
                        help='.veh file or directory containing .veh files')
    parser.add_argument('-t', '--tree', action='store_true', default=False,
                        help="print tree")
    opts = parser.parse_args()

    veh_files = []
    for entry in opts.FILE:
        if os.path.isdir(entry):
            veh_files.extend(rfactortools.find_files(entry, ".veh"))
        else:
            veh_files.append(entry)

    parsed = [rfactortools.parse_vehfile(name) for name in veh_files]

    if opts.tree:
        rfactortools.print_veh_tree(parsed, sys.stdout)
    else:
        rfactortools.print_veh_info(parsed, sys.stdout)

# EOF #
|
Grumbel/rfactortools
|
vehtool.py
|
Python
|
gpl-3.0
| 1,619
|
from importlib import import_module
from fabric.api import env
from fabric.api import cd, run, settings, sudo
from fabric.contrib.files import append, comment, exists
from .deploy import AllowedException, checkout_branch, deploy, get_repo_dir, WEBADMIN_GROUP
env.use_ssh_config = True
REPO_FULL_NAME = 'GitHubUser/GitHubRepo'
def setup_user(user, no_sudo_passwd='', public_key_file=''):
    """Create a web-admin account with SSH key access and a PostgreSQL role.

    Args:
        user: name of the remote account to prepare.
        no_sudo_passwd: truthy string enables passwordless sudo for the user.
        public_key_file: optional local path to a public key to authorize.
    """
    from plush.fabric_commands import prepare_user
    messages = prepare_user(user, WEBADMIN_GROUP, add_sudo=True, no_sudo_passwd=bool(no_sudo_passwd))
    add_authorized_key(user, public_key_file)
    # Make sure PostgreSQL is installed before querying pg_roles.
    if not exists('/usr/bin/createuser'):
        _install_packages(['postgresql'])
    # Create a superuser role for this account only if none exists yet.
    matching_user_count = sudo("psql postgres -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{0}'\"".format(user),
                               user='postgres')
    if '1' not in matching_user_count:
        sudo('createuser -s {0}'.format(user), user='postgres')
    # Surface any informational output from prepare_user prominently.
    if messages:
        print("========================================")
        print(messages)
        print("========================================")
def add_authorized_key(user, public_key_file):
    """Authorize the given public key for *user*; no-op when no file is given."""
    import plush.fabric_commands
    if not public_key_file:
        return
    with open(public_key_file, 'r') as key_fh:
        key_text = key_fh.read()
    plush.fabric_commands.add_authorized_key(user, key_text)
def disable_ssh_passwords():
    """Turn off SSH password authentication and tell the operator what to do next."""
    sshd_config = '/etc/ssh/sshd_config'
    comment(sshd_config, '^ *PasswordAuthentication', use_sudo=True)
    append(sshd_config, 'PasswordAuthentication no', use_sudo=True)
    banner = "========================================"
    for message in (
        banner,
        "Password authentication disabled for SSH.",
        "Restart the SSH daemon by logging into the console and running:",
        "sudo service ssh restart",
        "Alternatively, reboot the server if console access isn't readily available.",
        banner,
    ):
        print(message)
def setup_server(setup_wins=''):
    """Provision a fresh Ubuntu host for serving the site.

    Installs the base package set, prepares /var/www and uwsgi directories,
    creates a PostgreSQL superuser for root, removes the default nginx site,
    and starts nginx.

    Args:
        setup_wins: truthy string also configures Samba/WINS name resolution.
    """
    from plush.fabric_commands.permissions import make_directory
    sudo('add-apt-repository universe')
    sudo('apt-get update')
    base_packages = [
        'git',
        'python3-venv',
        'postgresql',
        'python3-psycopg2',
        'nginx',
        'uwsgi',
        'uwsgi-plugin-python3',
    ]
    _install_packages(base_packages)
    if setup_wins:
        _setup_wins()
    sudo('mkdir -p /etc/nginx/ssl')
    make_directory(WEBADMIN_GROUP, '/var/www')
    make_directory(WEBADMIN_GROUP, '/var/www/python')
    # createuser fails when the role already exists; treat that as success.
    with settings(abort_exception=AllowedException):
        try:
            run('createuser -s root')
        except AllowedException:
            pass
    make_directory('root', '/var/uwsgi', '777')
    # Drop the distro's default nginx site so our config is authoritative.
    default_site = '/etc/nginx/sites-enabled/default'
    if exists(default_site):
        sudo('rm {0}'.format(default_site))
    sudo('/etc/init.d/nginx start')
def _install_packages(packages):
    """Install each apt package, one apt-get invocation per package."""
    command_template = 'apt-get install --yes {0}'
    for package_name in packages:
        sudo(command_template.format(package_name))
def _setup_wins():
    """Install Samba/WINS packages and route hostname lookups through WINS."""
    wins_packages = [
        'samba',
        'smbclient',
        'winbind',
    ]
    _install_packages(wins_packages)
    # Rewrite the hosts line in nsswitch.conf to consult WINS after files/DNS.
    sudo('sed -i s/\'hosts:.*/hosts: files dns wins/\' /etc/nsswitch.conf')
    # Point systemd-resolved at the local domain so short names resolve.
    resolved_config = '/etc/systemd/resolved.conf'
    comment(resolved_config, '^ *Domains', use_sudo=True)
    append(resolved_config, 'Domains=localdomain', use_sudo=True)
    sudo('service systemd-resolved restart')
def setup_deployment(config, branch=''):
    """Create the database, checkout, virtualenv, and uwsgi units for one deployment.

    Args:
        config: settings suffix (e.g. 'staging') selecting settings_<config>
            and the matching manage_<config>.py.
        branch: optional git branch to deploy.
    """
    # Pull database credentials from the deployment's Django settings module.
    django_settings = import_module('newdjangosite.settings_{0}'.format(config))
    db_settings = django_settings.DATABASES['default']
    db_name = db_settings['NAME']
    db_user = db_settings['USER']
    db_password = db_settings['PASSWORD']
    repo_dir = get_repo_dir(config)
    database_created = False
    # createdb fails when the database already exists; that is expected on
    # re-deploys, so swallow it and remember whether this is a first deploy.
    with settings(abort_exception=AllowedException):
        try:
            run('createdb --encoding=UTF8 --locale=en_US.UTF-8 --owner=postgres --template=template0 {0}'.format(db_name))
            database_created = True
        except AllowedException:
            pass
    # Likewise createuser fails when the role already exists.
    with settings(abort_exception=AllowedException):
        try:
            run('createuser -d -R -S {0}'.format(db_user))
        except AllowedException:
            pass
    run('psql -d postgres -c \"ALTER ROLE {0} WITH ENCRYPTED PASSWORD \'{1}\';\"'.format(db_user, db_password))
    _setup_repo(repo_dir)
    checkout_branch(repo_dir, config, branch)
    with cd(repo_dir):
        if not exists('venv'):
            run('python3 -m venv --system-site-packages venv')
    global_dir = '{0}/config/ubuntu-18.04/global'.format(repo_dir)
    with cd(global_dir):
        # Install the systemd socket/service unit files only once.
        uwsgi_socket = '/etc/systemd/system/uwsgi-app@.socket'
        uwsgi_service = '/etc/systemd/system/uwsgi-app@.service'
        if not exists(uwsgi_socket):
            from plush.fabric_commands.permissions import set_permissions_file
            sudo('cp uwsgi-app@.socket {0}'.format(uwsgi_socket))
            set_permissions_file(uwsgi_socket, 'root', 'root', '644')
        if not exists(uwsgi_service):
            from plush.fabric_commands.permissions import set_permissions_file
            sudo('cp uwsgi-app@.service {0}'.format(uwsgi_service))
            set_permissions_file(uwsgi_service, 'root', 'root', '644')
    deploy(config, branch)
    # Only prompt for a Django superuser on the very first deployment.
    if database_created:
        with cd(repo_dir):
            run('venv/bin/python web/manage_{0}.py createsuperuser'.format(config))
def _setup_repo(repo_dir):
    """Clone the GitHub repository into *repo_dir* on first deployment.

    Creates the directory with web-admin group ownership; if no checkout
    exists yet, verifies GitHub access, registers a deploy key, and clones.

    Raises:
        Exception: when the stored GitHub access token cannot be verified.
    """
    from plush.fabric_commands.permissions import make_directory
    make_directory(WEBADMIN_GROUP, repo_dir)
    if not exists('{0}/.git'.format(repo_dir)):
        from plush.fabric_commands.git import clone
        from plush.fabric_commands.ssh_key import create_key
        from plush.oauth_flow import verify_access_token
        from plush.repo_keys import add_repo_key
        if not verify_access_token():
            raise Exception('Unable to access GitHub account')
        create_key(REPO_FULL_NAME, WEBADMIN_GROUP)
        add_repo_key(REPO_FULL_NAME)
        clone(REPO_FULL_NAME, repo_dir)
|
kbarnes3/BaseDjangoSite
|
web/fabric_utils/setup.py
|
Python
|
bsd-2-clause
| 6,296
|
#!/usr/bin/python3
import asyncore
import socket
import time
import random
class EchoHandler(asyncore.dispatcher_with_send):
    """Logs whatever the connected peer sends; echoing back is disabled."""

    def handle_read(self):
        payload = self.recv(8192)
        print(payload)
        # Sending the data back is intentionally disabled; this only logs.
class EchoServer(asyncore.dispatcher):
    """Listens on (host, port) and hands each accepted connection to an EchoHandler."""

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accept(self):
        accepted = self.accept()
        if accepted is None:
            # accept() can fail spuriously; just wait for the next event.
            return
        sock, addr = accepted
        print('Incoming connection from %s' % repr(addr))
        EchoHandler(sock)
if __name__ == '__main__':
    # Ask for the interface to bind and run the asyncore event loop forever.
    host = input("ip:")
    listener = EchoServer(host, 8080)  # kept alive via the asyncore socket map
    asyncore.loop()
|
elaeon/sensors
|
tests/test_server.py
|
Python
|
gpl-2.0
| 870
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2015 Niklas Fiekas <niklas.fiekas@tu-clausthal.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chess
import chess.polyglot
import chess.pgn
import chess.uci
import chess.syzygy
import chess.gaviota
import collections
import os.path
import textwrap
import sys
import time
import unittest
import logging
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class SquareTestCase(unittest.TestCase):

    def test_square(self):
        """file_index/rank_index round-trip through chess.square() for every square."""
        for sq in chess.SQUARES:
            f_idx = chess.file_index(sq)
            r_idx = chess.rank_index(sq)
            rebuilt = chess.square(f_idx, r_idx)
            self.assertEqual(rebuilt, sq, chess.SQUARE_NAMES[sq])
class MoveTestCase(unittest.TestCase):

    def test_equality(self):
        """Moves compare by from/to/promotion; identical moves are equal."""
        first = chess.Move(chess.A1, chess.A2)
        second = chess.Move(chess.A1, chess.A2)
        promo = chess.Move(chess.H7, chess.H8, chess.BISHOP)
        plain_one = chess.Move(chess.H7, chess.H8)
        plain_two = chess.Move(chess.H7, chess.H8)

        self.assertEqual(first, second)
        self.assertEqual(second, first)
        self.assertEqual(plain_one, plain_two)

        self.assertNotEqual(first, promo)
        self.assertNotEqual(promo, plain_one)
        self.assertNotEqual(second, plain_one)
        self.assertFalse(plain_one != plain_two)

    def test_uci_parsing(self):
        """from_uci()/uci() round-trip, with and without a promotion suffix."""
        for uci in ("b5c7", "e7e8q"):
            self.assertEqual(chess.Move.from_uci(uci).uci(), uci)
class PieceTestCase(unittest.TestCase):

    def test_equality(self):
        """Pieces compare by (piece_type, color); repr follows equality."""
        a = chess.Piece(chess.BISHOP, chess.WHITE)
        b = chess.Piece(chess.KING, chess.BLACK)
        c = chess.Piece(chess.KING, chess.WHITE)
        d1 = chess.Piece(chess.BISHOP, chess.WHITE)
        d2 = chess.Piece(chess.BISHOP, chess.WHITE)

        self.assertEqual(a, d1)
        self.assertEqual(d1, a)
        self.assertEqual(d1, d2)
        self.assertEqual(repr(a), repr(d1))
        self.assertFalse(d1 != d2)

        # Each distinct pair is unequal, and so are their reprs.
        for left, right in ((a, b), (b, c), (b, d1), (a, c)):
            self.assertNotEqual(left, right)
            self.assertNotEqual(repr(left), repr(right))

    def test_from_symbol(self):
        """from_symbol() decodes case into color and letter into piece type."""
        white_knight = chess.Piece.from_symbol("N")
        self.assertEqual(white_knight.color, chess.WHITE)
        self.assertEqual(white_knight.piece_type, chess.KNIGHT)
        self.assertEqual(white_knight.symbol(), "N")

        black_queen = chess.Piece.from_symbol("q")
        self.assertEqual(black_queen.color, chess.BLACK)
        self.assertEqual(black_queen.piece_type, chess.QUEEN)
        self.assertEqual(black_queen.symbol(), "q")
class BoardTestCase(unittest.TestCase):
def test_default_position(self):
    """A fresh board has the standard setup with white to move."""
    board = chess.Board()
    self.assertEqual(board.turn, chess.WHITE)
    self.assertEqual(board.fen(), chess.STARTING_FEN)
    self.assertEqual(board.piece_at(chess.B1), chess.Piece.from_symbol("N"))
def test_empty(self):
    """Board.empty() has the empty-board FEN and equals Board(None)."""
    empty_board = chess.Board.empty()
    self.assertEqual(empty_board.fen(), "8/8/8/8/8/8/8/8 w - - 0 1")
    self.assertEqual(empty_board, chess.Board(None))
def test_from_epd(self):
    """Board.from_epd() yields both the position and the parsed opcodes."""
    base_epd = "rnbqkb1r/ppp1pppp/5n2/3P4/8/8/PPPP1PPP/RNBQKBNR w KQkq -"
    parsed_board, opcodes = chess.Board.from_epd(base_epd + " ce 55;")
    self.assertEqual(opcodes["ce"], 55)
    self.assertEqual(parsed_board.fen(), base_epd + " 0 1")
def test_move_making(self):
    """push() records a move that peek() then reports."""
    board = chess.Board()
    e4 = chess.Move(chess.E2, chess.E4)
    board.push(e4)
    self.assertEqual(board.peek(), e4)
def test_fen(self):
    """fen() round-trips through set_fen() and updates after a move."""
    board = chess.Board()
    self.assertEqual(board.fen(), chess.STARTING_FEN)
    fen = "6k1/pb3pp1/1p2p2p/1Bn1P3/8/5N2/PP1q1PPP/6K1 w - - 0 24"
    board.set_fen(fen)
    self.assertEqual(board.fen(), fen)
    # Nxd2: the resulting FEN reflects the capture and the side to move.
    board.push(chess.Move.from_uci("f3d2"))
    self.assertEqual(board.fen(), "6k1/pb3pp1/1p2p2p/1Bn1P3/8/8/PP1N1PPP/6K1 b - - 0 24")
def test_xfen(self):
    """X-FEN castling flags, shredder FENs, and en passant edge cases."""
    # https://de.wikipedia.org/wiki/Forsyth-Edwards-Notation#Beispiel
    xfen = "rn2k1r1/ppp1pp1p/3p2p1/5bn1/P7/2N2B2/1PPPPP2/2BNK1RR w Gkq - 4 11"
    board = chess.Board("rn2k1r1/ppp1pp1p/3p2p1/5bn1/P7/2N2B2/1PPPPP2/2BNK1RR w Gkq - 4 11")
    self.assertEqual(board.castling_rights, chess.BB_G1 | chess.BB_A8 | chess.BB_G8)
    self.assertEqual(board.shredder_fen(), "rn2k1r1/ppp1pp1p/3p2p1/5bn1/P7/2N2B2/1PPPPP2/2BNK1RR w Gga - 4 11")
    self.assertEqual(board.fen(), xfen)
    self.assertTrue(board.has_castling_rights(chess.WHITE))
    self.assertTrue(board.has_castling_rights(chess.BLACK))
    self.assertTrue(board.has_kingside_castling_rights(chess.BLACK))
    self.assertTrue(board.has_kingside_castling_rights(chess.WHITE))
    self.assertTrue(board.has_queenside_castling_rights(chess.BLACK))
    self.assertFalse(board.has_queenside_castling_rights(chess.WHITE))
    # Chess960 position #284.
    board = chess.Board("rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w - - 0 1")
    board.castling_rights = board.rooks
    self.assertEqual(board.fen(), "rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w KQkq - 0 1")
    self.assertEqual(board.shredder_fen(), "rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w EAea - 0 1")
    # Valid en passant square on illegal board.
    fen = "8/8/8/pP6/8/8/8/8 w - a6 0 1"
    board = chess.Board(fen)
    self.assertEqual(board.fen(), fen)
    # Illegal en passant square in illegal board.
    fen = "1r6/8/8/pP6/8/8/8/1K6 w - a6 0 1"
    board = chess.Board(fen)
    self.assertEqual(board.fen(), "1r6/8/8/pP6/8/8/8/1K6 w - - 0 1")
def test_get_set(self):
    """Pieces can be read, removed from, and placed on squares."""
    board = chess.Board()
    self.assertEqual(board.piece_at(chess.B1), chess.Piece.from_symbol("N"))

    board.remove_piece_at(chess.E2)
    self.assertEqual(None, board.piece_at(chess.E2))

    board.set_piece_at(chess.E4, chess.Piece.from_symbol("r"))
    self.assertEqual(chess.ROOK, board.piece_type_at(chess.E4))
def test_pawn_captures(self):
    """A pawn capture (King's Gambit accepted) is generated, legal, and undoable."""
    board = chess.Board()
    # Kings gambit.
    board.push(chess.Move.from_uci("e2e4"))
    board.push(chess.Move.from_uci("e7e5"))
    board.push(chess.Move.from_uci("f2f4"))
    # Accepted.
    exf4 = chess.Move.from_uci("e5f4")
    self.assertTrue(exf4 in board.pseudo_legal_moves)
    self.assertTrue(exf4 in board.legal_moves)
    board.push(exf4)
    board.pop()
def test_pawn_move_generation(self):
    """A fixed middlegame position yields exactly 16 pseudo-legal moves."""
    board = chess.Board("8/2R1P3/8/2pp4/2k1r3/P7/8/1K6 w - - 1 55")
    generated = list(board.generate_pseudo_legal_moves())
    self.assertEqual(16, len(generated))
def test_single_step_pawn_move(self):
    """A single pawn step is legal, and push/pop restores the starting FEN."""
    board = chess.Board()
    a3 = chess.Move.from_uci("a2a3")
    self.assertTrue(a3 in board.pseudo_legal_moves)
    self.assertTrue(a3 in board.legal_moves)
    board.push(a3)
    board.pop()
    self.assertEqual(board.fen(), chess.STARTING_FEN)
def test_castling(self):
    """Both sides castle both ways; SAN, legality, and undo are all checked."""
    board = chess.Board("r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
    # Let white castle short.
    move = board.parse_san("O-O")
    self.assertEqual(move, chess.Move.from_uci("e1g1"))
    self.assertEqual(board.san(move), "O-O")
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    # Let black castle long.
    move = board.parse_san("O-O-O")
    self.assertEqual(board.san(move), "O-O-O")
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    self.assertEqual(board.fen(), "2kr3r/8/8/8/8/8/8/R4RK1 w - - 3 2")
    # Undo both castling moves.
    board.pop()
    board.pop()
    self.assertEqual(board.fen(), "r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
    # Let white castle long.
    move = board.parse_san("O-O-O")
    self.assertEqual(board.san(move), "O-O-O")
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    # Let black castle short.
    move = board.parse_san("O-O")
    self.assertEqual(board.san(move), "O-O")
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    self.assertEqual(board.fen(), "r4rk1/8/8/8/8/8/8/2KR3R w - - 3 2")
    # Undo both castling moves.
    board.pop()
    board.pop()
    self.assertEqual(board.fen(), "r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
def test_ninesixty_castling(self):
    """Chess960 castling: king/rook swaps, shredder FENs, and undo."""
    fen = "3rk2r/4p3/8/8/8/8/8/4RKR1 w GEhd - 1 1"
    board = chess.Board("3rk2r/4p3/8/8/8/8/8/4RKR1 w GEhd - 1 1")
    # Let white do the king side swap.
    move = board.parse_san("O-O")
    self.assertEqual(board.san(move), "O-O")
    self.assertEqual(move.from_square, chess.F1)
    self.assertEqual(move.to_square, chess.G1)
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    self.assertEqual(board.shredder_fen(), "3rk2r/4p3/8/8/8/8/8/4RRK1 b hd - 2 1")
    # Black can not castly kingside.
    self.assertFalse(chess.Move.from_uci("e8h8") in board.legal_moves)
    # Let black castle queenside.
    move = board.parse_san("O-O-O")
    self.assertEqual(board.san(move), "O-O-O")
    self.assertEqual(move.from_square, chess.E8)
    self.assertEqual(move.to_square, chess.D8)
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    self.assertEqual(board.shredder_fen(), "2kr3r/4p3/8/8/8/8/8/4RRK1 w - - 3 2")
    # Restore initial position.
    board.pop()
    board.pop()
    self.assertEqual(board.shredder_fen(), fen)
    fen = "Qr4k1/4pppp/8/8/8/8/8/R5KR w Hb - 0 1"
    board = chess.Board(fen)
    # White can just hop the rook over.
    move = board.parse_san("O-O")
    self.assertEqual(board.san(move), "O-O")
    self.assertEqual(move.from_square, chess.G1)
    self.assertEqual(move.to_square, chess.H1)
    self.assertTrue(move in board.legal_moves)
    board.push(move)
    self.assertEqual(board.shredder_fen(), "Qr4k1/4pppp/8/8/8/8/8/R4RK1 b b - 1 1")
    # Black can not castle queenside nor kingside.
    self.assertFalse(any(board.generate_castling_moves()))
    # Restore initial position.
    board.pop()
    self.assertEqual(board.shredder_fen(), fen)
def test_castling_right_not_destroyed_bug(self):
    """Regression: capturing on h1 must strip the right castling rights."""
    # A rook move from H8 to H1 was only taking whites possible castling
    # rights away.
    board = chess.Board("2r1k2r/2qbbpp1/p2pp3/1p3PP1/Pn2P3/1PN1B3/1P3QB1/1K1R3R b k - 0 22")
    board.push_san("Rxh1")
    self.assertEqual(board.epd(), "2r1k3/2qbbpp1/p2pp3/1p3PP1/Pn2P3/1PN1B3/1P3QB1/1K1R3r w - -")
def test_insufficient_material(self):
    """is_insufficient_material() handles same- vs. opposite-colored bishops."""
    # Starting position.
    board = chess.Board()
    self.assertFalse(board.is_insufficient_material())
    # King vs. King + 2 bishops of the same color.
    board = chess.Board("k1K1B1B1/8/8/8/8/8/8/8 w - - 7 32")
    self.assertTrue(board.is_insufficient_material())
    # Add bishop of opposite color for the weaker side.
    board.set_piece_at(chess.B8, chess.Piece.from_symbol("b"))
    self.assertFalse(board.is_insufficient_material())
def test_promotion_with_check(self):
    """Promotions that give check update check status and FEN correctly."""
    board = chess.Board("8/6P1/2p5/1Pqk4/6P1/2P1RKP1/4P1P1/8 w - - 0 1")
    board.push(chess.Move.from_uci("g7g8q"))
    self.assertTrue(board.is_check())
    self.assertEqual(board.fen(), "6Q1/8/2p5/1Pqk4/6P1/2P1RKP1/4P1P1/8 b - - 0 1")
    # Same via SAN, including the '+' suffix.
    board = chess.Board("8/8/8/3R1P2/8/2k2K2/3p4/r7 b - - 0 82")
    board.push_san("d1=Q+")
    self.assertEqual(board.fen(), "8/8/8/3R1P2/8/2k2K2/8/r2q4 w - - 0 83")
def test_scholars_mate(self):
    """Play out scholar's mate and verify check/checkmate/game-over flags."""
    board = chess.Board()
    e4 = chess.Move.from_uci("e2e4")
    self.assertTrue(e4 in board.legal_moves)
    board.push(e4)
    e5 = chess.Move.from_uci("e7e5")
    self.assertTrue(e5 in board.legal_moves)
    board.push(e5)
    Qf3 = chess.Move.from_uci("d1f3")
    self.assertTrue(Qf3 in board.legal_moves)
    board.push(Qf3)
    Nc6 = chess.Move.from_uci("b8c6")
    self.assertTrue(Nc6 in board.legal_moves)
    board.push(Nc6)
    Bc4 = chess.Move.from_uci("f1c4")
    self.assertTrue(Bc4 in board.legal_moves)
    board.push(Bc4)
    Rb8 = chess.Move.from_uci("a8b8")
    self.assertTrue(Rb8 in board.legal_moves)
    board.push(Rb8)
    # One move before mate: no check, game still running.
    self.assertFalse(board.is_check())
    self.assertFalse(board.is_checkmate())
    self.assertFalse(board.is_game_over())
    self.assertFalse(board.is_stalemate())
    Qf7_mate = chess.Move.from_uci("f3f7")
    self.assertTrue(Qf7_mate in board.legal_moves)
    board.push(Qf7_mate)
    self.assertTrue(board.is_check())
    self.assertTrue(board.is_checkmate())
    self.assertTrue(board.is_game_over())
    self.assertFalse(board.is_stalemate())
    self.assertEqual(board.fen(), "1rbqkbnr/pppp1Qpp/2n5/4p3/2B1P3/8/PPPP1PPP/RNB1K1NR b KQk - 0 4")
def test_san(self):
    """san() output: castling with check, e.p. mate, disambiguation, promotion."""
    # Castling with check.
    fen = "rnbk1b1r/ppp2pp1/5n1p/4p1B1/2P5/2N5/PP2PPPP/R3KBNR w KQ - 0 7"
    board = chess.Board(fen)
    long_castle_check = chess.Move.from_uci("e1a1")
    self.assertEqual(board.san(long_castle_check), "O-O-O+")
    self.assertEqual(board.fen(), fen)
    # En passant mate.
    fen = "6bk/7b/8/3pP3/8/8/8/Q3K3 w - d6 0 2"
    board = chess.Board(fen)
    fxe6_mate_ep = chess.Move.from_uci("e5d6")
    self.assertEqual(board.san(fxe6_mate_ep), "exd6#")
    self.assertEqual(board.fen(), fen)
    # Test ambiguation.
    fen = "N3k2N/8/8/3N4/N4N1N/2R5/1R6/4K3 w - - 0 1"
    board = chess.Board(fen)
    self.assertEqual(board.san(chess.Move.from_uci("e1f1")), "Kf1")
    self.assertEqual(board.san(chess.Move.from_uci("c3c2")), "Rcc2")
    self.assertEqual(board.san(chess.Move.from_uci("b2c2")), "Rbc2")
    self.assertEqual(board.san(chess.Move.from_uci("a4b6")), "N4b6")
    self.assertEqual(board.san(chess.Move.from_uci("h8g6")), "N8g6")
    self.assertEqual(board.san(chess.Move.from_uci("h4g6")), "Nh4g6")
    self.assertEqual(board.fen(), fen)
    # Do not disambiguate illegal alternatives.
    fen = "8/8/8/R2nkn2/8/8/2K5/8 b - - 0 1"
    board = chess.Board(fen)
    self.assertEqual(board.san(chess.Move.from_uci("f5e3")), "Ne3+")
    self.assertEqual(board.fen(), fen)
    # Promotion.
    fen = "7k/1p2Npbp/8/2P5/1P1r4/3b2QP/3q1pPK/2RB4 b - - 1 29"
    board = chess.Board(fen)
    self.assertEqual(board.san(chess.Move.from_uci("f2f1q")), "f1=Q")
    self.assertEqual(board.san(chess.Move.from_uci("f2f1n")), "f1=N+")
    self.assertEqual(board.fen(), fen)
def test_is_legal_move(self):
    """legal_moves membership for legal moves, bad promotions, and a missing one."""
    fen = "3k4/6P1/7P/8/K7/8/8/4R3 w - - 0 1"
    board = chess.Board(fen)
    # Legal moves: Rg1, g8=R+.
    self.assertTrue(chess.Move.from_uci("e1g1") in board.legal_moves)
    self.assertTrue(chess.Move.from_uci("g7g8r") in board.legal_moves)
    # Impossible promotion: Kb5, h7.
    self.assertFalse(chess.Move.from_uci("a5b5q") in board.legal_moves)
    self.assertFalse(chess.Move.from_uci("h6h7n") in board.legal_moves)
    # Missing promotion.
    self.assertFalse(chess.Move.from_uci("g7g8") in board.legal_moves)
    self.assertEqual(board.fen(), fen)
def test_move_count(self):
    """pseudo_legal_moves reports the expected per-piece move total."""
    board = chess.Board("1N2k3/P7/8/8/3n4/8/2PP4/R3K2R w KQ - 0 1")
    expected = 8 + 4 + 3 + 2 + 1 + 6 + 9
    self.assertEqual(len(board.pseudo_legal_moves), expected)
def test_polyglot(self):
    """Zobrist hashes match the published polyglot reference values."""
    # Test polyglot compability using test data from
    # http://hardy.uhasselt.be/Toga/book_format.html. Forfeiting castling
    # rights should not reset the half move counter, though.
    board = chess.Board()
    self.assertEqual(board.fen(), "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
    self.assertEqual(board.zobrist_hash(), 0x463b96181691fc9c)
    board.push_san("e4")
    self.assertEqual(board.fen(), "rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq - 0 1")
    self.assertEqual(board.zobrist_hash(), 0x823c9b50fd114196)
    board.push_san("d5")
    self.assertEqual(board.fen(), "rnbqkbnr/ppp1pppp/8/3p4/4P3/8/PPPP1PPP/RNBQKBNR w KQkq - 0 2")
    self.assertEqual(board.zobrist_hash(), 0x0756b94461c50fb0)
    board.push_san("e5")
    self.assertEqual(board.fen(), "rnbqkbnr/ppp1pppp/8/3pP3/8/8/PPPP1PPP/RNBQKBNR b KQkq - 0 2")
    self.assertEqual(board.zobrist_hash(), 0x662fafb965db29d4)
    board.push_san("f5")
    self.assertEqual(board.fen(), "rnbqkbnr/ppp1p1pp/8/3pPp2/8/8/PPPP1PPP/RNBQKBNR w KQkq f6 0 3")
    self.assertEqual(board.zobrist_hash(), 0x22a48b5a8e47ff78)
    board.push_san("Ke2")
    self.assertEqual(board.fen(), "rnbqkbnr/ppp1p1pp/8/3pPp2/8/8/PPPPKPPP/RNBQ1BNR b kq - 1 3")
    self.assertEqual(board.zobrist_hash(), 0x652a607ca3f242c1)
    board.push_san("Kf7")
    self.assertEqual(board.fen(), "rnbq1bnr/ppp1pkpp/8/3pPp2/8/8/PPPPKPPP/RNBQ1BNR w - - 2 4")
    self.assertEqual(board.zobrist_hash(), 0x00fdd303c946bdd9)
    # Second reference line with pawn captures and a rook lift.
    board = chess.Board()
    board.push_san("a4")
    board.push_san("b5")
    board.push_san("h4")
    board.push_san("b4")
    board.push_san("c4")
    self.assertEqual(board.fen(), "rnbqkbnr/p1pppppp/8/8/PpP4P/8/1P1PPPP1/RNBQKBNR b KQkq c3 0 3")
    self.assertEqual(board.zobrist_hash(), 0x3c8123ea7b067637)
    board.push_san("bxc3")
    board.push_san("Ra3")
    self.assertEqual(board.fen(), "rnbqkbnr/p1pppppp/8/8/P6P/R1p5/1P1PPPP1/1NBQKBNR b Kkq - 1 4")
    self.assertEqual(board.zobrist_hash(), 0x5c3f9b829b279560)
def test_castling_move_generation_bug(self):
    """Regression: move generation stays consistent around castling state."""
    # Specific test position right after castling.
    fen = "rnbqkbnr/2pp1ppp/8/4p3/2BPP3/P1N2N2/PB3PPP/2RQ1RK1 b kq - 1 10"
    board = chess.Board(fen)
    illegal_move = chess.Move.from_uci("g1g2")
    self.assertFalse(illegal_move in board.legal_moves)
    self.assertFalse(illegal_move in list(board.legal_moves))
    self.assertFalse(illegal_move in board.pseudo_legal_moves)
    self.assertFalse(illegal_move in list(board.pseudo_legal_moves))
    # Make a move.
    board.push_san("exd4")
    # Already castled short, can not castle long.
    illegal_move = chess.Move.from_uci("e1c1")
    self.assertFalse(illegal_move in board.pseudo_legal_moves)
    self.assertFalse(illegal_move in board.generate_pseudo_legal_moves())
    self.assertFalse(illegal_move in board.legal_moves)
    self.assertFalse(illegal_move in list(board.legal_moves))
    # Unmake the move.
    board.pop()
    # Generate all pseudo legal moves, two moves deep.
    for move in board.pseudo_legal_moves:
        board.push(move)
        for move in board.pseudo_legal_moves:
            board.push(move)
            board.pop()
        board.pop()
    # Check that board is still consistent.
    self.assertEqual(board.fen(), fen)
    self.assertTrue(board.kings & chess.BB_G1)
    self.assertTrue(board.occupied & chess.BB_G1)
    self.assertTrue(board.occupied_co[chess.WHITE] & chess.BB_G1)
    self.assertEqual(board.piece_at(chess.G1), chess.Piece(chess.KING, chess.WHITE))
    self.assertEqual(board.piece_at(chess.C1), chess.Piece(chess.ROOK, chess.WHITE))
def test_move_generation_bug(self):
    """Regression: an illegal king move must not appear after a rook move."""
    # Specific problematic position.
    fen = "4kb1r/3b1ppp/8/1r2pNB1/6P1/pP2QP2/P6P/4R1K1 w k - 0 27"
    board = chess.Board(fen)
    # Make a move.
    board.push_san("Re2")
    # Check for the illegal move.
    illegal_move = chess.Move.from_uci("e8f8")
    self.assertFalse(illegal_move in board.pseudo_legal_moves)
    self.assertFalse(illegal_move in board.generate_pseudo_legal_moves())
    self.assertFalse(illegal_move in board.legal_moves)
    self.assertFalse(illegal_move in board.generate_legal_moves())
    # Generate all pseudo legal moves.
    for a in board.pseudo_legal_moves:
        board.push(a)
        board.pop()
    # Unmake the move.
    board.pop()
    # Check that board is still consistent.
    self.assertEqual(board.fen(), fen)
def test_stateful_move_generation_bug(self):
    """Regression: generating moves while pushing/popping must stay correct.

    Deliberately iterates the lazy legal_moves generator while mutating the
    board -- that interleaving is exactly what this regression test covers.
    """
    board = chess.Board("r1b1k3/p2p1Nr1/n2b3p/3pp1pP/2BB1p2/P3P2R/Q1P3P1/R3K1N1 b Qq - 0 1")
    count = 0
    for move in board.legal_moves:
        board.push(move)
        list(board.generate_legal_moves())
        count += 1
        board.pop()
    self.assertEqual(count, 26)
def test_ninesixty_castling_bug(self):
    """Regression: a Chess960 queenside castle is recognized on every API path."""
    board = chess.Board("4r3/3k4/8/8/8/8/q5PP/1R1KR3 w Q - 2 2")
    move = chess.Move.from_uci("d1b1")
    self.assertTrue(board.is_castling(move))
    self.assertTrue(move in board.generate_pseudo_legal_moves())
    self.assertTrue(board.is_pseudo_legal(move))
    self.assertTrue(move in board.generate_legal_moves())
    self.assertTrue(board.is_legal(move))
    self.assertEqual(board.parse_san("O-O-O+"), move)
    self.assertEqual(board.san(move), "O-O-O+")
def test_equality(self):
    """Boards compare equal/unequal by position."""
    self.assertEqual(chess.Board(), chess.Board())
    self.assertFalse(chess.Board() != chess.Board())

    first = chess.Board()
    first.push_san("d4")
    second = chess.Board()
    second.push_san("d3")
    self.assertNotEqual(first, second)
    self.assertFalse(first == second)
def test_status(self):
    """status() flags bad castling rights, missing kings, and e.p. squares."""
    board = chess.Board()
    self.assertEqual(board.status(), chess.STATUS_VALID)
    self.assertTrue(board.is_valid())
    board.remove_piece_at(chess.H1)
    self.assertTrue(board.status() & chess.STATUS_BAD_CASTLING_RIGHTS)
    board.remove_piece_at(chess.E8)
    self.assertTrue(board.status() & chess.STATUS_NO_BLACK_KING)
    # The en passant square should be set even if no capture is actually
    # possible.
    board = chess.Board()
    board.push_san("e4")
    self.assertEqual(board.ep_square, chess.E3)
    self.assertEqual(board.status(), chess.STATUS_VALID)
    # But there must indeed be a pawn there.
    board.remove_piece_at(chess.E4)
    self.assertEqual(board.status(), chess.STATUS_INVALID_EP_SQUARE)
    # King must be between the two rooks.
    board = chess.Board("2rrk3/8/8/8/8/8/3PPPPP/2RK4 w cd - 0 1")
    self.assertEqual(board.status(), chess.STATUS_BAD_CASTLING_RIGHTS)
    # Generally valid position, but not valid standard chess position due
    # to non-standard castling rights. Chess960 start position #0.
    board = chess.Board("bbqnnrkr/pppppppp/8/8/8/8/PPPPPPPP/BBQNNRKR w KQkq - 0 1", chess960=True)
    self.assertEqual(board.status(), chess.STATUS_VALID)
    board = chess.Board("bbqnnrkr/pppppppp/8/8/8/8/PPPPPPPP/BBQNNRKR w KQkq - 0 1", chess960=False)
    self.assertEqual(board.status(), chess.STATUS_BAD_CASTLING_RIGHTS)
def test_epd(self):
    """epd()/set_epd() round-trip opcodes: bm, id, pv, noop, counters, SANs."""
    # Create an EPD with a move and a string.
    board = chess.Board("1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - 0 1")
    epd = board.epd(bm=chess.Move(chess.D6, chess.D1), id="BK.01")
    # Opcode order is unspecified; accept both serializations.
    self.assertTrue(epd in (
        "1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - bm Qd1+; id \"BK.01\";",
        "1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - id \"BK.01\"; bm Qd1+;" ))
    # Create an EPD with a noop.
    board = chess.Board("4k3/8/8/8/8/8/8/4K3 w - - 0 1")
    self.assertEqual(board.epd(noop=None), "4k3/8/8/8/8/8/8/4K3 w - - noop;")
    # Create an EPD with a variation.
    board = chess.Board("k7/8/8/8/8/8/4PPPP/4K1NR w K - 0 1")
    epd = board.epd(pv=[
        chess.Move.from_uci("g1f3"),  # Nf3
        chess.Move.from_uci("a8a7"),  # Ka7
        chess.Move.from_uci("e1h1"),  # O-O
    ])
    self.assertEqual(epd, "k7/8/8/8/8/8/4PPPP/4K1NR w K - pv Nf3 Ka7 O-O;")
    # Create an EPD with a set of moves.
    board = chess.Board("8/8/8/4k3/8/1K6/8/8 b - - 0 1")
    epd = board.epd(bm=[
        chess.Move.from_uci("e5e6"),  # Ke6
        chess.Move.from_uci("e5e4"),  # Ke4
    ])
    self.assertEqual(epd, "8/8/8/4k3/8/1K6/8/8 b - - bm Ke6 Ke4;")
    # Test loading an EPD.
    board = chess.Board()
    operations = board.set_epd("r2qnrnk/p2b2b1/1p1p2pp/2pPpp2/1PP1P3/PRNBB3/3QNPPP/5RK1 w - - bm f4; id \"BK.24\";")
    self.assertEqual(board.fen(), "r2qnrnk/p2b2b1/1p1p2pp/2pPpp2/1PP1P3/PRNBB3/3QNPPP/5RK1 w - - 0 1")
    self.assertTrue(chess.Move(chess.F2, chess.F4) in operations["bm"])
    self.assertEqual(operations["id"], "BK.24")
    # Test loading an EPD with half counter operations.
    board = chess.Board()
    operations = board.set_epd("4k3/8/8/8/8/8/8/4K3 b - - fmvn 17; hmvc 13")
    self.assertEqual(board.fen(), "4k3/8/8/8/8/8/8/4K3 b - - 13 17")
    self.assertEqual(operations["fmvn"], 17)
    self.assertEqual(operations["hmvc"], 13)
    # Test context of parsed SANs.
    board = chess.Board()
    operations = board.set_epd("4k3/8/8/2N5/8/8/8/4K3 w - - test Ne4")
    self.assertEqual(operations["test"], chess.Move(chess.C5, chess.E4))
    # Test parsing EPD with a set of moves.
    board = chess.Board()
    operations = board.set_epd("4k3/8/3QK3/8/8/8/8/8 w - - bm Qe7# Qb8#;")
    self.assertEqual(board.fen(), "4k3/8/3QK3/8/8/8/8/8 w - - 0 1")
    self.assertEqual(len(operations["bm"]), 2)
    self.assertTrue(chess.Move.from_uci("d6b8") in operations["bm"])
    self.assertTrue(chess.Move.from_uci("d6e7") in operations["bm"])
    # Test parsing EPD with a stack of moves.
    board = chess.Board()
    operations = board.set_epd("6k1/1p6/6K1/8/8/8/8/7Q w - - pv Qh7+ Kf8 Qf7#;")
    self.assertEqual(len(operations["pv"]), 3)
    self.assertEqual(operations["pv"][0], chess.Move.from_uci("h1h7"))
    self.assertEqual(operations["pv"][1], chess.Move.from_uci("g8f8"))
    self.assertEqual(operations["pv"][2], chess.Move.from_uci("h7f7"))
    def test_null_moves(self):
        """Null moves render as "0000", are falsy, and round-trip via push/pop."""
        self.assertEqual(str(chess.Move.null()), "0000")
        self.assertEqual(chess.Move.null().uci(), "0000")
        self.assertFalse(chess.Move.null())
        fen = "rnbqkbnr/ppp1pppp/8/2Pp4/8/8/PP1PPPPP/RNBQKBNR w KQkq d6 0 2"
        board = chess.Board(fen)
        # "--" is the SAN spelling of a null move. Pushing it clears the
        # en passant square and increments the halfmove clock.
        self.assertEqual(chess.Move.from_uci("0000"), board.push_san("--"))
        self.assertEqual(board.fen(), "rnbqkbnr/ppp1pppp/8/2Pp4/8/8/PP1PPPPP/RNBQKBNR b KQkq - 1 2")
        # Popping restores the exact previous position.
        self.assertEqual(chess.Move.null(), board.pop())
        self.assertEqual(board.fen(), fen)
def test_attackers(self):
board = chess.Board("r1b1k2r/pp1n1ppp/2p1p3/q5B1/1b1P4/P1n1PN2/1P1Q1PPP/2R1KB1R b Kkq - 3 10")
attackers = board.attackers(chess.WHITE, chess.C3)
self.assertEqual(len(attackers), 3)
self.assertTrue(chess.C1 in attackers)
self.assertTrue(chess.D2 in attackers)
self.assertTrue(chess.B2 in attackers)
self.assertFalse(chess.D4 in attackers)
self.assertFalse(chess.E1 in attackers)
    def test_en_passant_attackers(self):
        """Pawns attack both the en passant square and the pawn to be captured."""
        board = chess.Board("4k3/8/8/8/4pPp1/8/8/4K3 b - f3 0 1")
        # Still attacking the en passant square.
        attackers = board.attackers(chess.BLACK, chess.F3)
        self.assertEqual(len(attackers), 2)
        self.assertTrue(chess.E4 in attackers)
        self.assertTrue(chess.G4 in attackers)
        # Also attacking the pawn.
        attackers = board.attackers(chess.BLACK, chess.F4)
        self.assertEqual(len(attackers), 2)
        self.assertTrue(chess.E4 in attackers)
        self.assertTrue(chess.G4 in attackers)
    def test_attacks(self):
        """attacks() returns the squares attacked *from* a square, excluding it."""
        board = chess.Board("5rk1/p5pp/2p3p1/1p1pR3/3P2P1/2N5/PP3n2/2KB4 w - - 1 26")
        attacks = board.attacks(chess.E5)
        self.assertEqual(len(attacks), 11)
        self.assertTrue(chess.D5 in attacks)
        self.assertTrue(chess.E1 in attacks)
        self.assertTrue(chess.F5 in attacks)
        # The rook's own square and squares beyond blockers are not attacked.
        self.assertFalse(chess.E5 in attacks)
        self.assertFalse(chess.C5 in attacks)
        self.assertFalse(chess.F4 in attacks)
        # An empty square attacks nothing.
        self.assertFalse(board.attacks(chess.G1))
def test_clear(self):
board = chess.Board()
board.clear()
self.assertEqual(board.turn, chess.WHITE)
self.assertEqual(board.fullmove_number, 1)
self.assertEqual(board.halfmove_clock, 0)
self.assertEqual(board.castling_rights, chess.BB_VOID)
self.assertFalse(board.ep_square)
self.assertFalse(board.piece_at(chess.E1))
self.assertEqual(chess.pop_count(board.occupied), 0)
    def test_threefold_repetition(self):
        """Threefold repetition can be claimed one move early and survives pop()."""
        board = chess.Board()
        # Go back and forth with the knights to reach the starting position
        # for a second time.
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Nf3")
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Nf6")
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Ng1")
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Ng8")
        # Once more.
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Nf3")
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Nf6")
        self.assertFalse(board.can_claim_threefold_repetition())
        board.push_san("Ng1")
        # Now black can go back to the starting position (thus reaching it a
        # third time.)
        self.assertTrue(board.can_claim_threefold_repetition())
        board.push_san("Ng8")
        # They indeed do it. Also white can now claim.
        self.assertTrue(board.can_claim_threefold_repetition())
        # But not after a different move.
        board.push_san("e4")
        self.assertFalse(board.can_claim_threefold_repetition())
        # Undo moves and check if everything works backwards.
        board.pop()  # e4
        self.assertTrue(board.can_claim_threefold_repetition())
        board.pop()  # Ng8
        self.assertTrue(board.can_claim_threefold_repetition())
        while board.move_stack:
            board.pop()
            self.assertFalse(board.can_claim_threefold_repetition())
    def test_fivefold_repetition(self):
        """Fivefold repetition ends the game automatically, but only when the
        repetitions are consecutive; threefold can still be claimed otherwise."""
        fen = "rnbq1rk1/ppp3pp/3bpn2/3p1p2/2PP4/2NBPN2/PP3PPP/R1BQK2R w KQ - 3 7"
        board = chess.Board(fen)
        # Repeat the position up to the fourth time.
        for i in range(3):
            board.push_san("Be2")
            self.assertFalse(board.is_fivefold_repetition())
            board.push_san("Ne4")
            self.assertFalse(board.is_fivefold_repetition())
            board.push_san("Bd3")
            self.assertFalse(board.is_fivefold_repetition())
            board.push_san("Nf6")
            self.assertEqual(board.fen().split()[0], fen.split()[0])
            self.assertFalse(board.is_fivefold_repetition())
            self.assertFalse(board.is_game_over())
        # Repeat it once more. Now it is a five-fold repetition.
        board.push_san("Be2")
        self.assertFalse(board.is_fivefold_repetition())
        board.push_san("Ne4")
        self.assertFalse(board.is_fivefold_repetition())
        board.push_san("Bd3")
        self.assertFalse(board.is_fivefold_repetition())
        board.push_san("Nf6")
        self.assertEqual(board.fen().split()[0], fen.split()[0])
        self.assertTrue(board.is_fivefold_repetition())
        self.assertTrue(board.is_game_over())
        # It is also a threefold repetition.
        self.assertTrue(board.can_claim_threefold_repetition())
        # Now no longer.
        board.push_san("Qc2")
        board.push_san("Qd7")
        self.assertFalse(board.can_claim_threefold_repetition())
        self.assertFalse(board.is_fivefold_repetition())
        board.push_san("Qd2")
        board.push_san("Qe7")
        self.assertFalse(board.can_claim_threefold_repetition())
        self.assertFalse(board.is_fivefold_repetition())
        # Give the possibility to repeat.
        board.push_san("Qd1")
        self.assertFalse(board.is_fivefold_repetition())
        self.assertTrue(board.can_claim_threefold_repetition())
        # Do in fact repeat.
        self.assertFalse(board.is_fivefold_repetition())
        board.push_san("Qd8")
        # This is a threefold repetition but not a fivefold repetition, because
        # consecutive moves are required for that.
        self.assertTrue(board.can_claim_threefold_repetition())
        self.assertFalse(board.is_fivefold_repetition())
        self.assertEqual(board.fen().split()[0], fen.split()[0])
    def test_fifty_moves(self):
        """The fifty-move claim hinges on the halfmove clock reaching 100 plies."""
        # Test positions from Timman - Lutz (1995).
        board = chess.Board()
        self.assertFalse(board.can_claim_fifty_moves())
        board = chess.Board("8/5R2/8/r2KB3/6k1/8/8/8 w - - 19 79")
        self.assertFalse(board.can_claim_fifty_moves())
        board = chess.Board("8/8/6r1/4B3/8/4K2k/5R2/8 b - - 68 103")
        self.assertFalse(board.can_claim_fifty_moves())
        # 99 plies is one short of fifty full moves.
        board = chess.Board("6R1/7k/8/8/1r3B2/5K2/8/8 w - - 99 119")
        self.assertFalse(board.can_claim_fifty_moves())
        board = chess.Board("8/7k/8/6R1/1r3B2/5K2/8/8 b - - 100 119")
        self.assertTrue(board.can_claim_fifty_moves())
        board = chess.Board("8/7k/8/1r3KR1/5B2/8/8/8 w - - 105 122")
        self.assertTrue(board.can_claim_fifty_moves())
        # Once checkmated it is too late to claim.
        board = chess.Board("k7/8/NKB5/8/8/8/8/8 b - - 105 176")
        self.assertFalse(board.can_claim_fifty_moves())
        # A stalemate is a draw, but you can not and do not need to claim it by
        # the fifty move rule.
        board = chess.Board("k7/3N4/1K6/1B6/8/8/8/8 b - - 99 1")
        self.assertTrue(board.is_stalemate())
        self.assertTrue(board.is_game_over())
        self.assertFalse(board.can_claim_fifty_moves())
        self.assertFalse(board.can_claim_draw())
    def test_ep_legality(self):
        """An en passant capture is only legal immediately after the double push."""
        move = chess.Move.from_uci("h5g6")
        board = chess.Board("rnbqkbnr/pppppp2/7p/6pP/8/8/PPPPPPP1/RNBQKBNR w KQkq g6 0 3")
        self.assertTrue(board.is_legal(move))
        # After any intervening move the en passant right is gone.
        board.push_san("Nf3")
        self.assertFalse(board.is_legal(move))
        board.push_san("Nf6")
        self.assertFalse(board.is_legal(move))
        move = chess.Move.from_uci("c4d3")
        board = chess.Board("rnbqkbnr/pp1ppppp/8/8/2pP4/2P2N2/PP2PPPP/RNBQKB1R b KQkq d3 0 3")
        self.assertTrue(board.is_legal(move))
        board.push_san("Qc7")
        self.assertFalse(board.is_legal(move))
        board.push_san("Bd2")
        self.assertFalse(board.is_legal(move))
def test_pseudo_legality(self):
sample_moves = [
chess.Move(chess.A2, chess.A4),
chess.Move(chess.C1, chess.E3),
chess.Move(chess.G8, chess.F6),
chess.Move(chess.D7, chess.D8, chess.QUEEN),
chess.Move(chess.E5, chess.E4) ]
sample_fens = [
chess.STARTING_FEN,
"rnbqkbnr/pp1ppppp/2p5/8/6P1/2P5/PP1PPP1P/RNBQKBNR b KQkq - 0 1",
"rnb1kbnr/ppq1pppp/2pp4/8/6P1/2P5/PP1PPPBP/RNBQK1NR w KQkq - 0 1",
"rn2kbnr/p1q1ppp1/1ppp3p/8/4B1b1/2P4P/PPQPPP2/RNB1K1NR w KQkq - 0 1",
"rnkq1bnr/p3ppp1/1ppp3p/3B4/6b1/2PQ3P/PP1PPP2/RNB1K1NR w KQ - 0 1",
"rn1q1bnr/3kppp1/2pp3p/pp6/1P2b3/2PQ1N1P/P2PPPB1/RNB1K2R w KQ - 0 1",
"rnkq1bnr/4pp2/2pQ2pp/pp6/1P5N/2P4P/P2PPP2/RNB1KB1b w Q - 0 1",
"rn3b1r/1kq1p3/2pQ1npp/Pp6/4b3/2PPP2P/P4P2/RNB1KB2 w Q - 0 1",
"r4br1/8/k1p2npp/Ppn1p3/P7/2PPP1qP/4bPQ1/RNB1KB2 w Q - 0 1",
"rnbqk1nr/p2p3p/1p5b/2pPppp1/8/P7/1PPQPPPP/RNB1KBNR w KQkq c6 0 1",
"rnb1k2r/pp1p1p1p/1q1P4/2pnpPp1/6P1/2N5/PP1BP2P/R2QKBNR w KQkq e6 0 1",
"1n4kr/2B4p/2nb2b1/ppp5/P1PpP3/3P4/5K2/1N1R4 b - c3 0 1",
"r2n3r/1bNk2pp/6P1/pP3p2/3pPqnP/1P1P1p1R/2P3B1/Q1B1bKN1 b - e3 0 1" ]
for sample_fen in sample_fens:
board = chess.Board(sample_fen)
pseudo_legal_moves = list(board.generate_pseudo_legal_moves())
# Ensure that all moves generated as pseudo legal pass the pseudo-
# legality check.
for move in pseudo_legal_moves:
self.assertTrue(board.is_pseudo_legal(move))
# Check that moves not generated as pseudo legal do not pass the
# pseudo legality check.
for move in sample_moves:
if not move in pseudo_legal_moves:
self.assertFalse(board.is_pseudo_legal(move))
def test_pieces(self):
board = chess.Board()
king = board.pieces(chess.KING, chess.WHITE)
self.assertTrue(chess.E1 in king)
self.assertEqual(len(king), 1)
    def test_string_conversion(self):
        """ASCII, Unicode-figurine, and HTML renderings of a position."""
        board = chess.Board("7k/1p1qn1b1/pB1p1n2/3Pp3/4Pp1p/2QN1B2/PP4PP/6K1 w - - 0 28")
        self.assertEqual(str(board), textwrap.dedent(u"""\
            . . . . . . . k
            . p . q n . b .
            p B . p . n . .
            . . . P p . . .
            . . . . P p . p
            . . Q N . B . .
            P P . . . . P P
            . . . . . . K ."""))
        self.assertEqual(board.__unicode__(), textwrap.dedent(u"""\
            . . . . . . . ♚
            . ♟ . ♛ ♞ . ♝ .
            ♟ ♗ . ♟ . ♞ . .
            . . . ♙ ♟ . . .
            . . . . ♙ ♟ . ♟
            . . ♕ ♘ . ♗ . .
            ♙ ♙ . . . . ♙ ♙
            . . . . . . ♔ ."""))
        # The HTML rendering should contain figurines for pieces on the board
        # and none for piece types that are absent.
        html = board.__html__()
        self.assertTrue(u"♛" in html)
        self.assertTrue(u"♙" in html)
        self.assertFalse(u"♜" in html)
        self.assertFalse(u"♖" in html)
def test_move_info(self):
board = chess.Board("r1bqkb1r/p3np2/2n1p2p/1p4pP/2pP4/4PQ1N/1P2BPP1/RNB1K2R w KQkq g6 0 11")
self.assertTrue(board.is_capture(board.parse_san("Qxf7+")))
self.assertFalse(board.is_en_passant(board.parse_san("Qxf7+")))
self.assertFalse(board.is_castling(board.parse_san("Qxf7+")))
self.assertTrue(board.is_capture(board.parse_san("hxg6")))
self.assertTrue(board.is_en_passant(board.parse_san("hxg6")))
self.assertFalse(board.is_castling(board.parse_san("hxg6")))
self.assertFalse(board.is_capture(board.parse_san("b3")))
self.assertFalse(board.is_en_passant(board.parse_san("b3")))
self.assertFalse(board.is_castling(board.parse_san("b3")))
self.assertFalse(board.is_capture(board.parse_san("Ra6")))
self.assertFalse(board.is_en_passant(board.parse_san("Ra6")))
self.assertFalse(board.is_castling(board.parse_san("Ra6")))
self.assertFalse(board.is_capture(board.parse_san("O-O")))
self.assertFalse(board.is_en_passant(board.parse_san("O-O")))
self.assertTrue(board.is_castling(board.parse_san("O-O")))
    def test_pin(self):
        """is_pinned() detects absolute pins; pin() returns the pin ray mask."""
        board = chess.Board("rnb1k1nr/2pppppp/3P4/8/1b5q/8/PPPNPBPP/RNBQKB1R w KQkq - 0 1")
        self.assertTrue(board.is_pinned(chess.WHITE, chess.F2))
        self.assertTrue(board.is_pinned(chess.WHITE, chess.D2))
        # The king itself and unpinned pieces are not pinned.
        self.assertFalse(board.is_pinned(chess.WHITE, chess.E1))
        self.assertFalse(board.is_pinned(chess.BLACK, chess.H4))
        self.assertFalse(board.is_pinned(chess.BLACK, chess.E8))
        # An unpinned piece's pin mask is the full board.
        self.assertEqual(board.pin(chess.WHITE, chess.B1), chess.BB_ALL)
        # A pinned piece is restricted to the ray between king and attacker.
        self.assertEqual(board.pin(chess.WHITE, chess.F2), chess.BB_E1 | chess.BB_F2 | chess.BB_G3 | chess.BB_H4)
        self.assertEqual(board.pin(chess.WHITE, chess.D2), chess.BB_E1 | chess.BB_D2 | chess.BB_C3 | chess.BB_B4 | chess.BB_A5)
    def test_impossible_en_passant(self):
        """status() flags en passant squares that are inconsistent with the position."""
        # Not a pawn there.
        board = chess.Board("1b1b4/8/b1P5/2kP4/8/2b4K/8/8 w - c6 0 1")
        self.assertTrue(board.status() & chess.STATUS_INVALID_EP_SQUARE)
        # Sixth rank square not empty.
        board = chess.Board("5K2/8/2pp2Pp/2PP4/P5Pp/2pP1Ppp/P6p/7k b - g3 0 1")
        self.assertTrue(board.status() & chess.STATUS_INVALID_EP_SQUARE)
        # Seventh rank square not empty.
        board = chess.Board("8/7k/8/7p/8/8/8/K7 w - h6 0 1")
        self.assertTrue(board.status() & chess.STATUS_INVALID_EP_SQUARE)
class LegalMoveGeneratorTestCase(unittest.TestCase):
    """Tests for the lazy legal/pseudo-legal move generator wrappers."""
    def test_list_conversion(self):
        """The starting position has exactly 20 legal moves."""
        self.assertEqual(len(list(chess.Board().legal_moves)), 20)
    def test_nonzero(self):
        """The generator is truthy iff at least one legal move exists."""
        self.assertTrue(chess.Board().legal_moves)
        # A checkmated position has no legal moves.
        caro_kann_mate = chess.Board("r1bqkb1r/pp1npppp/2pN1n2/8/3P4/8/PPP1QPPP/R1B1KBNR b KQkq - 4 6")
        self.assertFalse(caro_kann_mate.legal_moves)
    def test_string_conversion(self):
        """str()/repr() list legal moves in SAN and pseudo-legal moves in UCI."""
        board = chess.Board("r3k1nr/ppq1pp1p/2p3p1/8/1PPR4/2N5/P3QPPP/5RK1 b kq b3 0 16")
        self.assertTrue("Qxh2+" in str(board.legal_moves))
        self.assertTrue("Qxh2+" in repr(board.legal_moves))
        self.assertTrue("Qxh2+" in str(board.pseudo_legal_moves))
        self.assertTrue("Qxh2+" in repr(board.pseudo_legal_moves))
        # e8d7 would leave the king in check: pseudo-legal but not legal.
        self.assertTrue("e8d7" in str(board.pseudo_legal_moves))
        self.assertTrue("e8d7" in repr(board.pseudo_legal_moves))
class SquareSetTestCase(unittest.TestCase):
    """Tests for chess.SquareSet, the set-like bitboard wrapper."""
    def test_equality(self):
        """SquareSets compare equal to each other and to raw bitboard ints."""
        a1 = chess.SquareSet(chess.BB_RANK_4)
        a2 = chess.SquareSet(chess.BB_RANK_4)
        b1 = chess.SquareSet(chess.BB_RANK_5 | chess.BB_RANK_6)
        b2 = chess.SquareSet(chess.BB_RANK_5 | chess.BB_RANK_6)
        self.assertEqual(a1, a2)
        self.assertEqual(b1, b2)
        self.assertFalse(a1 != a2)
        self.assertFalse(b1 != b2)
        self.assertNotEqual(a1, b1)
        self.assertNotEqual(a2, b2)
        self.assertFalse(a1 == b1)
        self.assertFalse(a2 == b2)
        # Equality also holds against plain integer bitboards, both ways.
        self.assertEqual(chess.SquareSet(chess.BB_ALL), chess.BB_ALL)
        self.assertEqual(chess.BB_ALL, chess.SquareSet(chess.BB_ALL))
    def test_string_conversion(self):
        """str() renders an 8x8 grid of 1/. from rank 8 down to rank 1."""
        expected = textwrap.dedent("""\
            . . . . . . . 1
            . 1 . . . . . .
            . . . . . . . .
            . . . . . . . .
            . . . . . . . .
            . . . . . . . .
            . . . . . . . .
            1 1 1 1 1 1 1 1""")
        bb = chess.SquareSet(chess.BB_H8 | chess.BB_B7 | chess.BB_RANK_1)
        self.assertEqual(str(bb), expected)
    def test_iter(self):
        """Iteration yields squares in ascending square-index order."""
        bb = chess.SquareSet(chess.BB_G7 | chess.BB_G8)
        self.assertEqual(list(bb), [chess.G7, chess.G8])
    def test_reversed(self):
        """reversed() yields squares in descending square-index order."""
        bb = chess.SquareSet(chess.BB_A1 | chess.BB_B1 | chess.BB_A7 | chess.BB_E1)
        self.assertEqual(list(reversed(bb)), [chess.A7, chess.E1, chess.B1, chess.A1])
    def test_arithmetic(self):
        """Bitwise operators, in-place variants, and shifts behave like ints."""
        self.assertEqual(chess.SquareSet(chess.BB_RANK_2) & chess.BB_FILE_D, chess.BB_D2)
        self.assertEqual(chess.SquareSet(chess.BB_ALL) ^ chess.BB_VOID, chess.BB_ALL)
        self.assertEqual(chess.SquareSet(chess.BB_C1) | chess.BB_FILE_C, chess.BB_FILE_C)
        bb = chess.SquareSet(chess.BB_VOID)
        bb ^= chess.BB_ALL
        self.assertEqual(bb, chess.BB_ALL)
        bb &= chess.BB_E4
        self.assertEqual(bb, chess.BB_E4)
        bb |= chess.BB_RANK_4
        self.assertEqual(bb, chess.BB_RANK_4)
        # Shifts move squares along the bitboard layout (left = higher index).
        self.assertEqual(chess.SquareSet(chess.BB_F3) << 1, chess.BB_G3)
        self.assertEqual(chess.SquareSet(chess.BB_C8) >> 2, chess.BB_A8)
        bb = chess.SquareSet(chess.BB_D1)
        bb <<= 1
        self.assertEqual(bb, chess.BB_E1)
        bb >>= 2
        self.assertEqual(bb, chess.BB_C1)
    def test_immutable_set_operations(self):
        """Non-mutating set-algebra helpers."""
        # NOTE(review): the issubset()/issuperset() expectations below look
        # swapped relative to standard set semantics (BB_A1 *is* a subset of
        # BB_RANK_1). They mirror the library implementation under test —
        # confirm against the chess.SquareSet documentation before changing.
        self.assertFalse(chess.SquareSet(chess.BB_A1).issubset(chess.BB_RANK_1))
        self.assertTrue(chess.SquareSet(chess.BB_RANK_1).issubset(chess.BB_A1))
        self.assertTrue(chess.SquareSet(chess.BB_A1).issuperset(chess.BB_RANK_1))
        self.assertFalse(chess.SquareSet(chess.BB_RANK_1).issuperset(chess.BB_A1))
        self.assertEqual(chess.SquareSet(chess.BB_A1).union(chess.BB_FILE_A), chess.BB_FILE_A)
        self.assertEqual(chess.SquareSet(chess.BB_A1).intersection(chess.BB_A2), chess.BB_VOID)
        self.assertEqual(chess.SquareSet(chess.BB_A1).difference(chess.BB_A2), chess.BB_A1)
        self.assertEqual(chess.SquareSet(chess.BB_A1).symmetric_difference(chess.BB_A2), chess.BB_A1 | chess.BB_A2)
        self.assertEqual(chess.SquareSet(chess.BB_C5).copy(), chess.BB_C5)
    def test_mutable_set_operations(self):
        """In-place mutators mirror the built-in set API, including KeyError."""
        squares = chess.SquareSet(chess.BB_A1)
        squares.update(chess.BB_FILE_H)
        self.assertEqual(squares, chess.BB_A1 | chess.BB_FILE_H)
        squares.intersection_update(chess.BB_RANK_8)
        self.assertEqual(squares, chess.BB_H8)
        squares.difference_update(chess.BB_A1)
        self.assertEqual(squares, chess.BB_H8)
        squares.symmetric_difference_update(chess.BB_A1)
        self.assertEqual(squares, chess.BB_A1 | chess.BB_H8)
        squares.add(chess.A3)
        self.assertEqual(squares, chess.BB_A1 | chess.BB_A3 | chess.BB_H8)
        squares.remove(chess.H8)
        self.assertEqual(squares, chess.BB_A1 | chess.BB_A3)
        # remove() raises for a missing square; discard() does not.
        with self.assertRaises(KeyError):
            squares.remove(chess.H8)
        squares.discard(chess.H8)
        squares.discard(chess.A1)
        self.assertEqual(squares, chess.BB_A3)
        squares.clear()
        self.assertEqual(squares, chess.BB_VOID)
        # pop() on an empty set raises, otherwise removes and returns a square.
        with self.assertRaises(KeyError):
            squares.pop()
        squares.add(chess.C7)
        self.assertEqual(squares.pop(), chess.C7)
        self.assertEqual(squares, chess.BB_VOID)
class PolyglotTestCase(unittest.TestCase):
    """Tests for reading Polyglot opening books from data/polyglot/*.bin."""
    def test_performance_bin(self):
        """The top entries for the first two plies are 1. e4 e5."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            pos = chess.Board()
            e4 = next(book.find_all(pos))
            self.assertEqual(e4.move(), pos.parse_san("e4"))
            pos.push(e4.move())
            e5 = next(book.find_all(pos))
            self.assertEqual(e5.move(), pos.parse_san("e5"))
            pos.push(e5.move())
    def test_mainline(self):
        """Following the best book move until the book runs out of entries."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            board = chess.Board()
            while True:
                try:
                    entry = book.find(board)
                except IndexError:
                    # find() raises IndexError when no entry exists.
                    break
                else:
                    board.push(entry.move())
            self.assertEqual(board.fen(), "r2q1rk1/4bppp/p2p1n2/np5b/3BP1P1/5N1P/PPB2P2/RN1QR1K1 b - - 0 15")
    def test_lasker_trap(self):
        """A book move with an underpromotion (fxg1=N+) decodes correctly."""
        with chess.polyglot.open_reader("data/polyglot/lasker-trap.bin") as book:
            board = chess.Board("rnbqk1nr/ppp2ppp/8/4P3/1BP5/8/PP2KpPP/RN1Q1BNR b kq - 1 7")
            entry = book.find(board)
            cute_underpromotion = entry.move()
            self.assertEqual(cute_underpromotion, board.parse_san("fxg1=N+"))
    def test_castling(self):
        """Polyglot's special castling encoding maps to O-O / O-O-O moves."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            # White decides between short castling and long castling at this
            # turning point in the Queens Gambit Exchange.
            pos = chess.Board("r1bqr1k1/pp1nbppp/2p2n2/3p2B1/3P4/2NBP3/PPQ1NPPP/R3K2R w KQ - 5 10")
            moves = set(entry.move() for entry in book.find_all(pos))
            self.assertTrue(pos.parse_san("O-O") in moves)
            self.assertTrue(pos.parse_san("O-O-O") in moves)
            self.assertTrue(pos.parse_san("h3") in moves)
            self.assertEqual(len(moves), 3)
            # Black usually castles long at this point in the Ruy Lopez
            # Exchange.
            pos = chess.Board("r3k1nr/1pp1q1pp/p1pb1p2/4p3/3PP1b1/2P1BN2/PP1N1PPP/R2Q1RK1 b kq - 4 9")
            moves = set(entry.move() for entry in book.find_all(pos))
            self.assertTrue(pos.parse_san("O-O-O") in moves)
            self.assertEqual(len(moves), 1)
    def test_empty_book(self):
        """An empty book has zero length and yields no entries."""
        with chess.polyglot.open_reader("data/polyglot/empty.bin") as book:
            self.assertEqual(len(book), 0)
            entries = book.find_all(chess.Board())
            self.assertEqual(len(list(entries)), 0)
    def test_reversed(self):
        """reversed(book) iterates entries back to front."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            # Last is first of reversed.
            self.assertEqual(book[-1], next(reversed(book)))
            # First is last of reversed.
            for last in reversed(book):
                pass
            self.assertEqual(book[0], last)
    def test_random_choice(self):
        """choice()/weighted_choice() honor the injected random source."""
        class FirstMockRandom(object):
            # Deterministic stand-in that always picks the lowest index.
            @staticmethod
            def randint(first, last):
                assert first <= last
                return first
        class LastMockRandom(object):
            # Deterministic stand-in that always picks the highest index.
            @staticmethod
            def randint(first, last):
                assert first <= last
                return last
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            # Uniform choice.
            entry = book.choice(chess.Board(), random=FirstMockRandom())
            self.assertEqual(entry.move(), chess.Move.from_uci("e2e4"))
            entry = book.choice(chess.Board(), random=LastMockRandom())
            self.assertEqual(entry.move(), chess.Move.from_uci("c2c4"))
            # Weighted choice.
            entry = book.weighted_choice(chess.Board(), random=FirstMockRandom())
            self.assertEqual(entry.move(), chess.Move.from_uci("e2e4"))
            entry = book.weighted_choice(chess.Board(), random=LastMockRandom())
            self.assertEqual(entry.move(), chess.Move.from_uci("c2c4"))
    def test_find(self):
        """find() returns the highest-weighted entry for a position."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            entry = book.find(chess.Board())
            self.assertEqual(entry.move(), chess.Move.from_uci("e2e4"))
    def test_contains(self):
        """Every iterated entry tests positive for membership."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            for entry in book:
                self.assertTrue(entry in book)
    def test_last(self):
        """find_all() accepts a raw Zobrist key and finds the final entry."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            last_entry = book[len(book)-1]
            self.assertTrue(any(book.find_all(last_entry.key)))
            self.assertTrue(all(book.find_all(last_entry.key)))
    def test_minimum_weight(self):
        """find() raises IndexError when no entry meets the minimum weight."""
        with chess.polyglot.open_reader("data/polyglot/performance.bin") as book:
            with self.assertRaises(IndexError):
                book.find(chess.Board(), minimum_weight=2)
class PgnTestCase(unittest.TestCase):
    """Tests for PGN parsing, exporting and game-tree manipulation.

    Fixes relative to the previous revision:
    * ``test_tree_traversal`` passed two squares to ``add_variation()``
      instead of a ``chess.Move`` (the second square silently landed in the
      ``comment`` parameter); it now constructs the move properly.
    * ``test_read_game`` opens its fixture with a ``with`` statement so the
      file handle is closed even when an assertion fails, consistent with
      ``test_scan_offsets`` and ``test_scan_headers``.
    """

    def test_exporter(self):
        """Build a small game tree and export it with several option sets."""
        game = chess.pgn.Game()
        game.comment = "Test game:"
        game.headers["Result"] = "*"
        e4 = game.add_variation(game.board().parse_san("e4"))
        e4.comment = "Scandinavian defense:"
        e4_d5 = e4.add_variation(e4.board().parse_san("d5"))
        e4_h5 = e4.add_variation(e4.board().parse_san("h5"))
        e4_h5.nags.add(chess.pgn.NAG_MISTAKE)
        e4_h5.starting_comment = "This"
        e4_h5.comment = "is nonesense"
        e4_e5 = e4.add_variation(e4.board().parse_san("e5"))
        e4_e5_Qf3 = e4_e5.add_variation(e4_e5.board().parse_san("Qf3"))
        e4_e5_Qf3.nags.add(chess.pgn.NAG_MISTAKE)
        e4_c5 = e4.add_variation(e4.board().parse_san("c5"))
        e4_c5.comment = "Sicilian"
        e4_d5_exd5 = e4_d5.add_main_variation(e4_d5.board().parse_san("exd5"))
        # Test string exporter with various options.
        exporter = chess.pgn.StringExporter()
        game.export(exporter, headers=False, comments=False, variations=False)
        self.assertEqual(str(exporter), "1. e4 d5 2. exd5 *")
        exporter = chess.pgn.StringExporter()
        game.export(exporter, headers=False, comments=False)
        self.assertEqual(str(exporter), "1. e4 d5 ( 1... h5 ) ( 1... e5 2. Qf3 ) ( 1... c5 ) 2. exd5 *")
        exporter = chess.pgn.StringExporter()
        game.export(exporter)
        pgn = textwrap.dedent("""\
            [Event "?"]
            [Site "?"]
            [Date "????.??.??"]
            [Round "?"]
            [White "?"]
            [Black "?"]
            [Result "*"]

            { Test game: } 1. e4 { Scandinavian defense: } d5 ( { This } 1... h5 $2
            { is nonesense } ) ( 1... e5 2. Qf3 $2 ) ( 1... c5 { Sicilian } ) 2. exd5 *""")
        self.assertEqual(str(exporter), pgn)
        # Test file exporter.
        virtual_file = StringIO()
        exporter = chess.pgn.FileExporter(virtual_file)
        game.export(exporter)
        self.assertEqual(virtual_file.getvalue(), pgn + "\n\n")

    def test_setup(self):
        """setup() manages the FEN/SetUp/Variant headers."""
        game = chess.pgn.Game()
        self.assertEqual(game.board(), chess.Board())
        self.assertFalse("FEN" in game.headers)
        self.assertFalse("SetUp" in game.headers)
        self.assertFalse("Variant" in game.headers)
        fen = "rnbqkbnr/pp1ppp1p/6p1/8/3pP3/5N2/PPP2PPP/RNBQKB1R w KQkq - 0 4"
        game.setup(fen)
        self.assertEqual(game.headers["FEN"], fen)
        self.assertEqual(game.headers["SetUp"], "1")
        self.assertFalse("Variant" in game.headers)
        # Setting the standard starting position removes the headers again.
        game.setup(chess.STARTING_FEN)
        self.assertFalse("FEN" in game.headers)
        self.assertFalse("SetUp" in game.headers)
        self.assertFalse("Variant" in game.headers)
        # Setup again, while starting FEN is already set.
        game.setup(chess.STARTING_FEN)
        self.assertFalse("FEN" in game.headers)
        self.assertFalse("SetUp" in game.headers)
        self.assertFalse("Variant" in game.headers)
        game.setup(chess.Board(fen))
        self.assertEqual(game.headers["FEN"], fen)
        self.assertEqual(game.headers["SetUp"], "1")
        self.assertFalse("Variant" in game.headers)
        # Chess960 starting position 283.
        fen = "rkbqrnnb/pppppppp/8/8/8/8/PPPPPPPP/RKBQRNNB w KQkq - 0 1"
        game.setup(fen)
        self.assertEqual(game.headers["FEN"], fen)
        self.assertEqual(game.headers["SetUp"], "1")
        self.assertEqual(game.headers["Variant"], "Chess960")
        board = game.board()
        self.assertTrue(board.chess960)
        self.assertEqual(board.fen(), fen)

    def test_promote_to_main(self):
        """promote_to_main() moves a variation to the front of the list."""
        e4 = chess.Move.from_uci("e2e4")
        d4 = chess.Move.from_uci("d2d4")
        node = chess.pgn.Game()
        node.add_variation(e4)
        node.add_variation(d4)
        self.assertEqual(list(variation.move for variation in node.variations), [e4, d4])
        node.promote_to_main(d4)
        self.assertEqual(list(variation.move for variation in node.variations), [d4, e4])

    def test_read_game(self):
        """Read all six Kasparov - Deep Blue 1997 games from one file."""
        # Use a context manager so the handle is closed even on failure.
        with open("data/pgn/kasparov-deep-blue-1997.pgn") as pgn:
            first_game = chess.pgn.read_game(pgn)
            second_game = chess.pgn.read_game(pgn)
            third_game = chess.pgn.read_game(pgn)
            fourth_game = chess.pgn.read_game(pgn)
            fifth_game = chess.pgn.read_game(pgn)
            sixth_game = chess.pgn.read_game(pgn)
            # End of file: read_game() returns None.
            self.assertTrue(chess.pgn.read_game(pgn) is None)
        self.assertEqual(first_game.headers["Event"], "IBM Man-Machine, New York USA")
        self.assertEqual(first_game.headers["Site"], "01")
        self.assertEqual(first_game.headers["Result"], "1-0")
        self.assertEqual(second_game.headers["Event"], "IBM Man-Machine, New York USA")
        self.assertEqual(second_game.headers["Site"], "02")
        self.assertEqual(third_game.headers["ECO"], "A00")
        self.assertEqual(fourth_game.headers["PlyCount"], "111")
        self.assertEqual(fifth_game.headers["Result"], "1/2-1/2")
        self.assertEqual(sixth_game.headers["White"], "Deep Blue (Computer)")
        self.assertEqual(sixth_game.headers["Result"], "1-0")

    def test_comment_at_eol(self):
        """A comment broken across a line boundary is parsed intact."""
        pgn = StringIO(textwrap.dedent("""\
            1. e4 e5 2. Nf3 Nc6 3. Bc4 Bc5 4. c3 Nf6 5. d3 d6 6. Nbd2 a6 $6 (6... Bb6 $5 {
            /\\ Ne7, c6}) *"""))
        game = chess.pgn.read_game(pgn)
        # Seek the node after 6.Nbd2 and before 6...a6.
        node = game
        while node.variations and not node.has_variation(chess.Move.from_uci("a7a6")):
            node = node.variation(0)
        # Make sure the comment for the second variation is there.
        self.assertTrue(5 in node.variation(1).nags)
        self.assertEqual(node.variation(1).comment, "/\\ Ne7, c6")

    def test_promotion_without_equals(self):
        """Promotions written as "b1Q" (no equals sign) are accepted."""
        # Example game from https://github.com/rozim/ChessData as originally
        # reported.
        pgn = StringIO(textwrap.dedent("""\
            [Event "It (open)"]
            [Site "Aschach (Austria)"]
            [Date "2011.12.26"]
            [Round "1"]
            [White "Ennsberger Ulrich (AUT)"]
            [Black "Koller Hans-Juergen (AUT)"]
            [Result "0-1"]
            [ECO "A45"]
            [WhiteElo "2373"]
            [BlackElo "2052"]
            [ID ""]
            [FileName ""]
            [Annotator ""]
            [Source ""]
            [Remark ""]

            1.d4 Nf6 2.Bg5 c5 3.d5 Ne4 4.Bf4 Qb6 5.Nd2 Nxd2 6.Bxd2 e6 7.Bc3
            d6 8.e4 e5 9.a4 Be7 10.a5 Qc7 11.f4 f6 12.f5 g6 13.Bb5+ Bd7 14.Bc4
            gxf5 15.Qh5+ Kd8 16.exf5 Qc8 17.g4 Na6 18.Ne2 b5 19.axb6 axb6
            20.O-O Nc7 21.Qf7 h5 22.Qg7 Rf8 23.gxh5 Ne8 24.Rxa8 Nxg7 25.Rxc8+
            Kxc8 26.Ng3 Rh8 27.Be2 Be8 28.Be1 Nxh5 29.Bxh5 Bxh5 30.Nxh5 Rxh5
            31.h4 Bf8 32.c4 Bh6 33.Bg3 Be3+ 34.Kg2 Kb7 35.Kh3 b5 36.b3 b4
            37.Kg4 Rh8 38.Kf3 Bh6 39.Bf2 Ra8 40.Kg4 Bf4 41.Kh5 Ra3 42.Kg6
            Rxb3 43.h5 Rf3 44.h6 Bxh6 45.Kxh6 Rxf5 46.Kg6 Rf4 47.Kf7 e4 48.Re1
            Rxf2 49.Ke6 Kc7 50.Rh1 b3 51.Rh7+ Kb6 52.Kxd6 b2 53.Rh1 Rd2 54.Rh8
            e3 55.Rb8+ Ka5 56.Kxc5 Ka4 57.d6 e2 58.Re8 b1Q 0-1"""))
        game = chess.pgn.read_game(pgn)
        # Make sure the last move is a promotion.
        last_node = game.end()
        self.assertEqual(last_node.move.uci(), "b2b1q")

    def test_variation_stack(self):
        """Unbalanced parentheses in movetext are tolerated."""
        # Ignore superfluous closing brackets.
        pgn = StringIO("1. e4 (1. d4))) !? *")
        game = chess.pgn.read_game(pgn)
        self.assertEqual(game.variation(0).san(), "e4")
        self.assertEqual(game.variation(1).san(), "d4")
        # Ignore superfluous opening brackets.
        pgn = StringIO("((( 1. c4 *")
        game = chess.pgn.read_game(pgn)
        self.assertEqual(game.variation(0).san(), "c4")

    def test_game_starting_comment(self):
        """A comment before the first move attaches to the game node."""
        pgn = StringIO("{ Game starting comment } 1. d3")
        game = chess.pgn.read_game(pgn)
        self.assertEqual(game.comment, "Game starting comment")
        self.assertEqual(game.variation(0).san(), "d3")
        pgn = StringIO("{ Empty game, but has a comment }")
        game = chess.pgn.read_game(pgn)
        self.assertEqual(game.comment, "Empty game, but has a comment")

    def test_annotation_symbols(self):
        """Suffix annotations (?!, ?, !!) become the corresponding NAGs."""
        pgn = StringIO("1. b4?! g6 2. Bb2 Nc6? 3. Bxh8!!")
        game = chess.pgn.read_game(pgn)
        node = game.variation(0)
        self.assertTrue(chess.pgn.NAG_DUBIOUS_MOVE in node.nags)
        self.assertEqual(len(node.nags), 1)
        node = node.variation(0)
        self.assertEqual(len(node.nags), 0)
        node = node.variation(0)
        self.assertEqual(len(node.nags), 0)
        node = node.variation(0)
        self.assertTrue(chess.pgn.NAG_MISTAKE in node.nags)
        self.assertEqual(len(node.nags), 1)
        node = node.variation(0)
        self.assertTrue(chess.pgn.NAG_BRILLIANT_MOVE in node.nags)
        self.assertEqual(len(node.nags), 1)

    def test_tree_traversal(self):
        """root(), end(), is_main_line() and starts_variation() navigation."""
        game = chess.pgn.Game()
        node = game.add_variation(chess.Move(chess.E2, chess.E4))
        # Fixed: add_variation() takes a Move, not two squares. Previously
        # chess.D4 was silently passed as the comment argument.
        alternative_node = game.add_variation(chess.Move(chess.D2, chess.D4))
        end_node = node.add_variation(chess.Move(chess.E7, chess.E5))
        self.assertEqual(game.root(), game)
        self.assertEqual(node.root(), game)
        self.assertEqual(alternative_node.root(), game)
        self.assertEqual(end_node.root(), game)
        self.assertEqual(game.end(), end_node)
        self.assertEqual(node.end(), end_node)
        self.assertEqual(end_node.end(), end_node)
        self.assertEqual(alternative_node.end(), alternative_node)
        self.assertTrue(game.is_main_line())
        self.assertTrue(node.is_main_line())
        self.assertTrue(end_node.is_main_line())
        self.assertFalse(alternative_node.is_main_line())
        self.assertFalse(game.starts_variation())
        self.assertFalse(node.starts_variation())
        self.assertFalse(end_node.starts_variation())
        self.assertTrue(alternative_node.starts_variation())

    def test_promote_demote(self):
        """promote()/demote() reorder variations; add_main_variation() prepends."""
        game = chess.pgn.Game()
        a = game.add_variation(chess.Move(chess.A2, chess.A3))
        b = game.add_variation(chess.Move(chess.B2, chess.B3))
        self.assertTrue(a.is_main_variation())
        self.assertFalse(b.is_main_variation())
        self.assertEqual(game.variation(0), a)
        self.assertEqual(game.variation(1), b)
        game.promote(b)
        self.assertTrue(b.is_main_variation())
        self.assertFalse(a.is_main_variation())
        self.assertEqual(game.variation(0), b)
        self.assertEqual(game.variation(1), a)
        game.demote(b)
        self.assertTrue(a.is_main_variation())
        c = game.add_main_variation(chess.Move(chess.C2, chess.C3))
        self.assertTrue(c.is_main_variation())
        self.assertFalse(a.is_main_variation())
        self.assertFalse(b.is_main_variation())
        self.assertEqual(game.variation(0), c)
        self.assertEqual(game.variation(1), a)
        self.assertEqual(game.variation(2), b)

    def test_scan_offsets(self):
        """scan_offsets() yields seekable offsets for each game in a file."""
        with open("data/pgn/kasparov-deep-blue-1997.pgn") as pgn:
            offsets = list(chess.pgn.scan_offsets(pgn))
            self.assertEqual(len(offsets), 6)
            pgn.seek(offsets[0])
            first_game = chess.pgn.read_game(pgn)
            self.assertEqual(first_game.headers["Event"], "IBM Man-Machine, New York USA")
            self.assertEqual(first_game.headers["Site"], "01")
            pgn.seek(offsets[5])
            sixth_game = chess.pgn.read_game(pgn)
            self.assertEqual(sixth_game.headers["Event"], "IBM Man-Machine, New York USA")
            self.assertEqual(sixth_game.headers["Site"], "06")

    def test_scan_headers(self):
        """scan_headers() allows filtering games by header before parsing."""
        with open("data/pgn/kasparov-deep-blue-1997.pgn") as pgn:
            offsets = (offset for offset, headers in chess.pgn.scan_headers(pgn)
                       if headers["Result"] == "1/2-1/2")
            first_drawn_game_offset = next(offsets)
            pgn.seek(first_drawn_game_offset)
            first_drawn_game = chess.pgn.read_game(pgn)
            self.assertEqual(first_drawn_game.headers["Site"], "03")
            self.assertEqual(first_drawn_game.variation(0).move, chess.Move.from_uci("d2d3"))

    def test_black_to_move(self):
        """Exporting a game that starts with black to move numbers correctly."""
        game = chess.pgn.Game()
        game.setup("8/8/4k3/8/4P3/4K3/8/8 b - - 0 17")
        node = game
        node = node.add_main_variation(chess.Move.from_uci("e6d6"))
        node = node.add_main_variation(chess.Move.from_uci("e3d4"))
        node = node.add_main_variation(chess.Move.from_uci("d6e6"))
        expected = textwrap.dedent("""\
            [Event "?"]
            [Site "?"]
            [Date "????.??.??"]
            [Round "?"]
            [White "?"]
            [Black "?"]
            [Result "*"]
            [SetUp "1"]
            [FEN "8/8/4k3/8/4P3/4K3/8/8 b - - 0 17"]

            17... Kd6 18. Kd4 Ke6 *""")
        self.assertEqual(str(game), expected)

    def test_result_termination_marker(self):
        """A bare result token in movetext populates the Result header."""
        pgn = StringIO("1. d4 1-0")
        game = chess.pgn.read_game(pgn)
        self.assertEqual(game.headers["Result"], "1-0")
class StockfishTestCase(unittest.TestCase):
    """Integration tests against a locally installed Stockfish binary.

    All tests are skipped when no "stockfish" executable is on the PATH.
    """
    def setUp(self):
        # Spawn the engine, or skip the whole test when it is unavailable.
        try:
            self.engine = chess.uci.popen_engine("stockfish")
        except OSError:
            self.skipTest("need stockfish")
        self.engine.uci()
    def tearDown(self):
        self.engine.quit()
    def test_forced_mates(self):
        """The engine finds the best move of simple mate-in-N EPD positions."""
        epds = [
            "1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - bm Qd1+; id \"BK.01\";",
            "6k1/N1p3pp/2p5/3n1P2/4K3/1P5P/P1Pr1r2/R1R5 b - - bm Rf4+; id \"Clausthal 2014\";",
        ]
        board = chess.Board()
        for epd in epds:
            operations = board.set_epd(epd)
            self.engine.ucinewgame()
            self.engine.position(board)
            result = self.engine.go(mate=5)
            self.assertTrue(result[0] in operations["bm"], operations["id"])
    def test_async(self):
        """go(async_callback=True) returns a future-like command object."""
        self.engine.ucinewgame()
        command = self.engine.go(movetime=1000, async_callback=True)
        self.assertFalse(command.done())
        command.result()
        self.assertTrue(command.done())
    def test_async_callback(self):
        """A callable async_callback is invoked once the command completes."""
        self.async_callback_called = False
        def async_callback(command):
            self.async_callback_called = True
        command = self.engine.isready(async_callback=async_callback)
        # Wait for the command to be executed.
        command.result()
        self.assertTrue(self.async_callback_called)
        self.assertTrue(command.done())
    def test_initialization(self):
        """Engine name and options are populated; option lookup is
        case-insensitive (hence the mixed-case keys below)."""
        self.assertTrue("Stockfish" in self.engine.name)
        self.assertEqual(self.engine.options["UCI_Chess960"].name, "UCI_Chess960")
        self.assertEqual(self.engine.options["uci_Chess960"].type, "check")
        self.assertEqual(self.engine.options["UCI_CHESS960"].default, False)
    def test_terminate(self):
        """An infinite search can be interrupted by tearDown's quit()."""
        self.engine.go(infinite=True, async_callback=True)
        time.sleep(0.1)
class SpurEngineTestCase(unittest.TestCase):
    """Tests spawning an engine through the optional spur shell library.

    Skipped unless both the spur package and a stockfish binary are
    available.
    """

    def setUp(self):
        try:
            import spur
            self.shell = spur.LocalShell()
        except ImportError:
            self.skipTest("need spur library")

        try:
            self.engine = chess.uci.spur_spawn_engine(self.shell, ["stockfish"])
        except OSError:
            self.skipTest("need stockfish")

    def test_local_shell(self):
        """An engine spawned via a local spur shell plays a forced mate."""
        self.engine.uci()

        self.engine.ucinewgame()

        # Find fools mate.
        board = chess.Board()
        board.push_san("g4")
        board.push_san("e5")
        board.push_san("f4")
        self.engine.position(board)
        bestmove, pondermove = self.engine.go(mate=1, movetime=2000)
        self.assertEqual(board.san(bestmove), "Qh4#")

        self.engine.quit()

    def test_terminate(self):
        """terminate() stops an engine that is in an infinite search."""
        self.engine.uci()
        self.engine.go(infinite=True, async_callback=True)
        time.sleep(0.1)
        self.engine.terminate()
        self.assertFalse(self.engine.is_alive())

    def test_kill(self):
        """kill() forcibly ends an engine that is in an infinite search."""
        self.engine.uci()
        self.engine.go(infinite=True, async_callback=True)
        time.sleep(0.1)
        self.engine.kill()
        self.assertFalse(self.engine.is_alive())

    def test_async_terminate(self):
        """terminate() also works asynchronously via async_callback."""
        command = self.engine.terminate(async_callback=True)
        command.result()
        self.assertTrue(command.done())
class UciEngineTestCase(unittest.TestCase):
    """Protocol-level UCI tests driven by a scripted mock process.

    ``self.mock.expect(command, responses)`` queues the next command the
    engine is required to send together with the canned reply lines the
    mock feeds back.  Expectations are strictly order-sensitive, so the
    statement order inside each test must not be rearranged.
    """

    def setUp(self):
        self.engine = chess.uci.Engine(chess.uci.MockProcess())
        self.mock = self.engine.process

        # Perform the initial UCI handshake once per test.
        self.mock.expect("uci", ("uciok", ))
        self.engine.uci()
        self.mock.assert_done()

    def tearDown(self):
        self.engine.terminate()
        self.mock.assert_terminated()

    def test_debug(self):
        """debug(True)/debug(False) send "debug on"/"debug off"."""
        self.mock.expect("debug on")
        self.engine.debug(True)
        self.mock.assert_done()

        self.mock.expect("debug off")
        self.engine.debug(False)
        self.mock.assert_done()

    def test_ponderhit(self):
        """ponderhit resolves a pending ponder search with its bestmove."""
        self.mock.expect("go ponder")
        self.mock.expect("isready", ("readyok", ))
        ponder_command = self.engine.go(ponder=True, async_callback=True)
        self.mock.expect("ponderhit", ("bestmove e2e4", ))
        self.engine.ponderhit()
        self.assertEqual(ponder_command.result().bestmove, chess.Move.from_uci("e2e4"))
        self.mock.assert_done()

    def test_kill(self):
        """kill() terminates the underlying process."""
        self.engine.kill()
        self.mock.assert_terminated()

    def test_go(self):
        """go() serializes its keyword arguments and parses bestmove lines."""
        # Infinite search restricted to explicit searchmoves, stopped
        # manually with stop().
        self.mock.expect("go infinite searchmoves e2e4 d2d4")
        self.mock.expect("isready", ("readyok", ))

        go_command = self.engine.go(searchmoves=[chess.Move.from_uci("e2e4"), chess.Move.from_uci("d2d4")], infinite=True, async_callback=True)

        self.mock.expect("stop", ("bestmove e2e4", ))
        self.engine.stop()
        bestmove, pondermove = go_command.result()
        self.mock.assert_done()
        self.assertEqual(bestmove, chess.Move.from_uci("e2e4"))
        self.assertTrue(pondermove is None)

        # All numeric search parameters end up on a single go command line.
        self.mock.expect("go wtime 1 btime 2 winc 3 binc 4 movestogo 5 depth 6 nodes 7 mate 8 movetime 9", (
            "bestmove d2d4 ponder d7d5",
        ))
        self.mock.expect("isready", ("readyok", ))
        self.engine.go(wtime=1, btime=2, winc=3, binc=4, movestogo=5, depth=6, nodes=7, mate=8, movetime=9)
        self.mock.assert_done()

        # "(none)" placeholders are parsed as None for both moves.
        self.mock.expect("go movetime 3333", (
            "bestmove (none) ponder (none)",
        ))
        self.mock.expect("isready", ("readyok", ))
        bestmove, pondermove = self.engine.go(movetime=3333)
        self.assertTrue(bestmove is None)
        self.assertTrue(pondermove is None)
        self.mock.assert_done()

        # A bestmove line without a ponder move also yields (None, None)
        # when the bestmove itself is "(none)".
        self.mock.expect("go mate 2", (
            "bestmove (none)",
        ))
        self.mock.expect("isready", ("readyok", ))
        bestmove, pondermove = self.engine.go(mate=2)
        self.assertTrue(bestmove is None)
        self.assertTrue(pondermove is None)
        self.mock.assert_done()

    def test_info_refutation(self):
        """`info refutation` lines are parsed into move -> refutation lists."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        # Set a position where d1h5 g6h5 would be a legal sequence of moves.
        fen = "8/8/6k1/8/8/8/1K6/3B4 w - - 0 1"
        self.mock.expect("position fen " + fen)
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(chess.Board(fen))

        self.engine.on_line_received("info refutation d1h5 g6h5")

        d1h5 = chess.Move.from_uci("d1h5")
        g6h5 = chess.Move.from_uci("g6h5")

        with handler as info:
            # One refuting reply was given for d1h5.
            self.assertEqual(len(info["refutation"][d1h5]), 1)
            self.assertEqual(info["refutation"][d1h5][0], g6h5)

        self.engine.on_line_received("info refutation d1h5")
        with handler as info:
            # A refutation with no follow-up moves is stored as None.
            self.assertTrue(info["refutation"][d1h5] is None)

    def test_info_string(self):
        """Everything after `string` is kept verbatim, even token look-alikes."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        self.engine.on_line_received("info string goes to end no matter score cp 4 what")
        with handler as info:
            self.assertEqual(info["string"], "goes to end no matter score cp 4 what")
            # The embedded "score cp 4" must NOT have been parsed as a score.
            self.assertFalse(1 in info["score"])

    def test_info_currline(self):
        """`info currline` lines map a CPU number to the line of moves."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        self.engine.on_line_received("info currline 0 e2e4 e7e5")
        with handler as info:
            self.assertEqual(info["currline"][0], [
                chess.Move.from_uci("e2e4"),
                chess.Move.from_uci("e7e5"),
            ])

        self.engine.on_line_received("info currline 1 string eol")
        with handler as info:
            # A currline cut short by a string token yields an empty line.
            self.assertEqual(info["currline"][1], [])

    def test_mate_score(self):
        """`score mate N` sets .mate and leaves .cp unset."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        self.engine.on_line_received("info depth 7 seldepth 8 score mate 3")
        with handler as info:
            self.assertEqual(info["score"][1].mate, 3)
            self.assertEqual(info["score"][1].cp, None)

    def test_info(self):
        """Miscellaneous info fields are parsed and reset per search."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        self.mock.expect("go", ("bestmove d2d4", ))
        self.mock.expect("isready", ("readyok", ))
        self.engine.go()

        self.engine.on_line_received("info tbhits 123 cpuload 456 hashfull 789")
        with handler as info:
            self.assertEqual(info["tbhits"], 123)
            self.assertEqual(info["cpuload"], 456)
            self.assertEqual(info["hashfull"], 789)

        self.mock.expect("go", ("bestmove e2e4", ))
        self.mock.expect("isready", ("readyok", ))
        self.engine.go()

        # Starting a new search clears the previous search's info fields.
        self.assertFalse("tbhits" in handler.info)
        self.assertFalse("cpuload" in handler.info)
        self.assertFalse("hashfull" in handler.info)

        self.engine.on_line_received("info time 987 nodes 654 nps 321")
        with handler as info:
            self.assertEqual(info["time"], 987)
            self.assertEqual(info["nodes"], 654)
            self.assertEqual(info["nps"], 321)

        self.mock.assert_done()

    def test_combo_option(self):
        """Combo option `var` values may themselves contain spaces."""
        self.engine.on_line_received("option name MyEnum type combo var Abc def var g h")
        self.assertEqual(self.engine.options["MyEnum"].type, "combo")
        self.assertEqual(self.engine.options["MyEnum"].var, ["Abc def", "g h"])

    def test_set_option(self):
        """setoption serializes booleans, None and strings correctly."""
        self.mock.expect("setoption name Yes value true")
        self.mock.expect("setoption name No value false")
        self.mock.expect("setoption name Null option value none")
        self.mock.expect("setoption name String option value value value")
        self.mock.expect("isready", ("readyok", ))

        # OrderedDict keeps the emission order deterministic for the mock.
        self.engine.setoption(collections.OrderedDict([
            ("Yes", True),
            ("No", False),
            ("Null option", None),
            ("String option", "value value"),
        ]))

        self.mock.assert_done()

    def test_multi_pv(self):
        """multipv indexes both the pv and the score; default index is 1."""
        handler = chess.uci.InfoHandler()
        self.engine.info_handlers.append(handler)

        self.engine.on_line_received("info score cp 777 multipv 13 pv e2e4")
        self.engine.on_line_received("info score cp 888 pv d2d4")

        with handler as info:
            # Principal variations.
            self.assertEqual(info["pv"][13][0], chess.Move.from_uci("e2e4"))
            self.assertEqual(info["pv"][1][0], chess.Move.from_uci("d2d4"))

            # Score is relative to multipv as well.
            self.assertEqual(info["score"][13].cp, 777)
            self.assertEqual(info["score"][1].cp, 888)

    def test_castling_move_conversion(self):
        """Castling moves are converted between standard and Chess960 forms."""
        # Setup a position where white can castle on the next move.
        fen = "rnbqkbnr/pppppppp/8/8/8/4PN2/PPPPBPPP/RNBQK2R w KQkq - 1 1"
        board = chess.Board(fen)
        self.mock.expect("position fen " + fen)
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(board)
        self.mock.assert_done()

        # Expect the standard castling move notation e1g1 and respond with it.
        self.mock.expect("go movetime 70 searchmoves a2a3 e1g1", (
            "bestmove e1g1",
        ))
        self.mock.expect("isready", ("readyok", ))
        bestmove, pondermove = self.engine.go(movetime=70, searchmoves=[
            board.parse_san("a3"),
            board.parse_san("O-O"),
        ])
        # NOTE(review): assertTrue with two arguments only checks the first
        # is truthy; these look intended as assertEqual(from_square, E1) etc.
        self.assertTrue(bestmove.from_square, chess.E1)
        self.assertTrue(bestmove.to_square, chess.H1)
        self.mock.assert_done()

        # Assert that we can change to UCI_Chess960 mode.
        self.assertFalse(self.engine.uci_chess960)
        self.mock.expect("setoption name uCi_CheSS960 value true")
        self.mock.expect("isready", ("readyok", ))
        self.engine.setoption({"uCi_CheSS960": True})
        self.assertTrue(self.engine.uci_chess960)
        self.mock.assert_done()

        # Expect a Shredder FEN during for the position command.
        self.mock.expect("position fen rnbqkbnr/pppppppp/8/8/8/4PN2/PPPPBPPP/RNBQK2R w HAha - 1 1")
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(board)
        self.mock.assert_done()

        # Check that castling move conversion is now disabled.
        self.mock.expect("go movetime 70 searchmoves a2a3 e1h1", (
            "bestmove e1h1",
        ))
        self.mock.expect("isready", ("readyok", ))
        bestmove, pondermove = self.engine.go(movetime=70, searchmoves=[
            board.parse_san("a3"),
            board.parse_san("O-O"),
        ])
        self.assertTrue(bestmove.from_square, chess.E1)
        self.assertTrue(bestmove.to_square, chess.H1)
        self.mock.assert_done()

    def test_castling_ponder(self):
        """Ponder moves in castling notation are converted like bestmoves."""
        # Setup position.
        fen = "rnbqkb1r/pp1ppppp/5n2/2p5/4P3/5N2/PPPPBPPP/RNBQK2R b KQkq - 3 3"
        board = chess.Board(fen, chess960=True)
        self.mock.expect("position fen " + fen)
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(board)

        # Test castling moves as ponder moves: the engine's e1g1 reply is
        # converted to the Chess960 king-takes-rook form e1h1.
        self.mock.expect("go depth 15", ("bestmove f6e4 ponder e1g1", ))
        self.mock.expect("isready", ("readyok", ))
        bestmove, ponder = self.engine.go(depth=15)
        self.assertEqual(bestmove, chess.Move.from_uci("f6e4"))
        self.assertEqual(ponder, chess.Move.from_uci("e1h1"))

        self.mock.assert_done()

    def test_invalid_castling_rights(self):
        """Non-standard castling rights fall back to sending the final FEN."""
        fen = "3qk3/4pp2/5r2/8/8/8/3PP1P1/4K1R1 b G - 0 1"
        board = chess.Board(fen, chess960=True)
        board.push_san("Rf5")

        # White can castle with the G-side rook, which is not possible in
        # standard chess. The UCI module should just send the final FEN,
        # show a warning and hope for the best.
        self.mock.expect("position fen 3qk3/4pp2/8/5r2/8/8/3PP1P1/4K1R1 w K - 1 2")
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(board)
        self.mock.assert_done()

        # Activate Chess960 mode.
        self.mock.expect("setoption name UCI_Chess960 value true")
        self.mock.expect("isready", ("readyok", ))
        self.engine.setoption({"UCI_Chess960": True})

        # Then those castling rights should work fine.
        self.mock.expect("position fen " + fen + " moves f6f5")
        self.mock.expect("isready", ("readyok", ))
        self.engine.position(board)
        self.mock.assert_done()
class UciOptionMapTestCase(unittest.TestCase):
    """Tests for chess.uci.OptionMap, a case-insensitive mapping."""

    def test_equality(self):
        """Maps compare equal iff items match; key case is ignored."""
        a = chess.uci.OptionMap()
        b = chess.uci.OptionMap()
        c = chess.uci.OptionMap()
        self.assertEqual(a, b)

        # Same value under differently-cased keys: still equal.
        a["fOO"] = "bAr"
        b["foo"] = "bAr"
        # Different value: unequal even though the key matches.
        c["fOo"] = "bar"
        self.assertEqual(a, b)
        self.assertEqual(b, a)
        for left, right in ((a, c), (c, a), (b, c)):
            self.assertNotEqual(left, right)

        # An extra entry breaks equality in both directions.
        b["hello"] = "world"
        self.assertNotEqual(a, b)
        self.assertNotEqual(b, a)

    def test_len(self):
        """len() follows insertions and deletions."""
        opts = chess.uci.OptionMap()
        self.assertEqual(len(opts), 0)

        opts["key"] = "value"
        self.assertEqual(len(opts), 1)

        del opts["key"]
        self.assertEqual(len(opts), 0)
class SyzygyTestCase(unittest.TestCase):
    """Tests for Syzygy endgame tablebase probing.

    Relies on the table files shipped under data/syzygy and the EPD
    test suite in data/endgame.epd.
    """

    def test_calc_key(self):
        """Material key from a board matches the key from a table filename."""
        board = chess.Board("8/8/8/5N2/5K2/2kB4/8/8 b - - 0 1")
        key_from_board = chess.syzygy.calc_key(board)
        key_from_filename = chess.syzygy.calc_key_from_filename("KBNvK")
        self.assertEqual(key_from_board, key_from_filename)

    def test_filenames(self):
        """filenames() enumerates the expected material configurations."""
        self.assertTrue("KPPvKN" in chess.syzygy.filenames())
        self.assertTrue("KNNPvKN" in chess.syzygy.filenames())
        self.assertTrue("KQRNvKR" in chess.syzygy.filenames())
        self.assertTrue("KRRRvKR" in chess.syzygy.filenames())
        self.assertTrue("KRRvKRR" in chess.syzygy.filenames())
        self.assertTrue("KRNvKRP" in chess.syzygy.filenames())
        self.assertTrue("KRPvKP" in chess.syzygy.filenames())

    def test_probe_pawnless_wdl_table(self):
        """WDL probes against a pawnless table (KBNvK).

        Expected values follow the table convention used throughout
        these asserts: 2 win, 0 draw, -2 loss for the side to move.
        """
        wdl = chess.syzygy.WdlTable("data/syzygy", "KBNvK")
        wdl.init_table_wdl()

        board = chess.Board("8/8/8/5N2/5K2/2kB4/8/8 b - - 0 1")
        self.assertEqual(wdl.probe_wdl_table(board), -2)

        board = chess.Board("7B/5kNK/8/8/8/8/8/8 w - - 0 1")
        self.assertEqual(wdl.probe_wdl_table(board), 2)

        board = chess.Board("N7/8/2k5/8/7K/8/8/B7 w - - 0 1")
        self.assertEqual(wdl.probe_wdl_table(board), 2)

        board = chess.Board("8/8/1NkB4/8/7K/8/8/8 w - - 1 1")
        self.assertEqual(wdl.probe_wdl_table(board), 0)

        board = chess.Board("8/8/8/2n5/2b1K3/2k5/8/8 w - - 0 1")
        self.assertEqual(wdl.probe_wdl_table(board), -2)

        wdl.close()

    def test_probe_wdl_table(self):
        """WDL probes against a pawnful table (KRvKP)."""
        wdl = chess.syzygy.WdlTable("data/syzygy", "KRvKP")
        wdl.init_table_wdl()

        board = chess.Board("8/8/2K5/4P3/8/8/8/3r3k b - - 1 1")
        self.assertEqual(wdl.probe_wdl_table(board), 0)

        board = chess.Board("8/8/2K5/8/4P3/8/8/3r3k b - - 1 1")
        self.assertEqual(wdl.probe_wdl_table(board), 2)

        wdl.close()

    def test_probe_dtz_table_piece(self):
        """DTZ probes for a piece-only table (KRvKN)."""
        dtz = chess.syzygy.DtzTable("data/syzygy", "KRvKN")
        dtz.init_table_dtz()

        # Pawnless position with white to move.
        board = chess.Board("7n/6k1/4R3/4K3/8/8/8/8 w - - 0 1")
        self.assertEqual(dtz.probe_dtz_table(board, 2), (0, -1))

        # Same position with black to move.
        board = chess.Board("7n/6k1/4R3/4K3/8/8/8/8 b - - 1 1")
        self.assertEqual(dtz.probe_dtz_table(board, -2), (8, 1))

        dtz.close()

    def test_probe_dtz_table_pawn(self):
        """DTZ probes for a table with pawns (KNvKP)."""
        dtz = chess.syzygy.DtzTable("data/syzygy", "KNvKP")
        dtz.init_table_dtz()

        board = chess.Board("8/1K6/1P6/8/8/8/6n1/7k w - - 0 1")
        self.assertEqual(dtz.probe_dtz_table(board, 2), (2, 1))

        dtz.close()

    def test_probe_wdl_tablebase(self):
        """Probing through the high-level Tablebases collection."""
        tablebases = chess.syzygy.Tablebases()
        # 70 table files are expected to be found in the fixture directory.
        self.assertEqual(tablebases.open_directory("data/syzygy"), 70)

        # Winning KRvKB.
        board = chess.Board("7k/6b1/6K1/8/8/8/8/3R4 b - - 12 7")
        self.assertEqual(tablebases.probe_wdl_table(board), -2)

        # Drawn KBBvK.
        board = chess.Board("7k/8/8/4K3/3B4/4B3/8/8 b - - 12 7")
        self.assertEqual(tablebases.probe_wdl_table(board), 0)

        # Winning KBBvK.
        board = chess.Board("7k/8/8/4K2B/8/4B3/8/8 w - - 12 7")
        self.assertEqual(tablebases.probe_wdl_table(board), 2)

        tablebases.close()

    def test_wdl_ep(self):
        """probe_wdl accounts for en passant while the raw table does not."""
        tablebases = chess.syzygy.Tablebases("data/syzygy")

        # Winning KPvKP because of en passant.
        board = chess.Board("8/8/8/k2Pp3/8/8/8/4K3 w - e6 0 2")

        # If there was no en passant this would be a draw.
        self.assertEqual(tablebases.probe_wdl_table(board), 0)

        # But it is a win.
        self.assertEqual(tablebases.probe_wdl(board), 2)

        tablebases.close()

    def test_dtz_ep(self):
        """probe_dtz accounts for en passant; probe_dtz_no_ep does not."""
        tablebases = chess.syzygy.Tablebases("data/syzygy")

        board = chess.Board("8/8/8/8/2pP4/2K5/4k3/8 b - d3 0 1")
        self.assertEqual(tablebases.probe_dtz_no_ep(board), -1)
        self.assertEqual(tablebases.probe_dtz(board), 1)

        tablebases.close()

    def test_testsuite(self):
        """Cross-check every EPD in data/endgame.epd against the tables.

        Each EPD line carries expected wdl_table, wdl and dtz operations.
        """
        tablebases = chess.syzygy.Tablebases("data/syzygy")

        board = chess.Board()

        with open("data/endgame.epd") as epds:
            for line, epd in enumerate(epds):
                extra = board.set_epd(epd)

                wdl_table = tablebases.probe_wdl_table(board)
                self.assertEqual(
                    wdl_table, extra["wdl_table"],
                    "Expecting wdl_table {0} for {1}, got {2} (at line {3})".format(extra["wdl_table"], board.fen(), wdl_table, line + 1))

                wdl = tablebases.probe_wdl(board)
                self.assertEqual(
                    wdl, extra["wdl"],
                    "Expecting wdl {0} for {1}, got {2} (at line {3})".format(extra["wdl"], board.fen(), wdl, line + 1))

                dtz = tablebases.probe_dtz(board)
                self.assertEqual(
                    dtz, extra["dtz"],
                    "Expecting dtz {0} for {1}, got {2} (at line {3})".format(extra["dtz"], board.fen(), dtz, line + 1))

        tablebases.close()
class GaviotaTestCase(unittest.TestCase):
    """Tests for Gaviota endgame tablebase probing.

    Skipped unless the native libgtb library and the fixture tables
    under data/gaviota are available.
    """

    def setUp(self):
        try:
            self.tablebases = chess.gaviota.open_tablebases("data/gaviota")
        except (OSError, RuntimeError):
            self.skipTest("need libgtb")

    def tearDown(self):
        self.tablebases.close()

    def test_probe_dtm(self):
        """Distance-to-mate: sign reflects the side to move's prospects."""
        board = chess.Board("6K1/8/8/8/4Q3/8/6k1/8 b - - 0 1")
        self.assertEqual(self.tablebases.probe_dtm(board), -14)

        board = chess.Board("8/3K4/8/8/8/4r3/4k3/8 b - - 0 1")
        self.assertEqual(self.tablebases.probe_dtm(board), 21)

    def test_probe_wdl(self):
        """Win/draw/loss probing: 0 is a draw, 1 a win for the side to move."""
        board = chess.Board("8/8/4K3/2n5/8/3k4/8/8 w - - 0 1")
        self.assertEqual(self.tablebases.probe_wdl(board), 0)

        board = chess.Board("8/8/1p2K3/8/8/3k4/8/8 b - - 0 1")
        self.assertEqual(self.tablebases.probe_wdl(board), 1)
if __name__ == "__main__":
    # Turn on debug logging when a verbosity flag was passed, then hand
    # control to the unittest runner.
    verbose_flags = {"-v", "--verbose"}
    if verbose_flags.intersection(sys.argv):
        logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
denim2x/python-chess
|
test.py
|
Python
|
gpl-3.0
| 86,529
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import gtk
import gobject
from NDImplementationDecorator import NDImplementationDecorator
__author__ = 'Shamal Faily'
class DomainPropertyNodeDialog:
    """GTK dialog displaying the details of a domain property node."""

    def __init__(self, objt, environmentName, dupProperty, overridingEnvironment, builder):
        """Populate the dialog widgets from the domain property object.

        environmentName, dupProperty and overridingEnvironment are
        accepted for interface compatibility; they are not used here.
        """
        self.window = builder.get_object("DomainPropertyNodeDialog")
        decorator = NDImplementationDecorator(builder)
        self.decorator = decorator

        # Fill the name, type and multi-line description controls.
        decorator.updateTextCtrl("domainPropertyNameCtrl", objt.name())
        decorator.updateTextCtrl("domainPropertyTypeCtrl", objt.type())
        decorator.updateMLTextCtrl("domainPropertyDescriptionCtrl", objt.description())

        self.window.resize(200, 200)

    def on_roleOkButton_clicked(self, callback_data):
        """Close the dialog when the OK button is clicked."""
        self.window.destroy()

    def show(self):
        """Display the dialog window."""
        self.window.show()
|
nathanbjenx/cairis
|
cairis/gui/DomainPropertyNodeDialog.py
|
Python
|
apache-2.0
| 1,553
|
import math
import nltk
import time
import sys
# Constants to be used by you when you fill the functions
START_SYMBOL = '*'
STOP_SYMBOL = 'STOP'
MINUS_INFINITY_SENTENCE_LOG_PROB = -1000


def log2(x):
    """Return the base-2 logarithm of x.

    Defined with def rather than a named lambda (PEP 8 E731); the
    callable interface is unchanged.
    """
    return math.log(x, 2)
# TODO: IMPLEMENT THIS FUNCTION
# Calculates unigram, bigram, and trigram probabilities given a training corpus
# training_corpus: is a list of the sentences. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function outputs three python dictionaries, where the keys are
# tuples expressing the ngram and the value is the log probability of that
# ngram
def calc_probabilities(training_corpus):
    """Compute log2 unigram, bigram and trigram probabilities for a corpus.

    training_corpus: list of sentences; each sentence is a string of
    space-separated tokens.  NOTE: each sentence is mutated in place to
    be wrapped with START_SYMBOL and STOP_SYMBOL.

    Returns three dicts mapping ngram tuples to log2 conditional
    probabilities (count of ngram over count of its (n-1)-gram prefix;
    for unigrams the prefix is the empty tuple, i.e. the token total).

    NOTE(review): Python 2 code (print statements, xrange).
    """
    # unigram_tuples = []
    # bigram_tuples = []
    # trigram_tuples = []

    # Raw ngram counts and counts of each ngram's (n-1)-gram prefix.
    unigram_count = {}
    bigram_count = {}
    trigram_count = {}
    unigram_count_pnodes = {}
    bigram_count_pnodes = {}
    trigram_count_pnodes = {}
    unigram_total = 0
    bigram_total = 0
    trigram_total = 0
    print 'total {} sentences'.format(len(training_corpus))
    for i in xrange(0, len(training_corpus)):
        if i % 3000 == 0:
            print 'processing ', i, 'th sentence...'
        # Wrap the sentence with the start/stop markers (in place).
        training_corpus[i] = START_SYMBOL + ' ' + training_corpus[i]
        training_corpus[i] = training_corpus[i] + ' ' + STOP_SYMBOL
        # training_corpus[i].replace('.',' ' + STOP_SYMBOL)
        tokens = training_corpus[i].split()
        unigram_tuples_i = list((token,) for token in tokens)
        bigram_tuples_i = list(nltk.bigrams(tokens))
        trigram_tuples_i = list(nltk.trigrams(tokens))
        unigram_total += len(unigram_tuples_i)
        bigram_total += len(bigram_tuples_i)
        trigram_total += len(trigram_tuples_i)

        for item in unigram_tuples_i:
            # The start symbol is excluded from unigram counts.
            if item in [(START_SYMBOL,)]:
                continue
            unigram_count.setdefault(item, 0)
            unigram_count_pnodes.setdefault(item[0:-1], 0)
            unigram_count[item] = unigram_count[item] + 1
            # item[0:-1] is () for unigrams: a single total-count bucket.
            unigram_count_pnodes[item[0:-1]] = unigram_count_pnodes[item[0:-1]] + 1

        for item in bigram_tuples_i:
            bigram_count.setdefault(item, 0)
            bigram_count_pnodes.setdefault(item[0:-1], 0)
            bigram_count[item] = bigram_count[item] + 1
            bigram_count_pnodes[item[0:-1]] = bigram_count_pnodes[item[0:-1]] + 1

        for item in trigram_tuples_i:
            trigram_count.setdefault(item, 0)
            trigram_count_pnodes.setdefault(item[0:-1], 0)
            trigram_count[item] = trigram_count[item] + 1
            trigram_count_pnodes[item[0:-1]] = trigram_count_pnodes[item[0:-1]] + 1

    # log2 P(ngram) = log2 count(ngram) - log2 count(prefix).
    unigram_p = {
        item: math.log(unigram_count[item], 2) -
        math.log(unigram_count_pnodes[item[0:-1]], 2)
        for item in set(unigram_count)}

    bigram_p = {
        item: math.log(bigram_count[item], 2) -
        math.log(bigram_count_pnodes[item[0:-1]], 2)
        for item in set(bigram_count)}

    trigram_p = {
        item: math.log(trigram_count[item], 2) -
        math.log(trigram_count_pnodes[item[0:-1]], 2)
        for item in set(trigram_count)}

    print "calc_probabilities finished!"
    return unigram_p, bigram_p, trigram_p
# Prints the output for q1
# Each input is a python dictionary where keys are a tuple expressing the
# ngram, and the value is the log probability of that ngram
def q1_output(unigrams, bigrams, trigrams, filename):
    """Write the question 1 n-gram log probabilities to *filename*.

    Each input is a dict mapping an n-gram tuple to its log probability.
    One line is written per n-gram, e.g. "BIGRAM the cat -3.21", with
    UNIGRAM lines first, then BIGRAM, then TRIGRAM, each section in
    sorted key order (same output as the original implementation).

    Improvements over the original: the file handle is managed with a
    context manager (it leaked if a write raised), the redundant
    per-section flush() calls are gone (close flushes), and the three
    near-identical loops are collapsed into one parameterized loop.
    """
    with open(filename, 'w') as outfile:
        for label, table in (('UNIGRAM', unigrams),
                             ('BIGRAM', bigrams),
                             ('TRIGRAM', trigrams)):
            for ngram in sorted(table.keys()):
                # ' '.join spells out the tuple's tokens in order.
                outfile.write(label + ' ' + ' '.join(ngram) + ' ' +
                              str(table[ngram]) + '\n')
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence
# ngram_p: python dictionary of probabilities of uni-, bi- and trigrams.
# n: size of the ngram you want to use to compute probabilities
# corpus: list of sentences to score. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function must return a python list of scores, where the first
# element is the score of the first sentence, etc.
def score(ngram_p, n, corpus):
    """Score each sentence as the sum of its n-gram log probabilities.

    ngram_p: dict mapping ngram tuples to log probabilities.
    n: ngram order to use (1, 2 or 3).
    corpus: list of sentences (strings of space-separated tokens);
    sentences are scored as-is — no START/STOP wrapping happens here.
    Returns a list of scores, one per sentence; a sentence containing an
    unseen ngram scores MINUS_INFINITY_SENTENCE_LOG_PROB.

    NOTE(review): Python 2 code (print statements, error.message).
    """
    print "scoring corpus for ", n, "-grams"
    scores = []
    for i, sentence in enumerate(corpus):
        ngram_tuples = None
        score_i = 0
        if i % 10000 == 0:
            print 'scoring ', i, 'th sentence...'
        tokens = sentence.split()
        if n == 1:
            ngram_tuples = list([(token,) for token in tokens])
        elif n == 2:
            ngram_tuples = list(nltk.bigrams(tokens))
        elif n == 3:
            ngram_tuples = list(nltk.trigrams(tokens))
        try:
            # Start-symbol unigrams have no probability entry, so skip them.
            score_i = sum([ngram_p[gram] for gram in ngram_tuples
                           if gram not in [(START_SYMBOL,)]])
        except KeyError as error:
            score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
            # NOTE(review): `gram` here relies on Python 2 leaking the list
            # comprehension variable out of its scope; this line would raise
            # NameError on Python 3.
            print 'ngram_tuple ', gram, ' not in dict ', error.message
        scores.append(score_i)
    return scores
# Outputs a score to a file
# scores: list of scores
# filename: is the output file name
def score_output(scores, filename):
    """Write one sentence score per line to *filename* (overwriting it).

    scores: list of numeric sentence scores.
    filename: path of the output file.

    Improvements over the original: the file is managed with a context
    manager (the handle leaked if a write raised), and the loop variable
    no longer shadows the module-level score() function.
    """
    with open(filename, 'w') as outfile:
        for value in scores:
            outfile.write(str(value) + '\n')
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence with a linearly interpolated model
# Each ngram argument is a python dictionary where the keys are tuples that express an ngram and the value is the log probability of that ngram
# Like score(), this function returns a python list of scores
# TODO: `EM` algorithm to find the optimal weights.
def linearscore(unigrams, bigrams, trigrams, corpus):
    """Score sentences with an equal-weight linearly interpolated model.

    For every trigram (w1, w2, w3) in a sentence the interpolated
    probability is (1/3)*P(w3|w1,w2) + (1/3)*P(w3|w2) + (1/3)*P(w3);
    the probabilities come from the given log2 dicts, so each term is
    2 ** logprob.  Returns one log2 score per sentence; a sentence with
    any unseen ngram scores MINUS_INFINITY_SENTENCE_LOG_PROB.

    NOTE(review): Python 2 code (print statements, e.message).
    """
    scores = []
    # Fixed equal interpolation weights (see TODO above about EM-tuning).
    weights = (1. / 3, 1. / 3, 1. / 3,)
    for i, sentence in enumerate(corpus):
        if i % 3000 == 0:
            print 'linearscore ', i, 'th sentence...'
        score_i = 0
        tokens = sentence.split()
        trigram_tuples = list(nltk.trigrams(tokens))
        try:
            for trigram in trigram_tuples:
                # trigram[0:] is the full trigram, trigram[1:] its bigram
                # suffix, trigram[2:] the final unigram.
                score_i += log2(sum([weights[0] * 2 ** trigrams[trigram[0:]],
                                     weights[1] * 2 ** bigrams[trigram[1:]],
                                     weights[2] * 2 ** unigrams[trigram[2:]],
                                     ]))
        except KeyError as e:
            score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
            # `trigram` is the for-loop variable still bound to the ngram
            # whose lookup failed.
            print i, 'th sentence', 'ngram ', trigram, ' not in dict', e.message
        scores.append(score_i)
    return scores
# Input corpora are read from data/; all generated files go to output/.
DATA_PATH = 'data/'
OUTPUT_PATH = 'output/'
# DO NOT MODIFY THE MAIN FUNCTION
def main():
    """Run the whole of Part A: train, score, interpolate, write outputs.

    Code is intentionally left untouched per the assignment's
    "DO NOT MODIFY THE MAIN FUNCTION" directive; comments only.
    """
    # start timer
    time.clock()

    # get data
    infile = open(DATA_PATH + 'Brown_train.txt', 'r')
    corpus = infile.readlines()
    infile.close()

    # calculate ngram probabilities (question 1)
    unigrams, bigrams, trigrams = calc_probabilities(corpus)

    # question 1 output
    q1_output(unigrams, bigrams, trigrams, OUTPUT_PATH + 'A1.txt')

    # score sentences (question 2)
    uniscores = score(unigrams, 1, corpus)
    biscores = score(bigrams, 2, corpus)
    triscores = score(trigrams, 3, corpus)

    # question 2 output
    score_output(uniscores, OUTPUT_PATH + 'A2.uni.txt')
    score_output(biscores, OUTPUT_PATH + 'A2.bi.txt')
    score_output(triscores, OUTPUT_PATH + 'A2.tri.txt')

    # linear interpolation (question 3)
    linearscores = linearscore(unigrams, bigrams, trigrams, corpus)

    # question 3 output
    score_output(linearscores, OUTPUT_PATH + 'A3.txt')

    # open Sample1 and Sample2 (question 5)
    infile = open(DATA_PATH + 'Sample1.txt', 'r')
    sample1 = infile.readlines()
    infile.close()
    infile = open(DATA_PATH + 'Sample2.txt', 'r')
    sample2 = infile.readlines()
    infile.close()

    # score the samples
    sample1scores = linearscore(unigrams, bigrams, trigrams, sample1)
    sample2scores = linearscore(unigrams, bigrams, trigrams, sample2)

    # question 5 output
    score_output(sample1scores, OUTPUT_PATH + 'Sample1_scored.txt')
    score_output(sample2scores, OUTPUT_PATH + 'Sample2_scored.txt')

    # print total time to run Part A
    print("Part A time: " + str(time.clock()) + ' sec')

if __name__ == "__main__":
    main()
|
Alexoner/mooc
|
coursera/nlpintro-001/Assignment2/solutionsA.py
|
Python
|
apache-2.0
| 9,462
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.