repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
pfilsx/calamares | src/modules/displaymanager/main.py | 2 | 25040 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Copyright 2014-2017, Philip Müller <philm@manjaro.org>
# Copyright 2014-2015, Teo Mrnjavac <teo@kde.org>
# Copyright 2014, Kevin Kofler <kevin.kofler@chello.at>
# Copyright 2017, Alf Gaida <agaida@siduction.org>
# Copyright 2017, Bernhard Landauer <oberon@manjaro.org>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import os
import collections
import re
import libcalamares
import configparser
# Lightweight record describing a desktop environment: the absolute path of
# the session executable to probe for, and the basename of its xsessions
# .desktop file (without extension).
DesktopEnvironment = collections.namedtuple(
    'DesktopEnvironment', ['executable', 'desktop_file']
)
# Known desktop environments, in priority order: find_desktop_environment()
# returns the first entry whose executable and .desktop file both exist in
# the target system.
desktop_environments = [
    DesktopEnvironment('/usr/bin/startkde', 'plasma'),  # KDE Plasma 5
    DesktopEnvironment('/usr/bin/startkde', 'kde-plasma'),  # KDE Plasma 4
    DesktopEnvironment('/usr/bin/gnome-session', 'gnome'),
    DesktopEnvironment('/usr/bin/startxfce4', 'xfce'),
    DesktopEnvironment('/usr/bin/cinnamon-session-cinnamon', 'cinnamon'),
    DesktopEnvironment('/usr/bin/mate-session', 'mate'),
    DesktopEnvironment('/usr/bin/enlightenment_start', 'enlightenment'),
    DesktopEnvironment('/usr/bin/lxsession', 'LXDE'),
    DesktopEnvironment('/usr/bin/startlxde', 'LXDE'),
    DesktopEnvironment('/usr/bin/lxqt-session', 'lxqt'),
    DesktopEnvironment('/usr/bin/pekwm', 'pekwm'),
    DesktopEnvironment('/usr/bin/pantheon-session', 'pantheon'),
    DesktopEnvironment('/usr/bin/budgie-session', 'budgie-session'),
    DesktopEnvironment('/usr/bin/budgie-desktop', 'budgie-desktop'),
    DesktopEnvironment('/usr/bin/i3', 'i3'),
    DesktopEnvironment('/usr/bin/startdde', 'deepin'),
    DesktopEnvironment('/usr/bin/openbox-session', 'openbox')
]
def find_desktop_environment(root_mount_point):
    """
    Return the first installed desktop environment found in the target.

    An entry from ``desktop_environments`` counts as installed when both
    its session executable and its xsessions .desktop file exist below
    the root mount point.

    :param root_mount_point: path the target system is mounted at.
    :return: a DesktopEnvironment namedtuple, or None when none is found.
    """
    for candidate in desktop_environments:
        executable = "{!s}{!s}".format(
            root_mount_point, candidate.executable
        )
        session_file = "{!s}/usr/share/xsessions/{!s}.desktop".format(
            root_mount_point, candidate.desktop_file
        )
        if os.path.exists(executable) and os.path.exists(session_file):
            return candidate
    return None
def have_dm(dm_name, root_mount_point):
    """
    Check whether the given display manager is installed in the target.

    A display manager counts as installed when its binary exists in
    either /usr/bin or /usr/sbin below the root mount point.

    :param dm_name: executable name of the display manager.
    :param root_mount_point: path the target system is mounted at.
    :return: True when the binary is present, False otherwise.
    """
    candidates = (
        "{!s}/usr/bin/{!s}".format(root_mount_point, dm_name),
        "{!s}/usr/sbin/{!s}".format(root_mount_point, dm_name),
    )
    return any(os.path.exists(path) for path in candidates)
def set_autologin(username,
                  displaymanager,
                  default_desktop_environment,
                  root_mount_point):
    """
    Enables automatic login for the installed desktop managers.

    :param username: the user to log in automatically, or None to
        disable autologin.
    :param displaymanager: str
        The displaymanager for which to configure autologin.
    :param default_desktop_environment: DesktopEnvironment namedtuple or
        None; where the DM supports it, used as the autologin session.
    :param root_mount_point: path the target system is mounted at.
    :return: None on success, or a (title, description) tuple when a
        display manager's configuration file is missing.
    """
    do_autologin = True
    if username is None:
        do_autologin = False
    if "mdm" == displaymanager:
        # Systems with MDM as Desktop Manager
        mdm_conf_path = os.path.join(root_mount_point, "etc/mdm/custom.conf")
        if os.path.exists(mdm_conf_path):
            # Rewrite the existing file, replacing the [daemon] header
            # line with the header plus the autologin keys.
            with open(mdm_conf_path, 'r') as mdm_conf:
                text = mdm_conf.readlines()
            with open(mdm_conf_path, 'w') as mdm_conf:
                for line in text:
                    if '[daemon]' in line:
                        if do_autologin:
                            line = (
                                "[daemon]\n"
                                "AutomaticLogin={!s}\n"
                                "AutomaticLoginEnable=True\n".format(username)
                            )
                        else:
                            line = (
                                "[daemon]\n"
                                "AutomaticLoginEnable=False\n"
                            )
                    mdm_conf.write(line)
        else:
            # No existing config: create a minimal one from scratch.
            with open(mdm_conf_path, 'w') as mdm_conf:
                mdm_conf.write(
                    '# Calamares - Configure automatic login for user\n'
                )
                mdm_conf.write('[daemon]\n')
                if do_autologin:
                    mdm_conf.write("AutomaticLogin={!s}\n".format(username))
                    mdm_conf.write('AutomaticLoginEnable=True\n')
                else:
                    mdm_conf.write('AutomaticLoginEnable=False\n')
    if "gdm" == displaymanager:
        # Systems with GDM as Desktop Manager
        gdm_conf_path = os.path.join(root_mount_point, "etc/gdm/custom.conf")
        if os.path.exists(gdm_conf_path):
            # Same in-place [daemon] rewrite as for MDM above.
            with open(gdm_conf_path, 'r') as gdm_conf:
                text = gdm_conf.readlines()
            with open(gdm_conf_path, 'w') as gdm_conf:
                for line in text:
                    if '[daemon]' in line:
                        if do_autologin:
                            line = (
                                "[daemon]\n"
                                "AutomaticLogin={!s}\n"
                                "AutomaticLoginEnable=True\n".format(username)
                            )
                        else:
                            line = "[daemon]\nAutomaticLoginEnable=False\n"
                    gdm_conf.write(line)
        else:
            with open(gdm_conf_path, 'w') as gdm_conf:
                gdm_conf.write(
                    '# Calamares - Enable automatic login for user\n'
                )
                gdm_conf.write('[daemon]\n')
                if do_autologin:
                    gdm_conf.write("AutomaticLogin={!s}\n".format(username))
                    gdm_conf.write('AutomaticLoginEnable=True\n')
                else:
                    gdm_conf.write('AutomaticLoginEnable=False\n')
    # NOTE(review): this AccountsService block is at function level, so it
    # runs for *every* displaymanager value when autologin is enabled, not
    # only for gdm — presumably intentional in this fork, but confirm
    # against upstream Calamares.
    if (do_autologin):
        accountservice_dir = "{!s}/var/lib/AccountsService/users".format(
            root_mount_point
        )
        userfile_path = "{!s}/{!s}".format(accountservice_dir, username)
        if os.path.exists(accountservice_dir):
            with open(userfile_path, "w") as userfile:
                userfile.write("[User]\n")
                if default_desktop_environment is not None:
                    userfile.write("XSession={!s}\n".format(
                        default_desktop_environment.desktop_file))
                userfile.write("Icon=\n")
    if "kdm" == displaymanager:
        # Systems with KDM as Desktop Manager
        kdm_conf_path = os.path.join(
            root_mount_point, "usr/share/config/kdm/kdmrc"
        )
        # Check which path is in use: SUSE does something else.
        # Also double-check the default setting. Pick the first
        # one that exists in the target.
        for candidate_kdmrc in (
            "usr/share/config/kdm/kdmrc",
            "usr/share/kde4/config/kdm/kdmrc",
        ):
            p = os.path.join(root_mount_point, candidate_kdmrc)
            if os.path.exists(p):
                kdm_conf_path = p
                break
        text = []
        if os.path.exists(kdm_conf_path):
            with open(kdm_conf_path, 'r') as kdm_conf:
                text = kdm_conf.readlines()
            with open(kdm_conf_path, 'w') as kdm_conf:
                for line in text:
                    if 'AutoLoginEnable=' in line:
                        if do_autologin:
                            line = 'AutoLoginEnable=true\n'
                        else:
                            line = 'AutoLoginEnable=false\n'
                    if do_autologin and 'AutoLoginUser=' in line:
                        line = "AutoLoginUser={!s}\n".format(username)
                    kdm_conf.write(line)
        else:
            return (
                "Cannot write KDM configuration file",
                "KDM config file {!s} does not exist".format(kdm_conf_path)
            )
    if "lxdm" == displaymanager:
        # Systems with LXDM as Desktop Manager
        lxdm_conf_path = os.path.join(root_mount_point, "etc/lxdm/lxdm.conf")
        text = []
        if os.path.exists(lxdm_conf_path):
            with open(lxdm_conf_path, 'r') as lxdm_conf:
                text = lxdm_conf.readlines()
            with open(lxdm_conf_path, 'w') as lxdm_conf:
                for line in text:
                    if 'autologin=' in line:
                        if do_autologin:
                            line = "autologin={!s}\n".format(username)
                        else:
                            line = "# autologin=\n"
                    lxdm_conf.write(line)
        else:
            return (
                "Cannot write LXDM configuration file",
                "LXDM config file {!s} does not exist".format(lxdm_conf_path)
            )
    if "lightdm" == displaymanager:
        # Systems with LightDM as Desktop Manager
        # Ideally, we should use configparser for the ini conf file,
        # but we just do a simple text replacement for now, as it
        # worksforme(tm)
        lightdm_conf_path = os.path.join(
            root_mount_point, "etc/lightdm/lightdm.conf"
        )
        text = []
        if os.path.exists(lightdm_conf_path):
            with open(lightdm_conf_path, 'r') as lightdm_conf:
                text = lightdm_conf.readlines()
            with open(lightdm_conf_path, 'w') as lightdm_conf:
                for line in text:
                    if 'autologin-user=' in line:
                        if do_autologin:
                            line = "autologin-user={!s}\n".format(username)
                        else:
                            line = "#autologin-user=\n"
                    lightdm_conf.write(line)
        else:
            return (
                "Cannot write LightDM configuration file",
                "LightDM config file {!s} does not exist".format(
                    lightdm_conf_path
                )
            )
    if "slim" == displaymanager:
        # Systems with Slim as Desktop Manager
        slim_conf_path = os.path.join(root_mount_point, "etc/slim.conf")
        text = []
        if os.path.exists(slim_conf_path):
            with open(slim_conf_path, 'r') as slim_conf:
                text = slim_conf.readlines()
            with open(slim_conf_path, 'w') as slim_conf:
                for line in text:
                    if 'auto_login' in line:
                        if do_autologin:
                            line = 'auto_login yes\n'
                        else:
                            line = 'auto_login no\n'
                    if do_autologin and 'default_user' in line:
                        line = "default_user {!s}\n".format(username)
                    slim_conf.write(line)
        else:
            return (
                "Cannot write SLIM configuration file",
                "SLIM config file {!s} does not exist".format(slim_conf_path)
            )
    if "sddm" == displaymanager:
        # Systems with Sddm as Desktop Manager
        # sddm.conf is real INI, so configparser is safe here.
        sddm_conf_path = os.path.join(root_mount_point, "etc/sddm.conf")
        sddm_config = configparser.ConfigParser(strict=False)
        # Make everything case sensitive
        sddm_config.optionxform = str
        if os.path.isfile(sddm_conf_path):
            sddm_config.read(sddm_conf_path)
        if 'Autologin' not in sddm_config:
            sddm_config.add_section('Autologin')
        if do_autologin:
            sddm_config.set('Autologin', 'User', username)
        elif sddm_config.has_option('Autologin', 'User'):
            sddm_config.remove_option('Autologin', 'User')
        # The session key is written even when autologin is disabled.
        if default_desktop_environment is not None:
            sddm_config.set(
                'Autologin',
                'Session',
                default_desktop_environment.desktop_file
            )
        with open(sddm_conf_path, 'w') as sddm_config_file:
            sddm_config.write(sddm_config_file, space_around_delimiters=False)
    return None
def run():
    """
    Configure display managers.

    We acquire a list of displaymanagers, either from config or (overridden)
    from globalstorage. This module will try to set up (including autologin)
    all the displaymanagers in the list, in that specific order. Most distros
    will probably only ship one displaymanager.

    If a displaymanager is in the list but not installed, a debugging message
    is printed and the entry ignored.

    :return: None on success, otherwise a (title, description) tuple
        describing the failure.
    """
    # Start from None so a missing key in *both* sources is reported as a
    # configuration error below instead of raising a NameError.
    displaymanagers = None
    if "displaymanagers" in libcalamares.job.configuration:
        displaymanagers = libcalamares.job.configuration["displaymanagers"]
    # A list in global storage (set by an earlier module) overrides the
    # job configuration.
    if libcalamares.globalstorage.contains("displayManagers"):
        displaymanagers = libcalamares.globalstorage.value("displayManagers")
    if displaymanagers is None:
        return (
            "No display managers selected for the displaymanager module.",
            "The displaymanagers list is empty or undefined in both "
            "globalstorage and displaymanager.conf."
        )
    username = libcalamares.globalstorage.value("autologinUser")
    root_mount_point = libcalamares.globalstorage.value("rootMountPoint")
    # Bugfix: test for the same camelCase key that is read below; the
    # original checked "default_desktop_environment", so a configured
    # defaultDesktopEnvironment entry was silently ignored.
    if "defaultDesktopEnvironment" in libcalamares.job.configuration:
        entry = libcalamares.job.configuration["defaultDesktopEnvironment"]
        default_desktop_environment = DesktopEnvironment(
            entry["executable"], entry["desktopFile"]
        )
    else:
        default_desktop_environment = find_desktop_environment(
            root_mount_point
        )
    if "basicSetup" in libcalamares.job.configuration:
        enable_basic_setup = libcalamares.job.configuration["basicSetup"]
    else:
        enable_basic_setup = False
    # Setup slim
    if "slim" in displaymanagers:
        if not have_dm("slim", root_mount_point):
            libcalamares.utils.debug("slim selected but not installed")
            displaymanagers.remove("slim")
    # Setup sddm
    if "sddm" in displaymanagers:
        if not have_dm("sddm", root_mount_point):
            libcalamares.utils.debug("sddm selected but not installed")
            displaymanagers.remove("sddm")
    # setup lightdm
    if "lightdm" in displaymanagers:
        if have_dm("lightdm", root_mount_point):
            lightdm_conf_path = os.path.join(
                root_mount_point, "etc/lightdm/lightdm.conf"
            )
            if enable_basic_setup:
                libcalamares.utils.target_env_call(
                    ['mkdir', '-p', '/run/lightdm']
                )
                if libcalamares.utils.target_env_call(
                    ['getent', 'group', 'lightdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['groupadd', '-g', '620', 'lightdm']
                    )
                if libcalamares.utils.target_env_call(
                    ['getent', 'passwd', 'lightdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['useradd', '-c',
                         '"LightDM Display Manager"',
                         '-u', '620',
                         '-g', 'lightdm',
                         '-d', '/var/run/lightdm',
                         '-s', '/usr/bin/nologin',
                         'lightdm'
                         ]
                    )
                # Bugfix: pass the command as a single list; the original
                # passed three separate positional arguments.
                libcalamares.utils.target_env_call(
                    ['passwd', '-l', 'lightdm']
                )
                libcalamares.utils.target_env_call(
                    ['chown', '-R', 'lightdm:lightdm', '/run/lightdm']
                )
                # Bugfix: a comma was missing after '+r', so the mode and
                # the path were concatenated into one bogus argument.
                libcalamares.utils.target_env_call(
                    ['chmod', '+r', '/etc/lightdm/lightdm.conf']
                )
            if default_desktop_environment is not None:
                os.system(
                    "sed -i -e \"s/^.*user-session=.*/user-session={!s}/\" "
                    "{!s}".format(
                        default_desktop_environment.desktop_file,
                        lightdm_conf_path
                    )
                )
            # configure lightdm-greeter
            greeter_path = os.path.join(
                root_mount_point, "usr/share/xgreeters"
            )
            if (os.path.exists(greeter_path)):
                greeter_configured = False
                # configure first found lightdm-greeter
                for entry in os.scandir(greeter_path):
                    if entry.name.endswith('.desktop') \
                            and not greeter_configured:
                        greeter = entry.name.split('.')[0]
                        libcalamares.utils.debug(
                            "found greeter {!s}".format(greeter)
                        )
                        os.system(
                            "sed -i -e \"s/^.*greeter-session=.*"
                            "/greeter-session={!s}/\" {!s}".format(
                                greeter,
                                lightdm_conf_path
                            )
                        )
                        libcalamares.utils.debug(
                            "{!s} configured as greeter.".format(greeter)
                        )
                        greeter_configured = True
                if not greeter_configured:
                    # Bugfix: return a (title, description) tuple like every
                    # other failure path; a bare string is not a valid
                    # Calamares job result.
                    return ("Cannot configure LightDM",
                            "No lightdm greeter installed.")
        else:
            libcalamares.utils.debug("lightdm selected but not installed")
            displaymanagers.remove("lightdm")
    # Setup gdm
    if "gdm" in displaymanagers:
        if have_dm("gdm", root_mount_point):
            if enable_basic_setup:
                if libcalamares.utils.target_env_call(
                    ['getent', 'group', 'gdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['groupadd', '-g', '120', 'gdm']
                    )
                if libcalamares.utils.target_env_call(
                    ['getent', 'passwd', 'gdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['useradd',
                         '-c', '"Gnome Display Manager"',
                         '-u', '120',
                         '-g', 'gdm',
                         '-d', '/var/lib/gdm',
                         '-s', '/usr/bin/nologin',
                         'gdm'
                         ]
                    )
                libcalamares.utils.target_env_call(
                    ['passwd', '-l', 'gdm']
                )
                libcalamares.utils.target_env_call(
                    ['chown', '-R', 'gdm:gdm', '/var/lib/gdm']
                )
        else:
            libcalamares.utils.debug("gdm selected but not installed")
            displaymanagers.remove("gdm")
    # Setup mdm
    if "mdm" in displaymanagers:
        if have_dm("mdm", root_mount_point):
            if enable_basic_setup:
                if libcalamares.utils.target_env_call(
                    ['getent', 'group', 'mdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['groupadd', '-g', '128', 'mdm']
                    )
                if libcalamares.utils.target_env_call(
                    ['getent', 'passwd', 'mdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['useradd',
                         '-c', '"Linux Mint Display Manager"',
                         '-u', '128',
                         '-g', 'mdm',
                         '-d', '/var/lib/mdm',
                         '-s', '/usr/bin/nologin',
                         'mdm'
                         ]
                    )
                libcalamares.utils.target_env_call(
                    ['passwd', '-l', 'mdm']
                )
                libcalamares.utils.target_env_call(
                    ['chown', 'root:mdm', '/var/lib/mdm']
                )
                libcalamares.utils.target_env_call(
                    ['chmod', '1770', '/var/lib/mdm']
                )
            if default_desktop_environment is not None:
                os.system(
                    "sed -i \"s|default.desktop|{!s}.desktop|g\" "
                    "{!s}/etc/mdm/custom.conf".format(
                        default_desktop_environment.desktop_file,
                        root_mount_point
                    )
                )
        else:
            libcalamares.utils.debug("mdm selected but not installed")
            displaymanagers.remove("mdm")
    # Setup lxdm
    if "lxdm" in displaymanagers:
        if have_dm("lxdm", root_mount_point):
            if enable_basic_setup:
                if libcalamares.utils.target_env_call(
                    ['getent', 'group', 'lxdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['groupadd', '--system', 'lxdm']
                    )
                libcalamares.utils.target_env_call(
                    ['chgrp', '-R', 'lxdm', '/var/lib/lxdm']
                )
                libcalamares.utils.target_env_call(
                    ['chgrp', 'lxdm', '/etc/lxdm/lxdm.conf']
                )
                libcalamares.utils.target_env_call(
                    ['chmod', '+r', '/etc/lxdm/lxdm.conf']
                )
            if default_desktop_environment is not None:
                os.system(
                    "sed -i -e \"s|^.*session=.*|session={!s}|\" "
                    "{!s}/etc/lxdm/lxdm.conf".format(
                        default_desktop_environment.executable,
                        root_mount_point
                    )
                )
        else:
            libcalamares.utils.debug("lxdm selected but not installed")
            displaymanagers.remove("lxdm")
    # Setup kdm
    if "kdm" in displaymanagers:
        if have_dm("kdm", root_mount_point):
            if enable_basic_setup:
                if libcalamares.utils.target_env_call(
                    ['getent', 'group', 'kdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['groupadd', '-g', '135', 'kdm']
                    )
                if libcalamares.utils.target_env_call(
                    ['getent', 'passwd', 'kdm']
                ) != 0:
                    libcalamares.utils.target_env_call(
                        ['useradd',
                         '-u', '135',
                         '-g', 'kdm',
                         '-d', '/var/lib/kdm',
                         '-s', '/bin/false',
                         '-r',
                         '-M',
                         'kdm'
                         ]
                    )
                # Bugfix: the path must be absolute; 'var/lib/kdm' was
                # relative to the chroot's working directory.
                libcalamares.utils.target_env_call(
                    ['chown', '-R', '135:135', '/var/lib/kdm']
                )
        else:
            libcalamares.utils.debug("kdm selected but not installed")
            displaymanagers.remove("kdm")
    if username is not None:
        libcalamares.utils.debug(
            "Setting up autologin for user {!s}.".format(username)
        )
    else:
        libcalamares.utils.debug("Unsetting autologin.")
    libcalamares.globalstorage.insert("displayManagers", displaymanagers)
    # Apply autologin configuration for each remaining display manager,
    # collecting per-DM failure messages.
    dm_setup_message = []
    for dm in displaymanagers:
        dm_message = set_autologin(
            username, dm,
            default_desktop_environment,
            root_mount_point
        )
        if dm_message is not None:
            dm_setup_message.append("{!s}: {!s}".format(*dm_message))
    if dm_setup_message:
        return ("Display manager configuration was incomplete",
                "\n".join(dm_setup_message))
| gpl-3.0 |
milinbhakta/flaskjinja | flask1/Lib/site-packages/flask/testsuite/reqctx.py | 557 | 5960 | # -*- coding: utf-8 -*-
"""
flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
    """Tests for pushing, popping and copying Flask request contexts."""

    def test_teardown_on_pop(self):
        # teardown_request callbacks must fire on pop, not on push.
        buffer = []
        app = flask.Flask(__name__)
        @app.teardown_request
        def end_of_request(exception):
            buffer.append(exception)
        ctx = app.test_request_context()
        ctx.push()
        self.assert_equal(buffer, [])
        ctx.pop()
        self.assert_equal(buffer, [None])
    def test_proper_test_request_context(self):
        app = flask.Flask(__name__)
        app.config.update(
            SERVER_NAME='localhost.localdomain:5000'
        )
        @app.route('/')
        def index():
            return None
        @app.route('/', subdomain='foo')
        def sub():
            return None
        # SERVER_NAME drives both plain and subdomain URL generation.
        with app.test_request_context('/'):
            self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
        with app.test_request_context('/'):
            self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
        # A host that does not match SERVER_NAME raises a ValueError
        # with a descriptive message.
        try:
            with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
                pass
        except Exception as e:
            self.assert_true(isinstance(e, ValueError))
            self.assert_equal(str(e), "the server name provided " +
                "('localhost.localdomain:5000') does not match the " + \
                "server name from the WSGI environment ('localhost')")
        # Matching names (with and without explicit port) must not raise.
        try:
            app.config.update(SERVER_NAME='localhost')
            with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
                pass
        except ValueError as e:
            raise ValueError(
                "No ValueError exception should have been raised \"%s\"" % e
            )
        try:
            app.config.update(SERVER_NAME='localhost:80')
            with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
                pass
        except ValueError as e:
            raise ValueError(
                "No ValueError exception should have been raised \"%s\"" % e
            )
    def test_context_binding(self):
        # View functions can be called directly inside a request context.
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'Hello %s!' % flask.request.args['name']
        @app.route('/meh')
        def meh():
            return flask.request.url
        with app.test_request_context('/?name=World'):
            self.assert_equal(index(), 'Hello World!')
        with app.test_request_context('/meh'):
            self.assert_equal(meh(), 'http://localhost/meh')
        # After the with-blocks the context stack must be empty again.
        self.assert_true(flask._request_ctx_stack.top is None)
    def test_context_test(self):
        # flask.request is falsy outside a request context, truthy inside.
        app = flask.Flask(__name__)
        self.assert_false(flask.request)
        self.assert_false(flask.has_request_context())
        ctx = app.test_request_context()
        ctx.push()
        try:
            self.assert_true(flask.request)
            self.assert_true(flask.has_request_context())
        finally:
            ctx.pop()
    def test_manual_context_binding(self):
        # Explicit push/pop works like the with-statement form; calling a
        # view after pop must fail with a RuntimeError.
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'Hello %s!' % flask.request.args['name']
        ctx = app.test_request_context('/?name=World')
        ctx.push()
        self.assert_equal(index(), 'Hello World!')
        ctx.pop()
        try:
            index()
        except RuntimeError:
            pass
        else:
            self.assert_true(0, 'expected runtime error')
    def test_greenlet_context_copying(self):
        # A copied request context can be activated later in another
        # greenlet, after the original request has finished.
        app = flask.Flask(__name__)
        greenlets = []
        @app.route('/')
        def index():
            reqctx = flask._request_ctx_stack.top.copy()
            def g():
                self.assert_false(flask.request)
                self.assert_false(flask.current_app)
                with reqctx:
                    self.assert_true(flask.request)
                    self.assert_equal(flask.current_app, app)
                    self.assert_equal(flask.request.path, '/')
                    self.assert_equal(flask.request.args['foo'], 'bar')
                self.assert_false(flask.request)
                return 42
            greenlets.append(greenlet(g))
            return 'Hello World!'
        rv = app.test_client().get('/?foo=bar')
        self.assert_equal(rv.data, b'Hello World!')
        # The greenlet only runs here, outside the original request.
        result = greenlets[0].run()
        self.assert_equal(result, 42)
    def test_greenlet_context_copying_api(self):
        # Same as above, using the copy_current_request_context decorator.
        app = flask.Flask(__name__)
        greenlets = []
        @app.route('/')
        def index():
            reqctx = flask._request_ctx_stack.top.copy()
            @flask.copy_current_request_context
            def g():
                self.assert_true(flask.request)
                self.assert_equal(flask.current_app, app)
                self.assert_equal(flask.request.path, '/')
                self.assert_equal(flask.request.args['foo'], 'bar')
                return 42
            greenlets.append(greenlet(g))
            return 'Hello World!'
        rv = app.test_client().get('/?foo=bar')
        self.assert_equal(rv.data, b'Hello World!')
        result = greenlets[0].run()
        self.assert_equal(result, 42)
    # Disable test if we don't have greenlets available
    if greenlet is None:
        test_greenlet_context_copying = None
        test_greenlet_context_copying_api = None
def suite():
    """Build the unittest suite containing the request-context tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(RequestContextTestCase))
    return tests
| gpl-2.0 |
tysonclugg/django | tests/validation/test_picklable.py | 576 | 2010 | import pickle
from unittest import TestCase
from django.core.exceptions import ValidationError
class PickableValidationErrorTestCase(TestCase):
    """Checks that ValidationError instances survive a pickle round-trip."""

    def test_validationerror_is_picklable(self):
        def roundtrip(err):
            # Serialize and revive the error in one step.
            return pickle.loads(pickle.dumps(err))

        # A single message with a code, pickled directly and after being
        # re-wrapped in another ValidationError.
        for rewrap in (False, True):
            original = ValidationError('a', code='something')
            wrapped = ValidationError(original) if rewrap else original
            unpickled = roundtrip(wrapped)
            self.assertIs(unpickled, unpickled.error_list[0])
            self.assertEqual(original.message, unpickled.message)
            self.assertEqual(original.code, unpickled.code)

        # A list of plain messages, pickled directly and re-wrapped.
        for rewrap in (False, True):
            original = ValidationError(['a', 'b'])
            wrapped = ValidationError(original) if rewrap else original
            unpickled = roundtrip(wrapped)
            self.assertEqual(original.error_list[0].message,
                             unpickled.error_list[0].message)
            self.assertEqual(original.error_list[1].message,
                             unpickled.error_list[1].message)

        # Nested ValidationErrors keep args and error_list in sync.
        original = ValidationError([ValidationError('a'), ValidationError('b')])
        unpickled = roundtrip(original)
        self.assertIs(unpickled.args[0][0], unpickled.error_list[0])
        self.assertEqual(original.error_list[0].message,
                         unpickled.error_list[0].message)
        self.assertEqual(original.error_list[1].message,
                         unpickled.error_list[1].message)

        # A message dict survives with its structure intact.
        message_dict = {'field1': ['a', 'b'], 'field2': ['c', 'd']}
        unpickled = roundtrip(ValidationError(message_dict))
        self.assertEqual(unpickled.message_dict, message_dict)
| bsd-3-clause |
ric2b/Vivaldi-browser | chromium/testing/scripts/run_devtools_check.py | 10 | 2038 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a python script under an isolate
This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
argument:
--isolated-script-test-output=[FILENAME]
json is written to that file in the format produced by
common.parse_common_test_results.
This script is intended to be the base command invoked by the isolate,
followed by a subsequent Python script."""
import argparse
import json
import os
import sys
import common
# Add src/testing/ into sys.path for importing xvfb.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import xvfb
def main():
    """Parse isolated-script arguments, run the wrapped command, and
    write a gtest-style JSON result file.

    Returns the wrapped command's exit code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', type=str,
                        required=True)
    args, rest_args = parser.parse_known_args()

    # Flags forwarded by the isolated-script harness that this wrapper
    # does not support; drop them before re-invoking.
    ignored_flags = (
        '--isolated-script-test-chartjson-output',
        '--isolated-script-test-perf-output',
        '--isolated-script-test-filter',
    )
    filtered_args = [
        arg for arg in rest_args
        if not any(flag in arg for flag in ignored_flags)
    ]

    ret = common.run_command([sys.executable] + filtered_args)
    with open(args.isolated_script_test_output, 'w') as fp:
        json.dump({'valid': True,
                   'failures': ['failed'] if ret else []}, fp)
    return ret
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
    """Write the (single, empty) compile-target list as JSON to args.output."""
    targets = ['']
    json.dump(targets, args.output)
# Entry point: answer the recipe harness's 'compile_targets' query via
# common.run_script, otherwise run the wrapper itself.
if __name__ == '__main__':
    # Conform minimally to the protocol defined by ScriptTest.
    if 'compile_targets' in sys.argv:
        funcs = {
            'run': None,
            'compile_targets': main_compile_targets,
        }
        sys.exit(common.run_script(sys.argv[1:], funcs))
    sys.exit(main())
| bsd-3-clause |
yephper/django | tests/generic_relations_regress/tests.py | 1 | 23296 | <<<<<<< HEAD
from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
A, B, C, D, Address, Board, CharLink, Company, Contact, Content, Developer,
Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
)
class GenericRelationTests(TestCase):
    def test_inherited_models_content_type(self):
        """
        Test that GenericRelations on inherited classes use the correct content
        type.
        """
        p = Place.objects.create(name="South Park")
        r = Restaurant.objects.create(name="Chubby's")
        l1 = Link.objects.create(content_object=p)
        l2 = Link.objects.create(content_object=r)
        # Each object must only see the link created for its own class.
        self.assertEqual(list(p.links.all()), [l1])
        self.assertEqual(list(r.links.all()), [l2])
    def test_reverse_relation_pk(self):
        """
        Test that the correct column name is used for the primary key on the
        originating model of a query. See #12664.
        """
        # Person uses a non-default primary key column ("account").
        p = Person.objects.create(account=23, name='Chef')
        Address.objects.create(street='123 Anywhere Place',
                               city='Conifer', state='CO',
                               zipcode='80433', content_object=p)
        qs = Person.objects.filter(addresses__zipcode='80433')
        self.assertEqual(1, qs.count())
        self.assertEqual('Chef', qs[0].name)
    def test_charlink_delete(self):
        """Deleting the target of a CharField-PK generic FK must not crash."""
        oddrel = OddRelation1.objects.create(name='clink')
        CharLink.objects.create(content_object=oddrel)
        oddrel.delete()
    def test_textlink_delete(self):
        """Deleting the target of a TextField-PK generic FK must not crash."""
        oddrel = OddRelation2.objects.create(name='tlink')
        TextLink.objects.create(content_object=oddrel)
        oddrel.delete()
    def test_q_object_or(self):
        """
        Tests that SQL query parameters for generic relations are properly
        grouped when OR is used.

        Test for bug http://code.djangoproject.com/ticket/11535
        In this bug the first query (below) works while the second, with the
        query parameters the same but in reverse order, does not.

        The issue is that the generic relation conditions do not get properly
        grouped in parentheses.
        """
        note_contact = Contact.objects.create()
        org_contact = Contact.objects.create()
        Note.objects.create(note='note', content_object=note_contact)
        org = Organization.objects.create(name='org name')
        org.contacts.add(org_contact)
        # search with a non-matching note and a matching org name
        qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
                                    Q(organizations__name__icontains=r'org name'))
        self.assertIn(org_contact, qs)
        # search again, with the same query parameters, in reverse order
        qs = Contact.objects.filter(
            Q(organizations__name__icontains=r'org name') |
            Q(notes__note__icontains=r'other note'))
        self.assertIn(org_contact, qs)
    def test_join_reuse(self):
        """
        Chained filter() calls on the same generic relation must produce a
        separate join per filter rather than reusing a single join.
        """
        qs = Person.objects.filter(
            addresses__street='foo'
        ).filter(
            addresses__street='bar'
        )
        self.assertEqual(str(qs.query).count('JOIN'), 2)
    def test_generic_relation_ordering(self):
        """
        Test that ordering over a generic relation does not include extraneous
        duplicate results, nor excludes rows not participating in the relation.
        """
        p1 = Place.objects.create(name="South Park")
        p2 = Place.objects.create(name="The City")
        c = Company.objects.create(name="Chubby's Intl.")
        Link.objects.create(content_object=p1)
        Link.objects.create(content_object=c)
        places = list(Place.objects.order_by('links__id'))

        def count_places(place):
            # Number of times this place appears in the ordered result.
            return len([p for p in places if p.id == place.id])
        self.assertEqual(len(places), 2)
        self.assertEqual(count_places(p1), 1)
        self.assertEqual(count_places(p2), 1)
    def test_target_model_is_unsaved(self):
        """Test related to #13085"""
        # Fails with another, ORM-level error
        dev1 = Developer(name='Joe')
        note = Note(note='Deserves promotion', content_object=dev1)
        # Saving a GFK that points at an unsaved instance must fail.
        with self.assertRaises(IntegrityError):
            note.save()
    def test_target_model_len_zero(self):
        """Test for #13085 -- __len__() returns 0"""
        team1 = Team.objects.create(name='Backend devs')
        try:
            note = Note(note='Deserve a bonus', content_object=team1)
        except Exception as e:
            # A falsy (len() == 0) target must not be mistaken for None.
            if (issubclass(type(e), Exception) and
                    str(e) == 'Impossible arguments to GFK.get_content_type!'):
                self.fail("Saving model with GenericForeignKey to model instance whose "
                          "__len__ method returns 0 shouldn't fail.")
            raise e
        note.save()
    def test_target_model_nonzero_false(self):
        """Test related to #13085"""
        # __nonzero__() returns False -- This actually doesn't currently fail.
        # This test validates that
        g1 = Guild.objects.create(name='First guild')
        note = Note(note='Note for guild', content_object=g1)
        note.save()
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_gfk_to_model_with_empty_pk(self):
        """Test related to #13085"""
        # Saving model with GenericForeignKey to model instance with an
        # empty CharField PK
        b1 = Board.objects.create(name='')
        tag = Tag(label='VP', content_object=b1)
        tag.save()
    def test_ticket_20378(self):
        """
        filter()/exclude() by a Link instance must match on content type as
        well as object id (regression for #20378).
        """
        # Create a couple of extra HasLinkThing so that the autopk value
        # isn't the same for Link and HasLinkThing.
        hs1 = HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        hs3 = HasLinkThing.objects.create()
        hs4 = HasLinkThing.objects.create()
        l1 = Link.objects.create(content_object=hs3)
        l2 = Link.objects.create(content_object=hs4)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l1),
            [hs3], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l2),
            [hs4], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l2),
            [hs1, hs2, hs3], lambda x: x, ordered=False)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l1),
            [hs1, hs2, hs4], lambda x: x, ordered=False)
    def test_ticket_20564(self):
        """
        Lookups across a generic relation handle NULL flag values correctly
        (regression for #20564).
        """
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        c1 = C.objects.create(b=b1)
        c2 = C.objects.create(b=b2)
        c3 = C.objects.create(b=b3)
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        # b3 has no A at all; it must still match flag=None via LEFT JOIN.
        self.assertQuerysetEqual(
            C.objects.filter(b__a__flag=None),
            [c1, c3], lambda x: x
        )
        self.assertQuerysetEqual(
            C.objects.exclude(b__a__flag=None),
            [c2], lambda x: x
        )
    def test_ticket_20564_nullable_fk(self):
        """
        Same as test_ticket_20564, but crossing a nullable FK before the
        generic relation (regression for #20564).
        """
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        d1 = D.objects.create(b=b1)
        d2 = D.objects.create(b=b2)
        d3 = D.objects.create(b=b3)
        d4 = D.objects.create()
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        self.assertQuerysetEqual(
            D.objects.exclude(b__a__flag=None),
            [d2], lambda x: x
        )
        # d4 has b=NULL and must still match flag=None.
        self.assertQuerysetEqual(
            D.objects.filter(b__a__flag=None),
            [d1, d3, d4], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.filter(a__flag=None),
            [b1, b3], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.exclude(a__flag=None),
            [b2], lambda x: x
        )
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
l = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
# If content_type restriction isn't in the query's join condition,
# then wrong results are produced here as the link to b will also match
# (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, l.id)
l.delete()
# Now if we don't have proper left join, we will not produce any
# results at all here.
# clear cached results
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
l = Link.objects.create(content_object=hs2)
self.assertNotEqual(l.object_id, l.pk)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l.pk),
[hs2], lambda x: x)
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
form = GenericRelationForm()
self.assertIn('links', form.fields)
form = GenericRelationForm({'links': None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field('links')
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
place = Place.objects.create(name='My Place')
self.assertIn('GenericRelatedObjectManager', str(place.links))
=======
from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
A, Address, B, Board, C, CharLink, Company, Contact, Content, D, Developer,
Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
)
class GenericRelationTests(TestCase):
def test_inherited_models_content_type(self):
"""
Test that GenericRelations on inherited classes use the correct content
type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
Test that the correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name='Chef')
Address.objects.create(street='123 Anywhere Place',
city='Conifer', state='CO',
zipcode='80433', content_object=p)
qs = Person.objects.filter(addresses__zipcode='80433')
self.assertEqual(1, qs.count())
self.assertEqual('Chef', qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name='clink')
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name='tlink')
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_q_object_or(self):
"""
Tests that SQL query parameters for generic relations are properly
grouped when OR is used.
Test for bug http://code.djangoproject.com/ticket/11535
In this bug the first query (below) works while the second, with the
query parameters the same but in reverse order, does not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note='note', content_object=note_contact)
org = Organization.objects.create(name='org name')
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
Q(organizations__name__icontains=r'org name'))
self.assertIn(org_contact, qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r'org name') |
Q(notes__note__icontains=r'other note'))
self.assertIn(org_contact, qs)
def test_join_reuse(self):
qs = Person.objects.filter(
addresses__street='foo'
).filter(
addresses__street='bar'
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_generic_relation_ordering(self):
"""
Test that ordering over a generic relation does not include extraneous
duplicate results, nor excludes rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by('links__id'))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_is_unsaved(self):
"""Test related to #13085"""
# Fails with another, ORM-level error
dev1 = Developer(name='Joe')
note = Note(note='Deserves promotion', content_object=dev1)
with self.assertRaises(IntegrityError):
note.save()
def test_target_model_len_zero(self):
"""Test for #13085 -- __len__() returns 0"""
team1 = Team.objects.create(name='Backend devs')
try:
note = Note(note='Deserve a bonus', content_object=team1)
except Exception as e:
if (issubclass(type(e), Exception) and
str(e) == 'Impossible arguments to GFK.get_content_type!'):
self.fail("Saving model with GenericForeignKey to model instance whose "
"__len__ method returns 0 shouldn't fail.")
raise e
note.save()
def test_target_model_nonzero_false(self):
"""Test related to #13085"""
# __nonzero__() returns False -- This actually doesn't currently fail.
# This test validates that
g1 = Guild.objects.create(name='First guild')
note = Note(note='Note for guild', content_object=g1)
note.save()
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name='')
tag = Tag(label='VP', content_object=b1)
tag.save()
def test_ticket_20378(self):
# Create a couple of extra HasLinkThing so that the autopk value
# isn't the same for Link and HasLinkThing.
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
hs3 = HasLinkThing.objects.create()
hs4 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs3)
l2 = Link.objects.create(content_object=hs4)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l1),
[hs3], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l2),
[hs4], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l2),
[hs1, hs2, hs3], lambda x: x, ordered=False)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l1),
[hs1, hs2, hs4], lambda x: x, ordered=False)
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
C.objects.filter(b__a__flag=None),
[c1, c3], lambda x: x
)
self.assertQuerysetEqual(
C.objects.exclude(b__a__flag=None),
[c2], lambda x: x
)
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
D.objects.exclude(b__a__flag=None),
[d2], lambda x: x
)
self.assertQuerysetEqual(
D.objects.filter(b__a__flag=None),
[d1, d3, d4], lambda x: x
)
self.assertQuerysetEqual(
B.objects.filter(a__flag=None),
[b1, b3], lambda x: x
)
self.assertQuerysetEqual(
B.objects.exclude(a__flag=None),
[b2], lambda x: x
)
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
l = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
# If content_type restriction isn't in the query's join condition,
# then wrong results are produced here as the link to b will also match
# (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, l.id)
l.delete()
# Now if we don't have proper left join, we will not produce any
# results at all here.
# clear cached results
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
l = Link.objects.create(content_object=hs2)
self.assertNotEqual(l.object_id, l.pk)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l.pk),
[hs2], lambda x: x)
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
form = GenericRelationForm()
self.assertIn('links', form.fields)
form = GenericRelationForm({'links': None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field('links')
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
place = Place.objects.create(name='My Place')
self.assertIn('GenericRelatedObjectManager', str(place.links))
>>>>>>> 6448873197fa4e3df3f5f03201538dc57d7643d6
| bsd-3-clause |
horance-liu/tensorflow | tensorflow/python/estimator/run_config.py | 6 | 22046 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
_USE_DEFAULT = object()
# A list of the property names in RunConfig that the user is allowed to change.
_DEFAULT_REPLACEABLE_LIST = [
'model_dir',
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps'
]
_SAVE_CKPT_ERR = (
'`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
_TF_CONFIG_ENV = 'TF_CONFIG'
_TASK_ENV_KEY = 'task'
_TASK_TYPE_KEY = 'type'
_TASK_ID_KEY = 'index'
_CLUSTER_KEY = 'cluster'
_SERVICE_KEY = 'service'
_LOCAL_MASTER = ''
_GRPC_SCHEME = 'grpc://'
def _get_master(cluster_spec, task_type, task_id):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_get_master` does not expect empty cluster_spec.')
jobs = cluster_spec.jobs
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
if task_type not in jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if not 0 <= task_id < len(addresses):
raise ValueError(
'%d is not a valid task_id for task_type %s in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return _GRPC_SCHEME + addresses[task_id]
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_ps` does not expect empty cluster_spec.')
return len(cluster_spec.as_dict().get(TaskType.PS, []))
def _count_worker(cluster_spec, chief_task_type):
"""Counts the number of workers (including chief) in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_worker` does not expect empty cluster_spec.')
return (len(cluster_spec.as_dict().get(TaskType.WORKER, [])) +
len(cluster_spec.as_dict().get(chief_task_type, [])))
def _validate_service(service):
"""Validates the service key."""
if service is not None and not isinstance(service, dict):
raise TypeError(
'If "service" is set in TF_CONFIG, it must be a dict. Given %s' %
type(service))
return service
def _validate_task_type_and_task_id(cluster_spec, task_env, chief_task_type):
"""Validates the task type and index in `task_env` according to cluster."""
if chief_task_type not in cluster_spec.jobs:
raise ValueError(
'If "cluster" is set in TF_CONFIG, it must have one "%s" node.' %
chief_task_type)
if len(cluster_spec.job_tasks(chief_task_type)) > 1:
raise ValueError(
'The "cluster" in TF_CONFIG must have only one "%s" node.' %
chief_task_type)
task_type = task_env.get(_TASK_TYPE_KEY, None)
task_id = task_env.get(_TASK_ID_KEY, None)
if not task_type:
raise ValueError(
'If "cluster" is set in TF_CONFIG, task type must be set.')
if task_id is None:
raise ValueError(
'If "cluster" is set in TF_CONFIG, task index must be set.')
task_id = int(task_id)
# Check the task id bounds. Upper bound is not necessary as
# - for evaluator, there is no upper bound.
# - for non-evaluator, task id is upper bounded by the number of jobs in
# cluster spec, which will be checked later (when retrieving the `master`)
if task_id < 0:
raise ValueError('Task index must be non-negative number.')
return task_type, task_id
def _validate_save_ckpt_with_replaced_keys(new_copy, replaced_keys):
"""Validates the save ckpt properties."""
# Ensure one (and only one) of save_steps and save_secs is not None.
# Also, if user sets one save ckpt property, say steps, the other one (secs)
# should be set as None to improve usability.
save_steps = new_copy.save_checkpoints_steps
save_secs = new_copy.save_checkpoints_secs
if ('save_checkpoints_steps' in replaced_keys and
'save_checkpoints_secs' in replaced_keys):
# If user sets both properties explicitly, we need to error out if both
# are set or neither of them are set.
if save_steps is not None and save_secs is not None:
raise ValueError(_SAVE_CKPT_ERR)
elif 'save_checkpoints_steps' in replaced_keys and save_steps is not None:
new_copy._save_checkpoints_secs = None # pylint: disable=protected-access
elif 'save_checkpoints_secs' in replaced_keys and save_secs is not None:
new_copy._save_checkpoints_steps = None # pylint: disable=protected-access
def _validate_properties(run_config):
"""Validates the properties."""
def _validate(property_name, cond, message):
property_value = getattr(run_config, property_name)
if property_value is not None and not cond(property_value):
raise ValueError(message)
_validate('model_dir', lambda dir: dir,
message='model_dir should be non-empty')
_validate('save_summary_steps', lambda steps: steps >= 0,
message='save_summary_steps should be >= 0')
_validate('save_checkpoints_steps', lambda steps: steps >= 0,
message='save_checkpoints_steps should be >= 0')
_validate('save_checkpoints_secs', lambda secs: secs >= 0,
message='save_checkpoints_secs should be >= 0')
_validate('session_config',
lambda sc: isinstance(sc, config_pb2.ConfigProto),
message='session_config must be instance of ConfigProto')
_validate('keep_checkpoint_max', lambda keep_max: keep_max >= 0,
message='keep_checkpoint_max should be >= 0')
_validate('keep_checkpoint_every_n_hours', lambda keep_hours: keep_hours > 0,
message='keep_checkpoint_every_n_hours should be > 0')
_validate('log_step_count_steps', lambda num_steps: num_steps > 0,
message='log_step_count_steps should be > 0')
_validate('tf_random_seed', lambda seed: isinstance(seed, six.integer_types),
message='tf_random_seed must be integer.')
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
CHIEF = 'chief'
EVALUATOR = 'evaluator'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
def __init__(self,
model_dir=None,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_steps=_USE_DEFAULT,
save_checkpoints_secs=_USE_DEFAULT,
session_config=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100):
"""Constructs a RunConfig.
All distributed training related properties `cluster_spec`, `is_chief`,
`master` , `num_worker_replicas`, `num_ps_replicas`, `task_id`, and
`task_type` are set based on the `TF_CONFIG` environment variable, if the
pertinent information is present. The `TF_CONFIG` environment variable is a
JSON object with attributes: `cluster` and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the `TaskType` enums) to
a list of task addresses.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. ` When `TF_CONFIG` contains said information,
the following properties are set on this class:
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}. If
present, must have one and only one node in the `chief` attribute of
`cluster_spec`.
* `task_type` is set to `TF_CONFIG['task']['type']`. Must set if
`cluster_spec` is present; must be `worker` (the default value) if
`cluster_spec` is not set.
* `task_id` is set to `TF_CONFIG['task']['index']`. Must set if
`cluster_spec` is present; must be 0 (the default value) if
`cluster_spec` is not set.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` and `chief` attributes of `cluster_spec`. Defaults to 1.
* `is_chief` is determined based on `task_type` and `cluster`.
There is a special node with `task_type` as `evaluator`, which is not part
of the (training) `cluster_spec`. It handles the distributed evaluation job.
Example of non-chief node:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 4
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Example of chief node:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'chief', 'index': 0}})
config = ClusterConfig()
assert config.master == 'host0:2222'
assert config.task_id == 0
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 4
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'chief'
assert config.is_chief
```
Example of evaluator node (evaluator is not part of training cluster):
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'evaluator', 'index': 0}})
config = ClusterConfig()
assert config.master == ''
assert config.evaluator_master == ''
assert config.task_id == 0
assert config.num_ps_replicas == 0
assert config.num_worker_replicas == 0
assert config.cluster_spec == {}
assert config.task_type == 'evaluator'
assert not config.is_chief
```
N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,
`keep_checkpoint_max` might need to be adjusted accordingly, especially in
distributed training. For example, setting `save_checkpoints_secs` as 60
without adjusting `keep_checkpoint_max` (defaults to 5) leads to situation
that checkpoint would be garbage collected after 5 minutes. In distributed
training, the evaluation job starts asynchronously and might fail to load or
find the checkpoint due to race condition.
Args:
model_dir: directory where model parameters, graph, etc are saved. If
`None`, will use a default value set by the Estimator.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_steps: Save checkpoints every this many steps. Can not be
specified with `save_checkpoints_secs`.
save_checkpoints_secs: Save checkpoints every this many seconds. Can not
be specified with `save_checkpoints_steps`. Defaults to 600 seconds if
both `save_checkpoints_steps` and `save_checkpoints_secs` are not set
in constructor. If both `save_checkpoints_steps` and
`save_checkpoints_secs` are None, then checkpoints are disabled.
session_config: a ConfigProto used to set session parameters, or None.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec will be logged during training.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
are set.
"""
if (save_checkpoints_steps == _USE_DEFAULT and
save_checkpoints_secs == _USE_DEFAULT):
save_checkpoints_steps = None
save_checkpoints_secs = 600
elif save_checkpoints_secs == _USE_DEFAULT:
save_checkpoints_secs = None
elif save_checkpoints_steps == _USE_DEFAULT:
save_checkpoints_steps = None
elif (save_checkpoints_steps is not None and
save_checkpoints_secs is not None):
raise ValueError(_SAVE_CKPT_ERR)
RunConfig._replace(
self,
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
model_dir=model_dir,
tf_random_seed=tf_random_seed,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoints_steps,
save_checkpoints_secs=save_checkpoints_secs,
session_config=session_config,
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps)
self._init_distributed_setting_from_environment_var()
def _init_distributed_setting_from_environment_var(self):
"""Initialize distributed properties based on environment variable."""
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')
if tf_config:
logging.info('TF_CONFIG environment variable: %s', tf_config)
self._service = _validate_service(tf_config.get(_SERVICE_KEY))
self._cluster_spec = server_lib.ClusterSpec(tf_config.get(_CLUSTER_KEY, {}))
task_env = tf_config.get(_TASK_ENV_KEY, {})
if self._cluster_spec and TaskType.MASTER in self._cluster_spec.jobs:
return self._init_distributed_setting_from_environment_var_with_master(
tf_config)
if self._cluster_spec:
# Distributed mode.
self._task_type, self._task_id = _validate_task_type_and_task_id(
self._cluster_spec, task_env, TaskType.CHIEF)
if self._task_type != TaskType.EVALUATOR:
self._master = _get_master(
self._cluster_spec, self._task_type, self._task_id)
self._num_ps_replicas = _count_ps(self._cluster_spec)
self._num_worker_replicas = _count_worker(
self._cluster_spec, chief_task_type=TaskType.CHIEF)
else:
# Evaluator is not part of the training cluster.
self._cluster_spec = server_lib.ClusterSpec({})
self._master = _LOCAL_MASTER
self._num_ps_replicas = 0
self._num_worker_replicas = 0
self._is_chief = self._task_type == TaskType.CHIEF
else:
# Local mode.
self._task_type = task_env.get(_TASK_TYPE_KEY, TaskType.WORKER)
self._task_id = int(task_env.get(_TASK_ID_KEY, 0))
if self._task_type != TaskType.WORKER:
raise ValueError(
'If "cluster" is not set in TF_CONFIG, task type must be WORKER.')
if self._task_id != 0:
raise ValueError(
'If "cluster" is not set in TF_CONFIG, task index must be 0.')
self._master = ''
self._is_chief = True
self._num_ps_replicas = 0
self._num_worker_replicas = 1
def _init_distributed_setting_from_environment_var_with_master(self,
tf_config):
"""Initialize distributed properties for legacy cluster with `master`."""
# There is no tech reason, why user cannot have chief and master in the same
# cluster, but it is super confusing (which is really the chief?). So, block
# this case.
if TaskType.CHIEF in self._cluster_spec.jobs:
raise ValueError('If `master` node exists in `cluster`, job '
'`chief` is not supported.')
task_env = tf_config.get(_TASK_ENV_KEY, {})
self._task_type, self._task_id = _validate_task_type_and_task_id(
self._cluster_spec, task_env, TaskType.MASTER)
if self._task_type == TaskType.EVALUATOR:
raise ValueError('If `master` node exists in `cluster`, task_type '
'`evaluator` is not supported.')
self._master = _get_master(
self._cluster_spec, self._task_type, self._task_id)
self._num_ps_replicas = _count_ps(self._cluster_spec)
self._num_worker_replicas = _count_worker(
self._cluster_spec, chief_task_type=TaskType.MASTER)
self._is_chief = self._task_type == TaskType.MASTER
@property
def cluster_spec(self):
return self._cluster_spec
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def task_type(self):
return self._task_type
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def session_config(self):
return self._session_config
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
@property
def model_dir(self):
return self._model_dir
@property
def service(self):
"""Returns the platform defined (in TF_CONFIG) service dict."""
return self._service
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
Only the properties in the following list are allowed to be replaced:
- `model_dir`.
- `tf_random_seed`,
- `save_summary_steps`,
- `save_checkpoints_steps`,
- `save_checkpoints_secs`,
- `session_config`,
- `keep_checkpoint_max`,
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
Args:
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
return RunConfig._replace(
copy.deepcopy(self),
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
**kwargs)
@staticmethod
def _replace(config, allowed_properties_list=None, **kwargs):
"""See `replace`.
N.B.: This implementation assumes that for key named "foo", the underlying
property the RunConfig holds is "_foo" (with one leading underscore).
Args:
config: The RunConfig to replace the values of.
allowed_properties_list: The property name list allowed to be replaced.
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
allowed_properties_list = allowed_properties_list or []
for key, new_value in six.iteritems(kwargs):
if key in allowed_properties_list:
setattr(config, '_' + key, new_value)
continue
raise ValueError(
'Replacing {} is not supported. Allowed properties are {}.'.format(
key, allowed_properties_list))
_validate_save_ckpt_with_replaced_keys(config, kwargs.keys())
_validate_properties(config)
return config
| apache-2.0 |
blrm/openshift-tools | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/src/ansible/oc_label.py | 84 | 1037 | # pylint: skip-file
# flake8: noqa
def main():
    ''' ansible oc module for labels '''

    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list', 'add']),
            debug=dict(default=False, type='bool'),
            kind=dict(default='node', type='str',
                      choices=['node', 'pod', 'namespace']),
            name=dict(default=None, type='str'),
            namespace=dict(default=None, type='str'),
            labels=dict(default=None, type='list'),
            selector=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
        # mutually_exclusive expects a list of groups of parameter names,
        # e.g. [['name', 'selector']].  The previous value,
        # (['name', 'selector']), is just the bare list ['name', 'selector'],
        # so AnsibleModule treated each string as its own group and the
        # name/selector exclusivity was never enforced.
        mutually_exclusive=[['name', 'selector']],
    )

    results = OCLabel.run_ansible(module.params, module.check_mode)

    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| apache-2.0 |
GarrettHoffman/ThinkStats2 | code/nsfg2.py | 74 | 2153 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import thinkstats2
def MakeFrames():
    """Reads pregnancy data and partitions first babies and others.

    returns: DataFrames (all live births, first babies, others)
    """
    preg = ReadFemPreg()

    live_births = preg[preg.outcome == 1]
    first_babies = live_births[live_births.birthord == 1]
    other_babies = live_births[live_births.birthord != 1]

    # Sanity-check the expected NSFG 2006-2010 partition sizes.
    assert len(live_births) == 14292
    assert len(first_babies) == 6683
    assert len(other_babies) == 7609

    return live_births, first_babies, other_babies
def ReadFemPreg(dct_file='2006_2010_FemPregSetup.dct',
                dat_file='2006_2010_FemPreg.dat.gz'):
    """Reads the NSFG 2006-2010 pregnancy data.

    dct_file: string file name
    dat_file: string file name

    returns: DataFrame
    """
    # The Stata dictionary describes the fixed-width layout of the data file.
    dictionary = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
    frame = dictionary.ReadFixedWidth(dat_file, compression='gzip')
    CleanFemPreg(frame)
    return frame
def CleanFemPreg(df):
    """Recodes variables from the pregnancy frame, in place.

    df: DataFrame with columns agepreg, birthwgt_lb1, birthwgt_oz1, phase.
    Adds a totalwgt_lb column.
    """
    # mother's age is encoded in centiyears; convert to years
    df['agepreg'] = df['agepreg'] / 100.0

    # birthwgt_lb contains at least one bogus value (51 lbs); replace with
    # NaN.  Use .loc rather than chained assignment
    # (df.birthwgt_lb1[mask] = ...), which silently fails to modify df under
    # pandas copy-on-write semantics.
    df.loc[df.birthwgt_lb1 > 20, 'birthwgt_lb1'] = np.nan

    # replace 'not ascertained', 'refused', 'don't know' with NaN.
    # Assign the replaced columns back instead of calling replace(...,
    # inplace=True) on a column attribute, for the same copy-on-write reason.
    na_vals = [97, 98, 99]
    df['birthwgt_lb1'] = df['birthwgt_lb1'].replace(na_vals, np.nan)
    df['birthwgt_oz1'] = df['birthwgt_oz1'].replace(na_vals, np.nan)

    # birthweight is stored in two columns, lbs and oz;
    # combine them into a single column in lb
    df['totalwgt_lb'] = df['birthwgt_lb1'] + df['birthwgt_oz1'] / 16.0

    # due to a bug in ReadStataDct, the last variable gets clipped;
    # so for now set it to NaN
    df['phase'] = np.nan
def main():
    """Smoke test: exercise the read/clean/partition pipeline.

    MakeFrames asserts the expected partition sizes, so simply calling it
    validates the data files; the returned frames are discarded.
    """
    live, firsts, others = MakeFrames()


if __name__ == '__main__':
    main()
| gpl-3.0 |
JesseLivezey/plankton | pylearn2/optimization/minres.py | 42 | 15929 | """
Note: this code is inspired from the following matlab source :
http://www.stanford.edu/group/SOL/software/minres.html
"""
import theano
import theano.tensor as TT
from theano.sandbox.scan import scan
import numpy
from pylearn2.utils import constantX
from pylearn2.expr.basic import multiple_switch, symGivens2, \
sqrt_inner_product, inner_product
# Messages that match the flag value returned by the method: index i holds
# the human-readable meaning of termination flag i (see minres below).
messages = [
    ' beta1 = 0. The exact solution is x = 0. ',                    # 0
    ' A solution to (poss. singular) Ax = b found, given rtol. ',   # 1
    ' A least-squares solution was found, given rtol. ',            # 2
    ' A solution to (poss. singular) Ax = b found, given eps. ',    # 3
    ' A least-squares solution was found, given eps. ',             # 4
    ' x has converged to an eigenvector. ',                         # 5
    ' xnorm has exceeded maxxnorm. ',                               # 6
    ' Acond has exceeded Acondlim. ',                               # 7
    ' The iteration limit was reached. ',                           # 8
    ' A least-squares solution for singular LS problem, given eps. ',   # 9
    ' A least-squares solution for singular LS problem, given rtol.',   # 10
    ' A null vector obtained, given rtol. ',                        # 11
    ' Numbers are too small to continue computation ']              # 12
def minres(compute_Av,
           bs,
           rtol=constantX(1e-6),
           maxit=20,
           Ms=None,
           shift=constantX(0.),
           maxxnorm=constantX(1e15),
           Acondlim=constantX(1e16),
           profile=0):
    """
    Attempts to find the minimum-length and minimum-residual-norm
    solution :math:`x` to the system of linear equations :math:`A*x = b`
    or least squares problem :math:`\\min||Ax-b||`.

    The n-by-n coefficient matrix A must be symmetric (but need not be
    positive definite or invertible). The right-hand-side column vector
    b must have length n.

    .. note:: This code is inspired from
        http://www.stanford.edu/group/SOL/software/minres.html .

    Parameters
    ----------
    compute_Av : callable
        Callable returning the symbolic expression for
        `Av` (the product of matrix A with some vector v).
        `v` should be a list of tensors, where the vector v means
        the vector obtained by concatenating and flattening all tensors in v
    bs : list
        List of Theano expressions. We are looking to compute `A^-1\\dot bs`.
    rtol : float, optional
        Specifies the tolerance of the method. Default is 1e-6.
    maxit : int, positive, optional
        Specifies the maximum number of iterations. Default is 20.
    Ms : list
        List of theano expression of same shape as `bs`. The method uses
        these to precondition with diag(Ms)
    shift : float, optional
        Default is 0. Effectively solve the system (A - shift I) * x = b.
    maxxnorm : float, positive, optional
        Maximum bound on NORM(x). Default is 1e15.
    Acondlim : float, positive, optional
        Maximum bound on COND(A). Default is 1e16.
        NOTE(review): Acondlim is accepted but never referenced in the body
        below -- the Acond-based stopping test appears to be missing.
    profile : int, optional
        Passed through to `scan` to enable profiling of the inner loop.

    Returns
    -------
    x : list
        List of Theano tensor representing the solution
    flag : tensor_like
        Theano int scalar - convergence flag

            0. beta1 = 0. The exact solution is x = 0.
            1. A solution to (poss. singular) Ax = b found, given rtol.
            2. Pseudoinverse solution for singular LS problem, given rtol.
            3. A solution to (poss. singular) Ax = b found, given eps.
            4. Pseudoinverse solution for singular LS problem, given eps.
            5. x has converged to an eigenvector.
            6. xnorm has exceeded maxxnorm.
            7. Acond has exceeded Acondlim.
            8. The iteration limit was reached.
            9./10. It is a least squares problem but no converged
               solution yet.
    iter : int
        Iteration number at which x was computed: `0 <= iter <= maxit`.
    relres : float
        Real positive, the relative residual is defined as
        NORM(b-A*x)/(NORM(A) * NORM(x) + NORM(b)),
        computed recurrently here. If flag is 1 or 3, relres <= TOL.
    relAres : float
        Real positive, the relative-NORM(Ar) := NORM(Ar) / NORM(A)
        computed recurrently here. If flag is 2 or 4, relAres <= TOL.
    Anorm : float
        Real positive, estimate of matrix 2-norm of A.
    Acond : float
        Real positive, estimate of condition number of A with respect to
        2-norm.
    xnorm : float
        Non-negative positive, recurrently computed NORM(x)
    Axnorm : float
        Non-negative positive, recurrently computed NORM(A * x).

    References
    ----------
    .. [1] Choi, Sou-Cheng. Iterative Methods for Singular Linear
       Equations and Least-Squares Problems, PhD Dissertation,
       Stanford University, 2006.
    """
    if not isinstance(bs, (tuple, list)):
        bs = [bs]
        return_as_list = False
    else:
        bs = list(bs)
        return_as_list = True
    # NOTE(review): return_as_list is computed but never used below;
    # `sol` is always returned as a list.

    eps = constantX(1e-23)

    # Initialise
    beta1 = sqrt_inner_product(bs)

    #------------------------------------------------------------------
    # Set up p and v for the first Lanczos vector v1.
    # p = beta1 P' v1, where P = C**(-1).
    # v is really P' v1.
    #------------------------------------------------------------------
    r3s = [b for b in bs]
    r2s = [b for b in bs]
    r1s = [b for b in bs]
    if Ms is not None:
        # Preconditioned residual and preconditioned norm of b.
        r3s = [b / m for b, m in zip(bs, Ms)]
        beta1 = sqrt_inner_product(r3s, bs)
    #------------------------------------------------------------------
    ## Initialize other quantities.
    # Note that Anorm has been initialized by IsOpSym6.
    # ------------------------------------------------------------------
    bnorm = beta1
    n_params = len(bs)

    def loop(niter,
             beta,
             betan,
             phi,
             Acond,
             cs,
             dbarn,
             eplnn,
             rnorm,
             sn,
             Tnorm,
             rnorml,
             xnorm,
             Dnorm,
             gamma,
             pnorm,
             gammal,
             Axnorm,
             relrnorm,
             relArnorml,
             Anorm,
             flag,
             *args):
        # One MINRES iteration, expressed symbolically for theano.scan.
        # `args` packs six groups of n_params tensors (xs, r1s, r2s, r3s,
        # dls, ds); everything else is a scalar state.
        #-----------------------------------------------------------------
        ## Obtain quantities for the next Lanczos vector vk+1, k = 1, 2,...
        # The general iteration is similar to the case k = 1 with v0 = 0:
        #
        #   p1      = Operator * v1 - beta1 * v0,
        #   alpha1  = v1'p1,
        #   q2      = p2 - alpha1 * v1,
        #   beta2^2 = q2'q2,
        #   v2      = (1/beta2) q2.
        #
        # Again, p = betak P vk, where P = C**(-1).
        # .... more description needed.
        #-----------------------------------------------------------------
        xs = args[0 * n_params: 1 * n_params]
        r1s = args[1 * n_params: 2 * n_params]
        r2s = args[2 * n_params: 3 * n_params]
        r3s = args[3 * n_params: 4 * n_params]
        dls = args[4 * n_params: 5 * n_params]
        ds = args[5 * n_params: 6 * n_params]
        betal = beta
        beta = betan
        vs = [r3 / beta for r3 in r3s]
        r3s, upds = compute_Av(*vs)
        r3s = [r3 - shift * v for r3, v in zip(r3s, vs)]
        # Skip the v_{k-1} term on the very first iteration (v0 = 0).
        r3s = [TT.switch(TT.ge(niter, constantX(1.)),
                         r3 - (beta / betal) * r1,
                         r3) for r3, r1 in zip(r3s, r1s)]

        alpha = inner_product(r3s, vs)
        r3s = [r3 - (alpha / beta) * r2 for r3, r2 in zip(r3s, r2s)]
        r1s = [r2 for r2 in r2s]
        r2s = [r3 for r3 in r3s]
        if Ms is not None:
            r3s = [r3 / M for r3, M in zip(r3s, Ms)]
            betan = sqrt_inner_product(r2s, r3s)
        else:
            betan = sqrt_inner_product(r3s)
        pnorml = pnorm
        pnorm = TT.switch(TT.eq(niter, constantX(0.)),
                          TT.sqrt(TT.sqr(alpha) + TT.sqr(betan)),
                          TT.sqrt(TT.sqr(alpha) + TT.sqr(betan) +
                                  TT.sqr(beta)))

        #-----------------------------------------------------------------
        ## Apply previous rotation Qk-1 to get
        #   [dlta_k epln_{k+1}] = [cs  sn][dbar_k    0          ]
        #   [gbar_k dbar_{k+1} ]  [sn -cs][alpha_k   beta_{k+1} ].
        #-----------------------------------------------------------------
        dbar = dbarn
        epln = eplnn
        dlta = cs * dbar + sn * alpha
        gbar = sn * dbar - cs * alpha

        eplnn = sn * betan
        dbarn = -cs * betan

        ## Compute the current plane rotation Qk
        gammal2 = gammal
        gammal = gamma
        cs, sn, gamma = symGivens2(gbar, betan)
        tau = cs * phi
        phi = sn * phi
        Axnorm = TT.sqrt(TT.sqr(Axnorm) + TT.sqr(tau))
        # Update d
        dl2s = [dl for dl in dls]
        dls = [d for d in ds]
        ds = [TT.switch(TT.neq(gamma, constantX(0.)),
                        (v - epln * dl2 - dlta * dl) / gamma,
                        v)
              for v, dl2, dl in zip(vs, dl2s, dls)]
        d_norm = TT.switch(TT.neq(gamma, constantX(0.)),
                           sqrt_inner_product(ds),
                           constantX(numpy.inf))

        # Update x except if it will become too big
        xnorml = xnorm
        dl2s = [x for x in xs]
        xs = [x + tau * d for x, d in zip(xs, ds)]

        xnorm = sqrt_inner_product(xs)
        # Roll back the update (and set flag 6) if ||x|| exceeded maxxnorm.
        xs = [TT.switch(TT.ge(xnorm, maxxnorm),
                        dl2, x)
              for dl2, x in zip(dl2s, xs)]

        flag = TT.switch(TT.ge(xnorm, maxxnorm),
                         constantX(6.), flag)
        # Estimate various norms
        rnorml = rnorm  # ||r_{k-1}||
        Anorml = Anorm
        Acondl = Acond
        relrnorml = relrnorm
        flag_no_6 = TT.neq(flag, constantX(6.))
        Dnorm = TT.switch(flag_no_6,
                          TT.sqrt(TT.sqr(Dnorm) + TT.sqr(d_norm)),
                          Dnorm)
        xnorm = TT.switch(flag_no_6, sqrt_inner_product(xs), xnorm)
        rnorm = TT.switch(flag_no_6, phi, rnorm)
        relrnorm = TT.switch(flag_no_6,
                             rnorm / (Anorm * xnorm + bnorm),
                             relrnorm)
        Tnorm = TT.switch(flag_no_6,
                          TT.switch(TT.eq(niter, constantX(0.)),
                                    TT.sqrt(TT.sqr(alpha) + TT.sqr(betan)),
                                    TT.sqrt(TT.sqr(Tnorm) +
                                            TT.sqr(beta) +
                                            TT.sqr(alpha) +
                                            TT.sqr(betan))),
                          Tnorm)
        Anorm = TT.maximum(Anorm, pnorm)
        Acond = Anorm * Dnorm
        rootl = TT.sqrt(TT.sqr(gbar) + TT.sqr(dbarn))
        Anorml = rnorml * rootl
        relArnorml = rootl / Anorm
        #---------------------------------------------------------------
        # See if any of the stopping criteria are satisfied.
        # In rare cases, flag is already -1 from above (Abar = const*I).
        #---------------------------------------------------------------
        epsx = Anorm * xnorm * eps
        epsr = Anorm * xnorm * rtol
        # NOTE(review): epsr (and several of the *l locals above) are
        # computed but not used.
        # Test for singular Hk (hence singular A)
        # or x is already an LS solution (so again A must be singular).
        t1 = constantX(1) + relrnorm
        t2 = constantX(1) + relArnorml
        flag = TT.switch(
            TT.bitwise_or(TT.eq(flag, constantX(0)),
                          TT.eq(flag, constantX(6))),
            multiple_switch(TT.le(t1, constantX(1)),
                            constantX(3),
                            TT.le(t2, constantX(1)),
                            constantX(4),
                            TT.le(relrnorm, rtol),
                            constantX(1),
                            TT.le(Anorm, constantX(1e-20)),
                            constantX(12),
                            TT.le(relArnorml, rtol),
                            constantX(10),
                            TT.ge(epsx, beta1),
                            constantX(5),
                            TT.ge(xnorm, maxxnorm),
                            constantX(6),
                            TT.ge(niter, TT.cast(maxit,
                                                 theano.config.floatX)),
                            constantX(8),
                            flag),
            flag)
        flag = TT.switch(TT.lt(Axnorm, rtol * Anorm * xnorm),
                         constantX(11.),
                         flag)
        return [niter + constantX(1.),
                beta,
                betan,
                phi,
                Acond,
                cs,
                dbarn,
                eplnn,
                rnorm,
                sn,
                Tnorm,
                rnorml,
                xnorm,
                Dnorm,
                gamma,
                pnorm,
                gammal,
                Axnorm,
                relrnorm,
                relArnorml,
                Anorm,
                flag] + xs + r1s + r2s + r3s + dls + ds, upds, \
            theano.scan_module.scan_utils.until(TT.neq(flag, 0))

    # Initial scalar states for the scan loop (indices documented inline).
    states = []
    # 0 niter
    states.append(constantX([0]))
    # 1 beta
    states.append(constantX([0]))
    # 2 betan
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 3 phi
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 4 Acond
    states.append(constantX([1]))
    # 5 cs
    states.append(constantX([-1]))
    # 6 dbarn
    states.append(constantX([0]))
    # 7 eplnn
    states.append(constantX([0]))
    # 8 rnorm
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 9 sn
    states.append(constantX([0]))
    # 10 Tnorm
    states.append(constantX([0]))
    # 11 rnorml
    states.append(TT.unbroadcast(TT.shape_padleft(beta1), 0))
    # 12 xnorm
    states.append(constantX([0]))
    # 13 Dnorm
    states.append(constantX([0]))
    # 14 gamma
    states.append(constantX([0]))
    # 15 pnorm
    states.append(constantX([0]))
    # 16 gammal
    states.append(constantX([0]))
    # 17 Axnorm
    states.append(constantX([0]))
    # 18 relrnorm
    states.append(constantX([1]))
    # 19 relArnorml
    states.append(constantX([1]))
    # 20 Anorm
    states.append(constantX([0]))
    # 21 flag
    states.append(constantX([0]))
    xs = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    ds = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    dls = [TT.unbroadcast(TT.shape_padleft(TT.zeros_like(b)), 0) for b in bs]
    r1s = [TT.unbroadcast(TT.shape_padleft(r1), 0) for r1 in r1s]
    r2s = [TT.unbroadcast(TT.shape_padleft(r2), 0) for r2 in r2s]
    r3s = [TT.unbroadcast(TT.shape_padleft(r3), 0) for r3 in r3s]

    rvals, loc_updates = scan(
        loop,
        states=states + xs + r1s + r2s + r3s + dls + ds,
        n_steps=maxit + numpy.int32(1),
        name='minres',
        profile=profile,
        mode=theano.Mode(linker='cvm'))
    assert isinstance(loc_updates, dict) and 'Ordered' in str(type(loc_updates))

    # Unpack the final scan state (see the `states` index comments above).
    niters = TT.cast(rvals[0][0], 'int32')
    flag = TT.cast(rvals[21][0], 'int32')
    relres = rvals[18][0]
    relAres = rvals[19][0]
    Anorm = rvals[20][0]
    Acond = rvals[4][0]
    xnorm = rvals[12][0]
    Axnorm = rvals[17][0]
    sol = [x[0] for x in rvals[22: 22 + n_params]]
    return (sol,
            flag,
            niters,
            relres,
            relAres,
            Anorm,
            Acond,
            xnorm,
            Axnorm,
            loc_updates)
| bsd-3-clause |
hastexo/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_asides.py | 4 | 2085 | """
Tests for Asides
"""
from web_fragments.fragment import Fragment
from xblock.core import XBlockAside
from xblock.fields import Scope, String
from unittest import TestCase
from xmodule.modulestore.tests.utils import XmlModulestoreBuilder
from mock import patch
class AsideTestType(XBlockAside):
    """
    Test Aside type: a minimal aside that contributes a fixed HTML fragment
    to the student view of any block it is attached to.
    """
    FRAG_CONTENT = u"<p>Aside rendered</p>"

    # Two fields with different scopes, so the XML import of per-scope
    # aside data can be verified (see TestAsidesXmlStore below).
    content = String(default="default_content", scope=Scope.content)
    data_field = String(default="default_data", scope=Scope.settings)

    @XBlockAside.aside_for('student_view')
    def student_view_aside(self, block, context):  # pylint: disable=unused-argument
        """Add to the student view"""
        return Fragment(self.FRAG_CONTENT)
class TestAsidesXmlStore(TestCase):
    """
    Test Asides sourced from xml store
    """
    @patch('xmodule.modulestore.xml.ImportSystem.applicable_aside_types', lambda self, block: ['test_aside'])
    @XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
    def test_xml_aside(self):
        """
        Check that the xml modulestore read in all the asides with their values
        """
        with XmlModulestoreBuilder().build(course_ids=['edX/aside_test/2012_Fall']) as (__, store):
            def check_block(block):
                """
                Check whether block has the expected aside w/ its fields and then recurse to the block's children
                """
                asides = block.runtime.get_asides(block)
                self.assertEqual(len(asides), 1, "Found {} asides but expected only test_aside".format(asides))
                self.assertIsInstance(asides[0], AsideTestType)
                # The test course's XML stores per-category values of the
                # form "<category> aside data" / "<Category> Aside".
                category = block.scope_ids.block_type
                self.assertEqual(asides[0].data_field, "{} aside data".format(category))
                self.assertEqual(asides[0].content, "{} Aside".format(category.capitalize()))

                # Recurse through the whole course tree.
                for child in block.get_children():
                    check_block(child)

            check_block(store.get_course(store.make_course_key('edX', "aside_test", "2012_Fall")))
| agpl-3.0 |
s142857/servo | tests/wpt/harness/wptrunner/browsers/webdriver.py | 194 | 4219 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import errno
import socket
import time
import traceback
import urlparse
import mozprocess
from .base import get_free_port, cmd_arg
__all__ = ["SeleniumLocalServer", "ChromedriverLocalServer"]
class LocalServer(object):
    """Base class managing a locally-run WebDriver-speaking server process.

    Subclasses must populate ``self.cmd`` with the command line to launch,
    and may override ``default_endpoint``.
    """
    # Ports already allocated to other instances in this process, so that
    # concurrent servers do not collide.
    used_ports = set()
    default_endpoint = "/"

    def __init__(self, logger, binary, port=None, endpoint=None):
        self.logger = logger
        self.binary = binary
        self.port = port
        self.endpoint = endpoint or self.default_endpoint

        if self.port is None:
            self.port = get_free_port(4444, exclude=self.used_ports)
        self.used_ports.add(self.port)
        self.url = "http://127.0.0.1:%i%s" % (self.port, self.endpoint)

        self.proc, self.cmd = None, None

    def start(self):
        """Launch the server process and block until it accepts connections."""
        self.proc = mozprocess.ProcessHandler(
            self.cmd, processOutputLine=self.on_output)
        try:
            self.proc.run()
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise IOError(
                    "chromedriver executable not found: %s" % self.binary)
            raise

        self.logger.debug(
            "Waiting for server to become accessible: %s" % self.url)
        surl = urlparse.urlparse(self.url)
        addr = (surl.hostname, surl.port)
        try:
            wait_service(addr)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not intercepted; the failure is logged and re-raised.
            self.logger.error(
                "Server was not accessible within the timeout:\n%s" % traceback.format_exc())
            raise
        else:
            self.logger.info("Server listening on port %i" % self.port)

    def stop(self):
        """Kill the server process, if it was ever started."""
        if hasattr(self.proc, "proc"):
            self.proc.kill()

    def is_alive(self):
        """Return True if the process was started and has not yet exited."""
        if hasattr(self.proc, "proc"):
            exitcode = self.proc.poll()
            return exitcode is None
        return False

    def on_output(self, line):
        # Forward each line of server output to the structured logger.
        self.logger.process_output(self.pid,
                                   line.decode("utf8", "replace"),
                                   command=" ".join(self.cmd))

    @property
    def pid(self):
        # None until start() has been called.
        if hasattr(self.proc, "proc"):
            return self.proc.pid
class SeleniumLocalServer(LocalServer):
    """Manage a locally-running standalone Selenium server."""

    default_endpoint = "/wd/hub"

    def __init__(self, logger, binary, port=None):
        LocalServer.__init__(self, logger, binary, port=port)
        # The standalone server is distributed as an executable jar.
        self.cmd = ["java", "-jar", self.binary, "-port", str(self.port)]

    def start(self):
        self.logger.debug("Starting local Selenium server")
        LocalServer.start(self)

    def stop(self):
        LocalServer.stop(self)
        self.logger.info("Selenium server stopped listening")
class ChromedriverLocalServer(LocalServer):
    """Manage a locally-running chromedriver process."""

    default_endpoint = "/wd/hub"

    def __init__(self, logger, binary="chromedriver", port=None, endpoint=None):
        LocalServer.__init__(self, logger, binary, port=port, endpoint=endpoint)
        # TODO: verbose logging
        cmd = [self.binary]
        cmd.append(cmd_arg("port", str(self.port)) if self.port else "")
        cmd.append(cmd_arg("url-base", self.endpoint) if self.endpoint else "")
        self.cmd = cmd

    def start(self):
        self.logger.debug("Starting local chromedriver server")
        LocalServer.start(self)

    def stop(self):
        LocalServer.stop(self)
        self.logger.info("chromedriver server stopped listening")
def wait_service(addr, timeout=15):
    """Waits until network service given as a tuple of (host, port) becomes
    available or the `timeout` duration is reached, at which point
    ``socket.error`` is raised.

    Polls every 0.5 seconds; returns True on the first successful connection.
    """
    end = time.time() + timeout
    while end > time.time():
        so = socket.socket()
        try:
            so.connect(addr)
        except socket.timeout:
            pass
        except socket.error as e:
            # Keep polling while the service merely refuses connections;
            # any other error is unexpected and propagates.  Use e.errno
            # rather than e[0]: indexing the exception is Python-2-only.
            if e.errno != errno.ECONNREFUSED:
                raise
        else:
            return True
        finally:
            so.close()
        time.sleep(0.5)
    raise socket.error("Service is unavailable: %s:%i" % addr)
| mpl-2.0 |
josenavas/QiiTa | qiita_pet/handlers/api_proxy/util.py | 5 | 2194 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import exists, join
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_db.study import Study
from qiita_db.user import User
from qiita_db.util import get_mountpoint
def check_access(study_id, user_id):
    """Checks if user given has access to the study given

    Parameters
    ----------
    study_id : int
        ID of the study to check access to
    user_id : str
        ID of the user to check access for

    Returns
    -------
    dict
        Empty dict if access allowed, else a dict in the form
        {'status': 'error',
         'message': reason for error}
    """
    try:
        requested_study = Study(int(study_id))
    except QiitaDBUnknownIDError:
        return {'status': 'error', 'message': 'Study does not exist'}

    if not requested_study.has_access(User(user_id)):
        return {'status': 'error',
                'message': 'User does not have access to study'}

    # Access granted: no error payload.
    return {}
def check_fp(study_id, filename):
    """Check whether an uploaded file exists

    Parameters
    ----------
    study_id : int
        Study file uploaded to
    filename : str
        name of the uploaded file

    Returns
    -------
    dict
        {'status': status,
         'message': msg,
         'file': str}
        file contains full filepath if status is success, otherwise it
        contains the filename
    """
    # Resolve where the file should live inside the uploads mountpoint.
    _, uploads_dir = get_mountpoint("uploads")[0]
    full_path = join(uploads_dir, str(study_id), filename)

    if exists(full_path):
        return {'status': 'success', 'message': '', 'file': full_path}

    # The file does not exist, fail nicely
    return {'status': 'error',
            'message': 'file does not exist',
            'file': filename}
| bsd-3-clause |
CredoReference/edx-platform | common/test/acceptance/tests/lms/test_lms_courseware.py | 9 | 42019 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import json
from datetime import datetime, timedelta
import ddt
from nose.plugins.attrib import attr
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.common.auto_auth import AutoAuthPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_home import CourseHomePage
from ...pages.lms.courseware import CoursewarePage, CoursewareSequentialTabPage, RenderXBlockPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.pay_and_verify import FakePaymentPage, FakeSoftwareSecureVerificationPage, PaymentAndVerificationFlow
from ...pages.lms.problem import ProblemPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.staff_view import StaffCoursewarePage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from ..helpers import EventsTestMixin, UniqueCourseTest, auto_auth, create_multiple_choice_problem
@attr(shard=9)
class CoursewareTest(UniqueCourseTest):
    """
    Test courseware.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(CoursewareTest, self).setUp()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with two sections, each holding one subsection
        # with a single problem.
        self.course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        self.course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 2')
                )
            )
        ).install()

        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

    def _goto_problem_page(self):
        """
        Open problem page with assertion.
        """
        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)  # pylint: disable=attribute-defined-outside-init
        self.assertEqual(self.problem_page.problem_name, 'Test Problem 1')

    def test_courseware(self):
        """
        Test courseware if recent visited subsection become unpublished.
        """
        # Visit problem page as a student.
        self._goto_problem_page()

        # Logout and login as a staff user.
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)

        # Visit course outline page in studio.
        self.studio_course_outline.visit()

        # Set release date for subsection in future.
        self.studio_course_outline.change_problem_release_date()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

        # Visit courseware as a student.
        self.courseware_page.visit()
        # Problem name should be "Test Problem 2" now that subsection 1 is
        # unreleased.  (problem_page reads the live DOM, so the instance
        # created in _goto_problem_page reflects the new page.)
        self.assertEqual(self.problem_page.problem_name, 'Test Problem 2')

    def test_course_tree_breadcrumb(self):
        """
        Scenario: Correct course tree breadcrumb is shown.

        Given that I am a registered user
        And I visit my courseware page
        Then I should see correct course tree breadcrumb
        """
        xblocks = self.course_fix.get_nested_xblocks(category="problem")
        for index in range(1, len(xblocks) + 1):
            test_section_title = 'Test Section {}'.format(index)
            test_subsection_title = 'Test Subsection {}'.format(index)
            test_unit_title = 'Test Problem {}'.format(index)
            self.course_home_page.visit()
            self.course_home_page.outline.go_to_section(test_section_title, test_subsection_title)
            course_nav = self.courseware_page.nav
            self.assertEqual(course_nav.breadcrumb_section_title, test_section_title)
            self.assertEqual(course_nav.breadcrumb_subsection_title, test_subsection_title)
            self.assertEqual(course_nav.breadcrumb_unit_title, test_unit_title)
@attr(shard=9)
@ddt.ddt
class ProctoredExamTest(UniqueCourseTest):
    """
    Tests for proctored exams.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(ProctoredExamTest, self).setUp()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with proctored exams enabled and a single
        # section/subsection/problem to convert into a timed/proctored exam.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_advanced_settings({
            "enable_proctored_exams": {"value": "true"}
        })
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            )
        ).install()

        # Page objects for the verified-track enrollment flow.
        self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(
            self.browser, self.course_id, entry_point='verify-now'
        )
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)
        self.problem_page = ProblemPage(self.browser)

        # Add a verified mode to the course
        ModeCreationPage(
            self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
            min_price=10, suggested_prices='10,20'
        ).visit()

        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

    def _login_as_a_verified_user(self):
        """
        Log in and enroll in the verified track via the fake payment flow.
        """
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

        # the track selection page cannot be visited. see the other tests to see if any prereq is there.
        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

    def _verify_user(self):
        """
        Takes user through the verification flow and then marks the verification as 'approved'.
        """
        # Immediately verify the user
        self.immediate_verification_page.immediate_verification()

        # Take face photo and proceed to the ID photo step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Take ID photo and proceed to the review photos step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Submit photos and proceed to the enrollment confirmation step
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Mark the verification as passing.
        verification = FakeSoftwareSecureVerificationPage(self.browser).visit()
        verification.mark_approved()

    def test_can_create_proctored_exam_in_studio(self):
        """
        Given that I am a staff member
        When I visit the course outline page in studio.
        And open the subsection edit dialog
        Then I can view all settings related to Proctored and timed exams
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()

        self.studio_course_outline.open_subsection_settings_dialog()
        self.assertTrue(self.studio_course_outline.proctoring_items_are_displayed())

    def _setup_and_take_timed_exam(self, hide_after_due=False):
        """
        Helper to perform the common action "set up a timed exam as staff,
        then take it as student"
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_advanced_tab()
        self.studio_course_outline.make_exam_timed(hide_after_due=hide_after_due)

        LogoutPage(self.browser).visit()
        self._login_as_a_verified_user()
        self.courseware_page.visit()

        # Start the exam, confirm the countdown timer is shown, then submit.
        self.courseware_page.start_timed_exam()
        self.assertTrue(self.courseware_page.is_timer_bar_present)
        self.courseware_page.stop_timed_exam()
        self.courseware_page.wait_for_page()
        self.assertTrue(self.courseware_page.has_submitted_exam_message())

        LogoutPage(self.browser).visit()

    @ddt.data(True, False)
    def test_timed_exam_flow(self, hide_after_due):
        """
        Given that I am a staff member on the exam settings section
        select advanced settings tab
        When I Make the exam timed.
        And I login as a verified student.
        And visit the courseware as a verified student.
        And I start the timed exam
        Then I am taken to the exam with a timer bar showing
        When I finish the exam
        Then I see the exam submitted dialog in place of the exam
        When I log back into studio as a staff member
        And change the problem's due date to be in the past
        And log back in as the original verified student
        Then I see the exam or message in accordance with the hide_after_due setting
        """
        self._setup_and_take_timed_exam(hide_after_due)

        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        last_week = (datetime.today() - timedelta(days=7)).strftime("%m/%d/%Y")
        self.studio_course_outline.change_problem_due_date(last_week)

        LogoutPage(self.browser).visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        # With hide_after_due, the submitted-exam message replaces the exam
        # content once the due date has passed; otherwise the exam is shown.
        self.assertEqual(self.courseware_page.has_submitted_exam_message(), hide_after_due)

    def test_field_visiblity_with_all_exam_types(self):
        """
        Given that I am a staff member
        And I have visited the course outline page in studio.
        And the subsection edit dialog is open
        select advanced settings tab
        For each of None, Timed, Proctored, and Practice exam types
        The time allotted and review rules fields have proper visibility

            None: False, False
            Timed: True, False
            Proctored: True, True
            Practice: True, False
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()

        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_advanced_tab()

        self.studio_course_outline.select_none_exam()
        self.assertFalse(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_timed_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_proctored_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertTrue(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_practice_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())
class CoursewareMultipleVerticalsTestBase(UniqueCourseTest, EventsTestMixin):
    """
    Base class with setup for testing courseware with multiple verticals
    """
    # Learner account registered in setUp and used by subclasses' tests.
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"
    def setUp(self):
        """
        Install a course with two learner-visible sections plus staff-only
        content, then register a non-staff learner for it.
        """
        super(CoursewareMultipleVerticalsTestBase, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                # Subsection 1,1 has four components (2 problems, 2 html),
                # giving the sequence four tabs to navigate between.
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
                    XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
                    XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
                ),
                # Staff-only subsection: must never be shown to the learner.
                XBlockFixtureDesc(
                    'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
                ).add_children(
                    XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
                ),
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2,1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 4', data='<problem>problem 4 dummy body</problem>'),
                ),
            ),
            # Staff-only chapter: must never be shown to the learner.
            XBlockFixtureDesc('chapter', 'Test HIDDEN Section', metadata={'visible_to_staff_only': True}).add_children(
                XBlockFixtureDesc('sequential', 'Test HIDDEN Subsection'),
            ),
        ).install()
        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()
@attr(shard=9)
class CoursewareMultipleVerticalsTest(CoursewareMultipleVerticalsTestBase):
    """
    Test courseware with multiple verticals
    """
    def test_navigation_buttons(self):
        """
        Walk through the course with the sequence next/previous buttons and
        the position selector, checking the displayed section/subsection,
        position, and button enabled-state at each step, then verify the
        exact sequence-navigation UI events that were emitted.
        """
        self.courseware_page.visit()
        # start in first section
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 0, next_enabled=True, prev_enabled=False)
        # next takes us to next tab in sequential
        self.courseware_page.click_next_button_on_top()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 1, next_enabled=True, prev_enabled=True)
        # go to last sequential position
        self.courseware_page.go_to_sequential_position(4)
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)
        # next takes us to next sequential
        self.courseware_page.click_next_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)
        # next takes us to next chapter
        self.courseware_page.click_next_button_on_top()
        self.assert_navigation_state('Test Section 2', 'Test Subsection 2,1', 0, next_enabled=False, prev_enabled=True)
        # previous takes us to previous chapter
        self.courseware_page.click_previous_button_on_top()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)
        # previous takes us to last tab in previous sequential
        self.courseware_page.click_previous_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)
        # previous takes us to previous tab in sequential
        self.courseware_page.click_previous_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 2, next_enabled=True, prev_enabled=True)
        # test UI events emitted by navigation
        filter_sequence_ui_event = lambda event: event.get('name', '').startswith('edx.ui.lms.sequence.')
        sequence_ui_events = self.wait_for_events(event_filter=filter_sequence_ui_event, timeout=2)
        # Legacy seq_* events carry 'old'/'new' position fields in their
        # (JSON-encoded) payload; the newer edx.ui.lms.sequence.* events do not.
        legacy_events = [ev for ev in sequence_ui_events if ev['event_type'] in {'seq_next', 'seq_prev', 'seq_goto'}]
        nonlegacy_events = [ev for ev in sequence_ui_events if ev not in legacy_events]
        self.assertTrue(all('old' in json.loads(ev['event']) for ev in legacy_events))
        self.assertTrue(all('new' in json.loads(ev['event']) for ev in legacy_events))
        self.assertFalse(any('old' in json.loads(ev['event']) for ev in nonlegacy_events))
        self.assertFalse(any('new' in json.loads(ev['event']) for ev in nonlegacy_events))
        # The expected events below correspond, in order, to the navigation
        # clicks performed above.
        self.assert_events_match(
            [
                {
                    'event_type': 'seq_next',
                    'event': {
                        'old': 1,
                        'new': 2,
                        'current_tab': 1,
                        'tab_count': 4,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'seq_goto',
                    'event': {
                        'old': 2,
                        'new': 4,
                        'current_tab': 2,
                        'target_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.next_selected',
                    'event': {
                        'current_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'bottom',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.next_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.previous_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.previous_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'bottom',
                    }
                },
                {
                    'event_type': 'seq_prev',
                    'event': {
                        'old': 4,
                        'new': 3,
                        'current_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'bottom',
                    }
                },
            ],
            sequence_ui_events
        )
    # TODO: TNL-6546: Delete this whole test if these events are going away(?)
    def test_outline_selected_events(self):
        """
        Navigating via the course outline accordion must emit an
        edx.ui.lms.outline.selected event per selection.
        """
        self.courseware_page.visit()
        self.courseware_page.nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
        self.courseware_page.nav.go_to_section('Test Section 2', 'Test Subsection 2,1')
        # test UI events emitted by navigating via the course outline
        filter_selected_events = lambda event: event.get('name', '') == 'edx.ui.lms.outline.selected'
        selected_events = self.wait_for_events(event_filter=filter_selected_events, timeout=2)
        # note: target_url is tested in unit tests, as the url changes here with every test (it includes GUIDs).
        self.assert_events_match(
            [
                {
                    'event_type': 'edx.ui.lms.outline.selected',
                    'name': 'edx.ui.lms.outline.selected',
                    'event': {
                        'target_name': 'Test Subsection 1,2 ',
                        'widget_placement': 'accordion',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.outline.selected',
                    'name': 'edx.ui.lms.outline.selected',
                    'event': {
                        'target_name': 'Test Subsection 2,1 ',
                        'widget_placement': 'accordion',
                    }
                },
            ],
            selected_events
        )
    # TODO: Delete as part of TNL-6546 / LEARNER-71
    def test_link_clicked_events(self):
        """
        Given that I am a user in the courseware
        When I navigate via the left-hand nav
        Then a link clicked event is logged
        """
        self.courseware_page.visit()
        self.courseware_page.nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
        self.courseware_page.nav.go_to_section('Test Section 2', 'Test Subsection 2,1')
        filter_link_clicked = lambda event: event.get('name', '') == 'edx.ui.lms.link_clicked'
        link_clicked_events = self.wait_for_events(event_filter=filter_link_clicked, timeout=2)
        self.assertEqual(len(link_clicked_events), 2)
    def assert_navigation_state(
            self, section_title, subsection_title, subsection_position, next_enabled, prev_enabled
    ):
        """
        Verifies that the navigation state is as expected.

        Args:
            section_title: expected section shown in the navigation
            subsection_title: expected subsection shown in the navigation
            subsection_position: expected value of
                ``courseware_page.sequential_position`` (note: in the calls
                above this appears 0-based, while go_to_sequential_position
                takes 1-based targets — confirm against the page object)
            next_enabled: whether the "next" button should be enabled
            prev_enabled: whether the "previous" button should be enabled
        """
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertTrue(self.courseware_page.nav.is_on_section(section_title, subsection_title))
        self.assertEquals(self.courseware_page.sequential_position, subsection_position)
        self.assertEquals(self.courseware_page.is_next_button_enabled, next_enabled)
        self.assertEquals(self.courseware_page.is_previous_button_enabled, prev_enabled)
    def test_tab_position(self):
        """
        Visiting a courseware URL with an explicit position lands on the
        corresponding tab's content.
        """
        # test that using the position in the url direct to correct tab in courseware
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1,1')
        # Pull the course/chapter/subsection ids out of the current URL so we
        # can construct direct tab URLs for each position.
        subsection_url = self.browser.current_url
        url_part_list = subsection_url.split('/')
        course_id = url_part_list[-5]
        chapter_id = url_part_list[-3]
        subsection_id = url_part_list[-2]
        problem1_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=1
        ).visit()
        self.assertIn('problem 1 dummy body', problem1_page.get_selected_tab_content())
        html1_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=2
        ).visit()
        self.assertIn('html 1 dummy body', html1_page.get_selected_tab_content())
        problem2_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=3
        ).visit()
        self.assertIn('problem 2 dummy body', problem2_page.get_selected_tab_content())
        html2_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=4
        ).visit()
        self.assertIn('html 2 dummy body', html2_page.get_selected_tab_content())
@attr(shard=9)
class ProblemStateOnNavigationTest(UniqueCourseTest):
    """
    Test courseware with problems in multiple verticals.

    Verifies that a problem's rendered state survives navigating away to
    another position in the sequence and back again.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"
    # Display names of the two multiple-choice problems installed in setUp.
    problem1_name = 'MULTIPLE CHOICE TEST PROBLEM 1'
    problem2_name = 'MULTIPLE CHOICE TEST PROBLEM 2'
    def setUp(self):
        """
        Install a course containing two multiple-choice problems in one
        subsection, register a non-staff learner, and open the courseware.
        """
        super(ProblemStateOnNavigationTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        # Install a course with section, tabs and multiple choice problems.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    create_multiple_choice_problem(self.problem1_name),
                    create_multiple_choice_problem(self.problem2_name),
                ),
            ),
        ).install()
        # Auto-auth register for the course.
        AutoAuthPage(
            self.browser, username=self.USERNAME, email=self.EMAIL,
            course_id=self.course_id, staff=False
        ).visit()
        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)
    def go_to_tab_and_assert_problem(self, position, problem_name):
        """
        Go to sequential tab and assert that we are on problem whose name is given as a parameter.
        Args:
            position: Position of the sequential tab
            problem_name: Name of the problem
        """
        self.courseware_page.go_to_sequential_position(position)
        # Wait for the problem to render before reading its name.
        self.problem_page.wait_for_element_presence(
            self.problem_page.CSS_PROBLEM_HEADER,
            'wait for problem header'
        )
        self.assertEqual(self.problem_page.problem_name, problem_name)
    def test_perform_problem_submit_and_navigate(self):
        """
        Scenario:
        I go to sequential position 1
        Facing problem1, I select 'choice_1'
        Then I click submit button
        Then I go to sequential position 2
        Then I came back to sequential position 1 again
        Facing problem1, I observe the problem1 content is not
        outdated before and after sequence navigation
        """
        # Go to sequential position 1 and assert that we are on problem 1.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        # Update problem 1's content state by clicking check button.
        self.problem_page.click_choice('choice_choice_1')
        self.problem_page.click_submit()
        self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
        # Save problem 1's content state as we're about to switch units in the sequence.
        problem1_content_before_switch = self.problem_page.problem_content
        before_meta = self.problem_page.problem_meta
        # Go to sequential position 2 and assert that we are on problem 2.
        self.go_to_tab_and_assert_problem(2, self.problem2_name)
        # Come back to our original unit in the sequence and assert that the content hasn't changed.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        problem1_content_after_coming_back = self.problem_page.problem_content
        after_meta = self.problem_page.problem_meta
        self.assertEqual(problem1_content_before_switch, problem1_content_after_coming_back)
        self.assertEqual(before_meta, after_meta)
    def test_perform_problem_save_and_navigate(self):
        """
        Scenario:
        I go to sequential position 1
        Facing problem1, I select 'choice_1'
        Then I click save button
        Then I go to sequential position 2
        Then I came back to sequential position 1 again
        Facing problem1, I observe the problem1 content is not
        outdated before and after sequence navigation
        """
        # Go to sequential position 1 and assert that we are on problem 1.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        # Update problem 1's content state by clicking save button.
        self.problem_page.click_choice('choice_choice_1')
        self.problem_page.click_save()
        self.problem_page.wait_for_save_notification()
        # Save problem 1's content state as we're about to switch units in the sequence.
        problem1_content_before_switch = self.problem_page.problem_input_content
        before_meta = self.problem_page.problem_meta
        # Go to sequential position 2 and assert that we are on problem 2.
        self.go_to_tab_and_assert_problem(2, self.problem2_name)
        self.problem_page.wait_for_expected_status('span.unanswered', 'unanswered')
        # Come back to our original unit in the sequence and assert that the content hasn't changed.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        problem1_content_after_coming_back = self.problem_page.problem_input_content
        after_meta = self.problem_page.problem_meta
        # NOTE(review): assertIn (not assertEqual) — presumably the returned
        # input content after navigation is a subset of the original; confirm
        # against ProblemPage.problem_input_content.
        self.assertIn(problem1_content_after_coming_back, problem1_content_before_switch)
        self.assertEqual(before_meta, after_meta)
@attr(shard=9)
class SubsectionHiddenAfterDueDateTest(UniqueCourseTest):
    """
    Tests the "hide after due date" setting for
    subsections.
    """
    # Learner account used throughout; a separate STAFF_TESTER account is
    # used for the studio-side configuration steps.
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"
    def setUp(self):
        """
        Install a one-problem course, mark its subsection as hidden after
        the due date (as staff), then register the learner.
        """
        super(SubsectionHiddenAfterDueDateTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.logout_page = LogoutPage(self.browser)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    create_multiple_choice_problem('Test Problem 1')
                )
            )
        ).install()
        self.progress_page = ProgressPage(self.browser, self.course_id)
        self._setup_subsection()
        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
    def _setup_subsection(self):
        """
        Helper to set up a problem subsection as staff, then take
        it as a student.
        """
        self.logout_page.visit()
        # As staff: flip the "hidden after due date" visibility setting.
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_visibility_tab()
        self.studio_course_outline.make_subsection_hidden_after_due_date()
        self.logout_page.visit()
        # As the learner: visit the courseware once so the content is "taken".
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.logout_page.visit()
    # NOTE(review): method name has a typo ("subsecton"); renaming would
    # change the test id, so it is deliberately left as-is.
    def test_subsecton_hidden_after_due_date(self):
        """
        Given that I am a staff member on the subsection settings section
        And I select the advanced settings tab
        When I Make the subsection hidden after its due date.
        And I login as a student.
        And visit the subsection in the courseware as a verified student.
        Then I am able to see the subsection
        And when I visit the progress page
        Then I should be able to see my grade on the progress page
        When I log in as staff
        And I make the subsection due in the past so that the current date is past its due date
        And I log in as a student
        And I visit the subsection in the courseware
        Then the subsection should be hidden with a message that its due date has passed
        And when I visit the progress page
        Then I should be able to see my grade on the progress page
        """
        # Before the due date passes: content visible, grade visible.
        self.logout_page.visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.assertFalse(self.courseware_page.content_hidden_past_due_date())
        self.progress_page.visit()
        self.assertEqual(self.progress_page.scores('Test Section 1', 'Test Subsection 1'), [(0, 1)])
        # As staff: move the due date a week into the past.
        self.logout_page.visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        last_week = (datetime.today() - timedelta(days=7)).strftime("%m/%d/%Y")
        self.studio_course_outline.change_problem_due_date(last_week)
        # After the due date: content hidden, but the grade is still shown.
        self.logout_page.visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.assertTrue(self.courseware_page.content_hidden_past_due_date())
        self.progress_page.visit()
        self.assertEqual(self.progress_page.scores('Test Section 1', 'Test Subsection 1'), [(0, 1)])
@attr(shard=9)
class CompletionTestCase(UniqueCourseTest, EventsTestMixin):
    """
    Test the completion on view functionality.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"
    # Expected value (in ms, as a string) of the mark-completed-on-view
    # attribute rendered on blocks that have not yet been viewed/completed.
    COMPLETION_BY_VIEWING_DELAY_MS = '1000'
    def setUp(self):
        """
        Install a course whose first subsection contains three verticals of
        html/problem blocks, then register a non-staff learner.
        """
        super(CompletionTestCase, self).setUp()
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Kept as attributes so tests can address these blocks directly
        # (e.g. via self.html_1_block.locator in the render_xblock test).
        self.html_1_block = XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>")
        self.problem_1_block = XBlockFixtureDesc(
            'problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 1,1,1').add_children(
                        XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
                        # 'html 2' is deliberately long so it extends past the
                        # viewport; the marker span is used to scroll to its end.
                        XBlockFixtureDesc(
                            'html', 'html 2',
                            data=("<html>html 2 dummy body</html>" * 100) + "<span id='html2-end'>End</span>",
                        ),
                        XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
                    ),
                    XBlockFixtureDesc('vertical', 'Test Unit 1,1,2').add_children(
                        XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
                        XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
                    ),
                    # NOTE(review): this unit re-uses the display name
                    # 'Test Unit 1,1,2' — presumably meant to be
                    # 'Test Unit 1,1,3'; confirm before renaming, since the
                    # tests only address units by position.
                    XBlockFixtureDesc('vertical', 'Test Unit 1,1,2').add_children(
                        self.html_1_block,
                        self.problem_1_block,
                    ),
                ),
            ),
        ).install()
        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()
    def test_courseware_publish_completion_is_sent_on_view(self):
        """
        Test that when viewing courseware XBlocks are correctly marked as completed on view.
        """
        courseware_page = CoursewarePage(self.browser, self.course_id)
        courseware_page.visit()
        courseware_page.wait_for_page()
        # Initially, the first two blocks in the first vertical should be marked as needing to be completed on view.
        self.assertEqual(
            courseware_page.xblock_components_mark_completed_on_view_value(),
            [self.COMPLETION_BY_VIEWING_DELAY_MS, self.COMPLETION_BY_VIEWING_DELAY_MS, None],
        )
        # Wait and verify that the first block which is completely visible is marked as completed.
        courseware_page.wait_for_xblock_component_to_be_marked_completed_on_view(0)
        self.assertEqual(
            courseware_page.xblock_components_mark_completed_on_view_value(),
            ['0', self.COMPLETION_BY_VIEWING_DELAY_MS, None],
        )
        # Scroll to the bottom of the second block.
        courseware_page.scroll_to_element('#html2-end', 'Scroll to end of html 2 block')
        # Wait and verify that the second block is also now marked as completed.
        courseware_page.wait_for_xblock_component_to_be_marked_completed_on_view(1)
        self.assertEqual(courseware_page.xblock_components_mark_completed_on_view_value(), ['0', '0', None])
        # After page refresh, no blocks in the vertical should be marked as needing to be completed on view.
        self.browser.refresh()
        courseware_page.wait_for_page()
        self.assertEqual(courseware_page.xblock_components_mark_completed_on_view_value(), [None, None, None])
        courseware_page.go_to_sequential_position(2)
        # Initially, the first block in the second vertical should be marked as needing to be completed on view.
        self.assertEqual(
            courseware_page.xblock_components_mark_completed_on_view_value(),
            [self.COMPLETION_BY_VIEWING_DELAY_MS, None],
        )
        # Wait and verify that the first block which is completely visible is marked as completed.
        courseware_page.wait_for_xblock_component_to_be_marked_completed_on_view(0)
        self.assertEqual(courseware_page.xblock_components_mark_completed_on_view_value(), ['0', None])
        # After page refresh, no blocks in the vertical should be marked as needing to be completed on view.
        self.browser.refresh()
        courseware_page.wait_for_page()
        self.assertEqual(courseware_page.xblock_components_mark_completed_on_view_value(), [None, None])
    def test_render_xblock_publish_completion_is_sent_on_view(self):
        """
        Test that when viewing a XBlock in render_xblock, it is correctly marked as completed on view.
        """
        block_page = RenderXBlockPage(self.browser, self.html_1_block.locator)
        block_page.visit()
        block_page.wait_for_page()
        # Initially the block should be marked as needing to be completed on view.
        self.assertEqual(
            block_page.xblock_components_mark_completed_on_view_value(), [self.COMPLETION_BY_VIEWING_DELAY_MS]
        )
        # Wait and verify that the block is marked as completed on view.
        block_page.wait_for_xblock_component_to_be_marked_completed_on_view(0)
        self.assertEqual(block_page.xblock_components_mark_completed_on_view_value(), ['0'])
        # After page refresh, it should not be marked as needing to be completed on view.
        self.browser.refresh()
        block_page.wait_for_page()
        self.assertEqual(block_page.xblock_components_mark_completed_on_view_value(), [None])
| agpl-3.0 |
JimCircadian/ansible | lib/ansible/modules/net_tools/cloudflare_dns.py | 22 | 24678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudflare_dns
author: "Michael Gruener (@mgruener)"
requirements:
- "python >= 2.6"
version_added: "2.1"
short_description: manage Cloudflare DNS records
description:
- "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
options:
account_api_token:
description:
- >
Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)
required: true
account_email:
description:
- "Account email."
required: true
port:
description: Service port. Required for C(type=SRV)
priority:
description: Record priority. Required for C(type=MX) and C(type=SRV)
default: "1"
proto:
description:
- Service protocol. Required for C(type=SRV).
- Common values are tcp and udp.
- Before Ansible 2.6 only tcp and udp were available.
proxied:
description: Proxy through cloudflare network or just use DNS
type: bool
default: 'no'
version_added: "2.3"
record:
description:
- Record to add. Required if C(state=present). Default is C(@) (e.g. the zone name)
default: "@"
aliases: [ "name" ]
service:
description: Record service. Required for C(type=SRV)
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with C(state=present)
- This will delete all other records with the same record name and type.
state:
description:
- Whether the record(s) should exist or not
choices: [ 'present', 'absent' ]
default: present
timeout:
description:
- Timeout for Cloudflare API calls
default: 30
ttl:
description:
- The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
default: 1 (automatic)
type:
description:
- The type of DNS record to create. Required if C(state=present)
choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ]
value:
description:
- The record value. Required for C(state=present)
aliases: [ "content" ]
weight:
description: Service weight. Required for C(type=SRV)
default: "1"
zone:
description:
- The name of the Zone to work with (e.g. "example.com"). The Zone must already exist.
required: true
aliases: ["domain"]
'''
EXAMPLES = '''
# create a test.my.com A record to point to 127.0.0.1
- cloudflare_dns:
zone: my.com
record: test
type: A
value: 127.0.0.1
account_email: test@example.com
account_api_token: dummyapitoken
register: record
# create a my.com CNAME record to example.com
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
account_email: test@example.com
account_api_token: dummyapitoken
# change its ttl
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
ttl: 600
state: present
account_email: test@example.com
account_api_token: dummyapitoken
# and delete the record
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: absent
account_email: test@example.com
account_api_token: dummyapitoken
# create a my.com CNAME record to example.com and proxy through cloudflare's network
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
proxied: yes
account_email: test@example.com
account_api_token: dummyapitoken
# create TXT record "test.my.com" with value "unique value"
# delete all other TXT records named "test.my.com"
- cloudflare_dns:
domain: my.com
record: test
type: TXT
value: unique value
state: present
solo: true
account_email: test@example.com
account_api_token: dummyapitoken
# create a SRV record _foo._tcp.my.com
- cloudflare_dns:
domain: my.com
service: foo
proto: tcp
port: 3500
priority: 10
weight: 20
type: SRV
value: fooserver.my.com
'''
RETURN = '''
record:
description: dictionary containing the record data
returned: success, except on record deletion
type: complex
contains:
content:
description: the record content (details depend on record type)
returned: success
type: string
sample: 192.0.2.91
created_on:
description: the record creation date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
data:
description: additional record data
returned: success, if type is SRV
type: dictionary
sample: {
name: "jabber",
port: 8080,
priority: 10,
proto: "_tcp",
service: "_xmpp",
target: "jabberhost.sample.com",
weight: 5,
}
id:
description: the record id
returned: success
type: string
sample: f9efb0549e96abcb750de63b38c9576e
locked:
description: No documentation available
returned: success
type: boolean
sample: False
meta:
description: No documentation available
returned: success
type: dictionary
sample: { auto_added: false }
modified_on:
description: record modification date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
name:
description: the record name as FQDN (including _service and _proto for SRV)
returned: success
type: string
sample: www.sample.com
priority:
description: priority of the MX record
returned: success, if type is MX
type: int
sample: 10
proxiable:
description: whether this record can be proxied through cloudflare
returned: success
type: boolean
sample: False
proxied:
description: whether the record is proxied through cloudflare
returned: success
type: boolean
sample: False
ttl:
description: the time-to-live for the record
returned: success
type: int
sample: 300
type:
description: the record type
returned: success
type: string
sample: A
zone_id:
description: the id of the zone containing the record
returned: success
type: string
sample: abcede0bf9f0066f94029d2e6b73856a
zone_name:
description: the name of the zone containing the record
returned: success
type: string
sample: sample.com
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
class CloudflareAPI(object):
    """Thin wrapper around the Cloudflare v4 REST API for managing DNS records.

    All inputs come from the Ansible module parameters; every error path goes
    through ``module.fail_json()`` (which does not return), so callers can
    assume a successful result on return.
    """

    cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
    changed = False

    def __init__(self, module):
        self.module = module
        self.account_api_token = module.params['account_api_token']
        self.account_email = module.params['account_email']
        self.port = module.params['port']
        self.priority = module.params['priority']
        self.proto = module.params['proto']
        self.proxied = module.params['proxied']
        self.record = module.params['record']
        self.service = module.params['service']
        self.is_solo = module.params['solo']
        self.state = module.params['state']
        self.timeout = module.params['timeout']
        self.ttl = module.params['ttl']
        self.type = module.params['type']
        self.value = module.params['value']
        self.weight = module.params['weight']
        self.zone = module.params['zone']

        # '@' is the conventional shorthand for the zone apex.
        if self.record == '@':
            self.record = self.zone

        # Cloudflare stores these record values without a trailing dot.
        if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
            self.value = self.value.rstrip('.')

        if self.type == 'SRV':
            # SRV proto/service labels must carry a leading underscore.
            if (self.proto is not None) and (not self.proto.startswith('_')):
                self.proto = '_' + self.proto
            if (self.service is not None) and (not self.service.startswith('_')):
                self.service = '_' + self.service

        # Fully qualify the record name within the zone.
        if not self.record.endswith(self.zone):
            self.record = self.record + '.' + self.zone

    def _cf_simple_api_call(self, api_call, method='GET', payload=None):
        """Perform one HTTP request and return ``(parsed_json, http_status)``.

        Fails the module on transport errors, unexpected HTTP codes, bodies
        that cannot be parsed, or an API-level ``success: false`` result.
        """
        headers = {'X-Auth-Email': self.account_email,
                   'X-Auth-Key': self.account_api_token,
                   'Content-Type': 'application/json'}
        data = None
        if payload:
            try:
                data = json.dumps(payload)
            except Exception as e:
                self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
        resp, info = fetch_url(self.module,
                               self.cf_api_endpoint + api_call,
                               headers=headers,
                               data=data,
                               method=method,
                               timeout=self.timeout)
        if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
            self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call, info['status']))
        error_msg = ''
        if info['status'] == 401:
            # Unauthorized
            error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 403:
            # Forbidden
            error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 429:
            # Too many requests
            error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 405:
            # Method not allowed
            error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 415:
            # Unsupported Media Type
            error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 400:
            # Bad Request
            error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        result = None
        # BUGFIX: 'content' could previously be left unbound (response object
        # unusable and no captured body), raising NameError below.
        content = None
        try:
            content = resp.read()
        except AttributeError:
            if info['body']:
                content = info['body']
            else:
                error_msg += "; The API response was empty"
        if content:
            try:
                result = json.loads(to_text(content, errors='surrogate_or_strict'))
            except (ValueError, UnicodeError) as e:
                # BUGFIX: json.JSONDecodeError does not exist on Python 2;
                # ValueError covers both (JSONDecodeError subclasses it on py3).
                error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
        # received an error status but no data with details on what failed
        if (info['status'] not in [200, 304]) and (result is None):
            self.module.fail_json(msg=error_msg)
        if result is None:
            # BUGFIX: a 200/304 with an unparseable/empty body previously
            # crashed with TypeError on the 'success' lookup below.
            self.module.fail_json(msg="Failed API call {0}; could not parse the API response".format(api_call) + error_msg)
        if not result['success']:
            error_msg += "; Error details: "
            for error in result['errors']:
                error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
                if 'error_chain' in error:
                    for chain_error in error['error_chain']:
                        error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
            self.module.fail_json(msg=error_msg)
        return result, info['status']

    def _cf_api_call(self, api_call, method='GET', payload=None):
        """Call the API and transparently follow result pagination.

        Returns ``(data, last_http_status)`` where ``data`` is the
        concatenation of every page's ``result`` list.
        """
        result, status = self._cf_simple_api_call(api_call, method, payload)
        data = result['result']
        if 'result_info' in result:
            pagination = result['result_info']
            if pagination['total_pages'] > 1:
                next_page = int(pagination['page']) + 1
                # strip "page" parameter from call parameters (if there are any)
                if '?' in api_call:
                    raw_api_call, query = api_call.split('?', 1)
                    parameters = [param for param in query.split('&') if not param.startswith('page')]
                else:
                    raw_api_call = api_call
                    parameters = []
                # BUGFIX: previously the query string was appended to
                # raw_api_call on every iteration with a fixed page number, so
                # the URL grew each pass and the same page was re-requested.
                # Rebuild the paged URL from scratch for every page instead.
                while next_page <= pagination['total_pages']:
                    page_parameters = ['page={0}'.format(next_page)] + parameters
                    paged_api_call = raw_api_call + '?' + '&'.join(page_parameters)
                    result, status = self._cf_simple_api_call(paged_api_call, method, payload)
                    data += result['result']
                    next_page += 1
        return data, status

    def _get_zone_id(self, zone=None):
        """Return the Cloudflare id of ``zone`` (default: the module's zone).

        Fails the module when zero or multiple zones match.
        """
        if not zone:
            zone = self.zone
        zones = self.get_zones(zone)
        if len(zones) > 1:
            self.module.fail_json(msg="More than one zone matches {0}".format(zone))
        if len(zones) < 1:
            self.module.fail_json(msg="No zone found with name {0}".format(zone))
        return zones[0]['id']

    def get_zones(self, name=None):
        """Return the list of zones visible to the account, optionally
        filtered by exact zone ``name``."""
        if not name:
            name = self.zone
        param = ''
        if name:
            param = '?' + urlencode({'name': name})
        zones, status = self._cf_api_call('/zones' + param)
        return zones

    def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
        """Return DNS records in ``zone_name`` filtered by type/name/content.

        Passing ``value=None`` explicitly disables the content filter, while
        the default ``''`` means "fall back to the module's value parameter".
        """
        if not zone_name:
            zone_name = self.zone
        if not type:
            type = self.type
        if not record:
            record = self.record
        # necessary because None as value means to override user
        # set module value
        if (not value) and (value is not None):
            value = self.value
        zone_id = self._get_zone_id()
        api_call = '/zones/{0}/dns_records'.format(zone_id)
        query = {}
        if type:
            query['type'] = type
        if record:
            query['name'] = record
        if value:
            query['content'] = value
        if query:
            api_call += '?' + urlencode(query)
        records, status = self._cf_api_call(api_call)
        return records

    def delete_dns_records(self, **kwargs):
        """Delete matching DNS records; honours check mode.

        With ``solo=True`` every record of the same name/type EXCEPT the one
        exactly matching the desired content is removed. Returns whether
        anything changed.
        """
        params = {}
        for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone']:
            if param in kwargs:
                params[param] = kwargs[param]
            else:
                params[param] = getattr(self, param)
        content = params['value']
        search_record = params['record']
        if params['type'] == 'SRV':
            if not (params['value'] is None or params['value'] == ''):
                # Cloudflare reports SRV content as "weight\tport\ttarget".
                content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
        if params['solo']:
            search_value = None
        else:
            search_value = content
        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
        for rr in records:
            if params['solo']:
                # In solo mode keep the single exactly-matching record.
                is_kept = ((rr['type'] == params['type']) and
                           (rr['name'] == search_record) and
                           (rr['content'] == content))
                if is_kept:
                    continue
            self.changed = True
            if not self.module.check_mode:
                result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
        return self.changed

    def ensure_dns_record(self, **kwargs):
        """Create or update one DNS record; honours check mode.

        Returns ``(record_or_records, changed)``.
        """
        params = {}
        for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone']:
            if param in kwargs:
                params[param] = kwargs[param]
            else:
                params[param] = getattr(self, param)
        search_value = params['value']
        search_record = params['record']
        new_record = None
        if (params['type'] is None) or (params['record'] is None):
            self.module.fail_json(msg="You must provide a type and a record to create a new record")
        if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
            if not params['value']:
                self.module.fail_json(msg="You must provide a non-empty value to create this record type")
            # there can only be one CNAME per record
            # ignoring the value when searching for existing
            # CNAME records allows us to update the value if it
            # changes
            if params['type'] == 'CNAME':
                search_value = None
            new_record = {
                "type": params['type'],
                "name": params['record'],
                "content": params['value'],
                "ttl": params['ttl']
            }
            if (params['type'] in ['A', 'AAAA', 'CNAME']):
                new_record["proxied"] = params["proxied"]
        if params['type'] == 'MX':
            for attr in [params['priority'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide priority and a value to create this record type")
            new_record = {
                "type": params['type'],
                "name": params['record'],
                "content": params['value'],
                "priority": params['priority'],
                "ttl": params['ttl']
            }
        if params['type'] == 'SRV':
            for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
            srv_data = {
                "target": params['value'],
                "port": params['port'],
                "weight": params['weight'],
                "priority": params['priority'],
                "name": params['record'][:-len('.' + params['zone'])],
                "proto": params['proto'],
                "service": params['service']
            }
            new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
            search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
        zone_id = self._get_zone_id(params['zone'])
        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
        # in theory this should be impossible as cloudflare does not allow
        # the creation of duplicate records but lets cover it anyways
        if len(records) > 1:
            self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
        # record already exists, check if it must be updated
        if len(records) == 1:
            cur_record = records[0]
            do_update = False
            if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
                do_update = True
            if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
                do_update = True
            if ('data' in new_record) and ('data' in cur_record):
                # BUGFIX: the previous cmp()-style "(a > b) - (a < b)" dict
                # ordering comparison raises TypeError on Python 3; simple
                # inequality is what was intended.
                if cur_record['data'] != new_record['data']:
                    do_update = True
            if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
                do_update = True
            if do_update:
                if self.module.check_mode:
                    result = new_record
                else:
                    result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
                self.changed = True
                return result, self.changed
            else:
                return records, self.changed
        if self.module.check_mode:
            result = new_record
        else:
            result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
        self.changed = True
        return result, self.changed
def main():
    """Ansible entry point: create, update or delete a Cloudflare DNS record."""
    module = AnsibleModule(
        argument_spec=dict(
            account_api_token=dict(required=True, no_log=True, type='str'),
            account_email=dict(required=True, type='str'),
            port=dict(required=False, default=None, type='int'),
            priority=dict(required=False, default=1, type='int'),
            proto=dict(required=False, default=None, type='str'),
            proxied=dict(required=False, default=False, type='bool'),
            record=dict(required=False, default='@', aliases=['name'], type='str'),
            service=dict(required=False, default=None, type='str'),
            solo=dict(required=False, default=None, type='bool'),
            state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
            timeout=dict(required=False, default=30, type='int'),
            ttl=dict(required=False, default=1, type='int'),
            type=dict(required=False, default=None, choices=['A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF'], type='str'),
            value=dict(required=False, default=None, aliases=['content'], type='str'),
            weight=dict(required=False, default=1, type='int'),
            # BUGFIX: a required parameter must not also declare a default;
            # AnsibleModule treats 'required' and 'default' as mutually
            # exclusive, so drop the redundant default=None.
            zone=dict(required=True, aliases=['domain'], type='str'),
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['record', 'type', 'value']),
            ('state', 'absent', ['record']),
            ('type', 'SRV', ['proto', 'service']),
        ]
        ),
    )
    # SRV weight/port/value must be given together (create/update) or all
    # omitted (delete-by-name); anything in between is ambiguous.
    if module.params['type'] == 'SRV':
        if not ((module.params['weight'] is not None and module.params['port'] is not None
                 and not (module.params['value'] is None or module.params['value'] == ''))
                or (module.params['weight'] is None and module.params['port'] is None
                    and (module.params['value'] is None or module.params['value'] == ''))):
            module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
    changed = False
    cf_api = CloudflareAPI(module)
    # sanity checks
    if cf_api.is_solo and cf_api.state == 'absent':
        module.fail_json(msg="solo=true can only be used with state=present")
    # perform add, delete or update (only the TTL can be updated) of one or
    # more records
    if cf_api.state == 'present':
        # delete all records matching record name + type
        if cf_api.is_solo:
            changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
        result, changed = cf_api.ensure_dns_record()
        if isinstance(result, list):
            module.exit_json(changed=changed, result={'record': result[0]})
        else:
            module.exit_json(changed=changed, result={'record': result})
    else:
        # force solo to False, just to be sure
        changed = cf_api.delete_dns_records(solo=False)
        module.exit_json(changed=changed)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
MSMBA/msmba-workflow-winter2016 | msmba-workflow-winter2016-team5/src/healthcare/PhysicianAsstApplication.py | 10 | 1133 | # This code is part of the MWP System
# Copyright (c) 2012 Benjamin Lubin (blubin@bu.com)
# Published under and subject to the GPLv2 license available at http://www.gnu.org/licenses/gpl-2.0.html
'''
Created on Dec 18, 2012
@author: blubin
'''
from frontend.roleApplication import RoleApplication;
from frontend.form import Type;
from HealthcareConstants import theflowname
class PhysicianAsstApplication(RoleApplication):
    """Front-end application for the PhysicianAsst role of the healthcare flow.

    Registers the "Admit" transition step and builds its admission form.
    """

    def __init__(self):
        super(PhysicianAsstApplication, self).__init__(theflowname, "PhysicianAsst")
        admit_name_fields = ["sequence", "FirstName", "LastName", "Birthday"]
        self.register_transition_step("Admit", self.patient_admit_form_creator,
                                      name_fields=admit_name_fields)

    def patient_admit_form_creator(self, stepname, form):
        """Populate the admission form: identifying label plus vital-sign fields."""
        form.add_task_label(fields=["FirstName", "LastName", "Birthday"])
        for field_type, field_name in ((Type.FLOAT, "Weight"),
                                       (Type.FLOAT, "Temperature"),
                                       (Type.SHORTSTRING, "BloodPressure")):
            form.add_field(field_type, field_name)
# Launch the PhysicianAsst user interface when run as a script.
if __name__ == '__main__':
    app = PhysicianAsstApplication();
    app.MainLoop();
takeshineshiro/nova | nova/api/openstack/compute/hide_server_addresses.py | 36 | 2997 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extension for hiding server addresses in certain states."""
from oslo_config import cfg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import vm_states
# Config option: the vm_states whose network addresses are hidden from API
# responses (by default, instances that are still building).
opts = [
    cfg.ListOpt('osapi_hide_server_address_states',
                default=[vm_states.BUILDING],
                help='List of instance states that should hide network info'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
ALIAS = 'os-hide-server-addresses'
# Soft authorizer: used below as a boolean policy check rather than raising.
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class Controller(wsgi.Controller):
    """Blanks the 'addresses' section of server responses for instances whose
    vm_state is in the configured hide list."""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        # NOTE(jkoelker) _ is not considered uppercase ;)
        known_states = [getattr(vm_states, attr)
                        for attr in dir(vm_states)
                        if attr.isupper()]
        # Keep only configured entries that name a real vm_state.
        self.hide_address_states = [
            state.lower()
            for state in CONF.osapi_hide_server_address_states
            if state in known_states
        ]

    def _perhaps_hide_addresses(self, instance, resp_server):
        if instance.get('vm_state') in self.hide_address_states:
            resp_server['addresses'] = {}

    @wsgi.extends
    def show(self, req, resp_obj, id):
        if not authorize(req.environ['nova.context']):
            return
        body = resp_obj.obj
        if 'server' in body and 'addresses' in body['server']:
            instance = req.get_db_instance(id)
            self._perhaps_hide_addresses(instance, body['server'])

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not authorize(req.environ['nova.context']):
            return
        for server in list(resp_obj.obj['servers']):
            if 'addresses' in server:
                instance = req.get_db_instance(server['id'])
                self._perhaps_hide_addresses(instance, server)
class HideServerAddresses(extensions.V3APIExtensionBase):
    """Support hiding server addresses in certain states."""
    name = 'HideServerAddresses'
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        ext = extensions.ControllerExtension(self, 'servers', Controller())
        return [ext]

    def get_resources(self):
        # This extension adds no new top-level resources.
        return []
| apache-2.0 |
edgarli/proj8 | env/lib/python3.4/site-packages/simplejson/tests/test_for_json.py | 143 | 2767 | import unittest
import simplejson as json
class ForJson(object):
    """Plain object exposing the simplejson ``for_json`` hook."""

    def for_json(self):
        payload = {'for_json': 1}
        return payload
class NestedForJson(object):
    """Object whose ``for_json`` result nests another for_json-aware object."""

    def for_json(self):
        return dict(nested=ForJson())
class ForJsonList(object):
    """Plain object whose ``for_json`` hook yields a list."""

    def for_json(self):
        result = ['list']
        return result
class DictForJson(dict):
    """dict subclass whose ``for_json`` hook replaces its own contents."""

    def for_json(self):
        return dict(alpha=1)
class ListForJson(list):
    """list subclass whose ``for_json`` hook returns a fixed list."""

    def for_json(self):
        replacement = ['list']
        return replacement
class TestForJson(unittest.TestCase):
    """End-to-end checks that the ``for_json`` hook is honoured by dumps().

    NOTE: ``json`` here is simplejson (imported as ``json`` at module level);
    the ``for_json=`` keyword is simplejson-specific.
    """
    def assertRoundTrip(self, obj, other, for_json=True):
        # Serialize obj (with the given for_json flag) and assert the decoded
        # result equals `other`. for_json=None means "do not pass the keyword
        # at all", i.e. exercise the library default.
        if for_json is None:
            # None will use the default
            s = json.dumps(obj)
        else:
            s = json.dumps(obj, for_json=for_json)
        self.assertEqual(
            json.loads(s),
            other)
    def test_for_json_encodes_stand_alone_object(self):
        self.assertRoundTrip(
            ForJson(),
            ForJson().for_json())
    def test_for_json_encodes_object_nested_in_dict(self):
        self.assertRoundTrip(
            {'hooray': ForJson()},
            {'hooray': ForJson().for_json()})
    def test_for_json_encodes_object_nested_in_list_within_dict(self):
        self.assertRoundTrip(
            {'list': [0, ForJson(), 2, 3]},
            {'list': [0, ForJson().for_json(), 2, 3]})
    def test_for_json_encodes_object_nested_within_object(self):
        self.assertRoundTrip(
            NestedForJson(),
            {'nested': {'for_json': 1}})
    def test_for_json_encodes_list(self):
        self.assertRoundTrip(
            ForJsonList(),
            ForJsonList().for_json())
    def test_for_json_encodes_list_within_object(self):
        self.assertRoundTrip(
            {'nested': ForJsonList()},
            {'nested': ForJsonList().for_json()})
    def test_for_json_encodes_dict_subclass(self):
        self.assertRoundTrip(
            DictForJson(a=1),
            DictForJson(a=1).for_json())
    def test_for_json_encodes_list_subclass(self):
        self.assertRoundTrip(
            ListForJson(['l']),
            ListForJson(['l']).for_json())
    def test_for_json_ignored_if_not_true_with_dict_subclass(self):
        # With for_json disabled, subclasses serialize as their plain contents.
        for for_json in (None, False):
            self.assertRoundTrip(
                DictForJson(a=1),
                {'a': 1},
                for_json=for_json)
    def test_for_json_ignored_if_not_true_with_list_subclass(self):
        for for_json in (None, False):
            self.assertRoundTrip(
                ListForJson(['l']),
                ['l'],
                for_json=for_json)
    def test_raises_typeerror_if_for_json_not_true_with_object(self):
        # Plain objects are not serializable unless for_json is enabled.
        self.assertRaises(TypeError, json.dumps, ForJson())
        self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
| artistic-2.0 |
zerotired/kotori | kotori/daq/decoder/__init__.py | 1 | 1246 | # -*- coding: utf-8 -*-
# (c) 2019-2020 Andreas Motl <andreas@getkotori.org>
from kotori.daq.decoder.airrohr import AirrohrDecoder
from kotori.daq.decoder.tasmota import TasmotaSensorDecoder, TasmotaStateDecoder
from kotori.daq.decoder.schema import MessageType
class DecoderInfo:
    """Probe result: the detected message type and the decoder class to use.

    Both attributes stay ``None`` until ``DecoderManager.probe()`` succeeds.
    """

    def __init__(self):
        self.message_type, self.decoder = None, None
class DecoderManager:
    """Chooses the payload decoder matching the topology's slot suffix."""

    def __init__(self, topology):
        self.topology = topology
        self.info = DecoderInfo()

    def probe(self):
        """Fill ``self.info`` and return True when a decoder matches."""
        if 'slot' not in self.topology:
            return False
        # Ordered (suffix, decoder) candidates; first match wins.
        candidates = (
            ('airrohr.json', AirrohrDecoder),      # Airrohr
            ('SENSOR', TasmotaSensorDecoder),      # Tasmota sensor telemetry
            ('STATE', TasmotaStateDecoder),        # Tasmota device state
        )
        slot = self.topology.slot
        for suffix, decoder in candidates:
            if slot.endswith(suffix):
                self.info.message_type = MessageType.DATA_CONTAINER
                self.info.decoder = decoder
                return True
        return False
| agpl-3.0 |
apache/incubator-allura | ForgeDiscussion/forgediscussion/tasks.py | 3 | 1544 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from pylons import tmpl_context as c
from allura.lib.decorators import task
log = logging.getLogger(__name__)
@task
def calc_forum_stats(shortname):
    """Recompute cached statistics for the forum named ``shortname``
    within the current app context."""
    from forgediscussion import model as DM
    forum = DM.Forum.query.get(shortname=shortname,
                               app_config_id=c.app.config._id)
    if forum is not None:
        forum.update_stats()
    else:
        log.error("Error looking up forum: %r", shortname)
@task
def calc_thread_stats(thread_id):
    """Recompute cached statistics for the thread with id ``thread_id``."""
    from forgediscussion import model as DM
    thread = DM.ForumThread.query.get(_id=thread_id)
    if thread is None:
        log.error("Error looking up thread: %r", thread_id)
        # BUGFIX: bail out here; previously execution fell through and
        # crashed with AttributeError by calling update_stats() on None.
        return
    thread.update_stats()
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/tests/modeltests/custom_columns/models.py | 34 | 1244 | """
17. Custom column/table names
If your database column name is different than your model attribute, use the
``db_column`` parameter. Note that you'll use the field's name, not its column
name, in API usage.
If your database table name is different than your model name, use the
``db_table`` Meta attribute. This has no effect on the API used to
query the database.
If you need to use a table name for a many-to-many relationship that differs
from the default generated name, use the ``db_table`` parameter on the
``ManyToManyField``. This has no effect on the API for querying the database.
"""
from django.db import models
class Author(models.Model):
    """Author model exercising custom column and table names.

    Attribute names differ from the underlying columns ('firstname'/'last')
    via db_column, and the table name is overridden via Meta.db_table.
    """
    first_name = models.CharField(max_length=30, db_column='firstname')
    last_name = models.CharField(max_length=30, db_column='last')
    def __unicode__(self):
        return u'%s %s' % (self.first_name, self.last_name)
    class Meta:
        db_table = 'my_author_table'
        ordering = ('last_name','first_name')
class Article(models.Model):
    """Article model exercising a custom M2M join-table name (db_table on
    the ManyToManyField) while keeping the default model table name."""
    headline = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author, db_table='my_m2m_table')
    def __unicode__(self):
        return self.headline
    class Meta:
        ordering = ('headline',)
sadaf2605/django | tests/gis_tests/gdal_tests/test_ds.py | 44 | 11126 | import os
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from ..test_data import TEST_DATA, TestDS, get_ds_file
if HAS_GDAL:
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, GDALException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
# List of acceptable data sources.
# Each TestDS entry describes one fixture data source together with the
# values the tests below expect to read back from it (feature/field counts,
# geometry type, extent, SRS WKT, per-field values and feature ids).
ds_list = (
    TestDS(
        'test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
        fields={'dbl': OFTReal, 'int': OFTInteger, 'str': OFTString},
        extent=(-1.35011, 0.166623, -0.524093, 0.824508), # Got extent from QGIS
        srs_wkt=(
            'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
            '6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
            '0.017453292519943295]]'
        ),
        field_values={
            'dbl': [float(i) for i in range(1, 6)],
            'int': list(range(1, 6)),
            'str': [str(i) for i in range(1, 6)],
        },
        fids=range(5)
    ),
    TestDS(
        'test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D',
        # The VRT driver was renamed to OGR_VRT in GDAL 2.
        driver='OGR_VRT' if GDAL_VERSION >= (2, 0) else 'VRT',
        fields={
            'POINT_X': OFTString,
            'POINT_Y': OFTString,
            'NUM': OFTString,
        }, # VRT uses CSV, which all types are OFTString.
        extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
        field_values={
            'POINT_X': ['1.0', '5.0', '100.0'],
            'POINT_Y': ['2.0', '23.0', '523.5'],
            'NUM': ['5', '17', '23'],
        },
        fids=range(1, 4)
    ),
    TestDS(
        'test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
        driver='ESRI Shapefile',
        fields={'float': OFTReal, 'int': OFTInteger, 'str': OFTString},
        extent=(-1.01513, -0.558245, 0.161876, 0.839637), # Got extent from QGIS
        srs_wkt=(
            'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
            '6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
            '0.017453292519943295]]'
        ),
    )
)
# Data source that does not exist on disk; used for failure-path tests.
bad_ds = (TestDS('foo'),)
@skipUnless(HAS_GDAL, "GDAL is required")
class DataSourceTest(unittest.TestCase):
    """Exercises the GeoDjango DataSource/Layer/Feature wrappers against the
    fixture data sources declared in ds_list/bad_ds above."""
    def test01_valid_shp(self):
        "Testing valid SHP Data Source files."
        for source in ds_list:
            # Loading up the data source
            ds = DataSource(source.ds)
            # Making sure the layer count is what's expected (only 1 layer in a SHP file)
            self.assertEqual(1, len(ds))
            # Making sure GetName works
            self.assertEqual(source.ds, ds.name)
            # Making sure the driver name matches up
            self.assertEqual(source.driver, str(ds.driver))
            # Making sure indexing works
            with self.assertRaises(OGRIndexError):
                ds[len(ds)]
    def test02_invalid_shp(self):
        "Testing invalid SHP files for the Data Source."
        for source in bad_ds:
            with self.assertRaises(GDALException):
                DataSource(source.ds)
    def test03a_layers(self):
        "Testing Data Source Layers."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer, this tests DataSource.__iter__
            for layer in ds:
                # Making sure we get the number of features we expect
                self.assertEqual(len(layer), source.nfeat)
                # Making sure we get the number of fields we expect
                self.assertEqual(source.nfld, layer.num_fields)
                self.assertEqual(source.nfld, len(layer.fields))
                # Testing the layer's extent (an Envelope), and its properties
                self.assertIsInstance(layer.extent, Envelope)
                self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
                self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
                self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
                self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
                # Now checking the field names.
                flds = layer.fields
                for f in flds:
                    self.assertIn(f, source.fields)
                # Negative FIDs are not allowed.
                with self.assertRaises(OGRIndexError):
                    layer.__getitem__(-1)
                with self.assertRaises(OGRIndexError):
                    layer.__getitem__(50000)
                if hasattr(source, 'field_values'):
                    fld_names = source.field_values.keys()
                    # Testing `Layer.get_fields` (which uses Layer.__iter__)
                    for fld_name in fld_names:
                        self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
                    # Testing `Layer.__getitem__`.
                    for i, fid in enumerate(source.fids):
                        feat = layer[fid]
                        self.assertEqual(fid, feat.fid)
                        # Maybe this should be in the test below, but we might as well test
                        # the feature values here while in this loop.
                        for fld_name in fld_names:
                            self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
    def test03b_layer_slice(self):
        "Test indexing and slicing on Layers."
        # Using the first data-source because the same slice
        # can be used for both the layer and the control values.
        source = ds_list[0]
        ds = DataSource(source.ds)
        sl = slice(1, 3)
        feats = ds[0][sl]
        for fld_name in ds[0].fields:
            test_vals = [feat.get(fld_name) for feat in feats]
            control_vals = source.field_values[fld_name][sl]
            self.assertEqual(control_vals, test_vals)
    def test03c_layer_references(self):
        """
        Ensure OGR objects keep references to the objects they belong to.
        """
        source = ds_list[0]
        # See ticket #9448.
        def get_layer():
            # This DataSource object is not accessible outside this
            # scope. However, a reference should still be kept alive
            # on the `Layer` returned.
            ds = DataSource(source.ds)
            return ds[0]
        # Making sure we can call OGR routines on the Layer returned.
        lyr = get_layer()
        self.assertEqual(source.nfeat, len(lyr))
        self.assertEqual(source.gtype, lyr.geom_type.num)
        # Same issue for Feature/Field objects, see #18640
        self.assertEqual(str(lyr[0]['str']), "1")
    def test04_features(self):
        "Testing Data Source Features."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer
            for layer in ds:
                # Incrementing through each feature in the layer
                for feat in layer:
                    # Making sure the number of fields, and the geometry type
                    # are what's expected.
                    self.assertEqual(source.nfld, len(list(feat)))
                    self.assertEqual(source.gtype, feat.geom_type)
                    # Making sure the fields match to an appropriate OFT type.
                    for k, v in source.fields.items():
                        # Making sure we get the proper OGR Field instance, using
                        # a string value index for the feature.
                        self.assertIsInstance(feat[k], v)
                    # Testing Feature.__iter__
                    for fld in feat:
                        self.assertIn(fld.name, source.fields.keys())
    def test05_geometries(self):
        "Testing Geometries from Data Source Features."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer and feature.
            for layer in ds:
                for feat in layer:
                    g = feat.geom
                    # Making sure we get the right Geometry name & type
                    self.assertEqual(source.geom, g.geom_name)
                    self.assertEqual(source.gtype, g.geom_type)
                    # Making sure the SpatialReference is as expected.
                    if hasattr(source, 'srs_wkt'):
                        self.assertEqual(
                            source.srs_wkt,
                            # Depending on lib versions, WGS_84 might be WGS_1984
                            g.srs.wkt.replace('SPHEROID["WGS_84"', 'SPHEROID["WGS_1984"')
                        )
    def test06_spatial_filter(self):
        "Testing the Layer.spatial_filter property."
        ds = DataSource(get_ds_file('cities', 'shp'))
        lyr = ds[0]
        # When not set, it should be None.
        self.assertIsNone(lyr.spatial_filter)
        # Must be set a/an OGRGeometry or 4-tuple.
        with self.assertRaises(TypeError):
            lyr._set_spatial_filter('foo')
        # Setting the spatial filter with a tuple/list with the extent of
        # a buffer centering around Pueblo.
        with self.assertRaises(ValueError):
            lyr._set_spatial_filter(list(range(5)))
        filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
        lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
        self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
        # IDIOM: list(lyr) replaces the redundant [feat for feat in lyr] copy.
        feats = list(lyr)
        self.assertEqual(1, len(feats))
        self.assertEqual('Pueblo', feats[0].get('Name'))
        # Setting the spatial filter with an OGRGeometry for buffer centering
        # around Houston.
        filter_geom = OGRGeometry(
            'POLYGON((-96.363151 28.763374,-94.363151 28.763374,'
            '-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))'
        )
        lyr.spatial_filter = filter_geom
        self.assertEqual(filter_geom, lyr.spatial_filter)
        feats = list(lyr)
        self.assertEqual(1, len(feats))
        self.assertEqual('Houston', feats[0].get('Name'))
        # Clearing the spatial filter by setting it to None. Now
        # should indicate that there are 3 features in the Layer.
        lyr.spatial_filter = None
        self.assertEqual(3, len(lyr))
    def test07_integer_overflow(self):
        "Testing that OFTReal fields, treated as OFTInteger, do not overflow."
        # Using *.dbf from Census 2010 TIGER Shapefile for Texas,
        # which has land area ('ALAND10') stored in a Real field
        # with no precision.
        ds = DataSource(os.path.join(TEST_DATA, 'texas.dbf'))
        feat = ds[0][0]
        # Reference value obtained using `ogrinfo`.
        self.assertEqual(676586997978, feat.get('ALAND10'))
| bsd-3-clause |
tinyms/ArchiveX | tinyms/core/common.py | 1 | 14690 | __author__ = 'tinyms'
#coding=UTF8
import os
import sys
import re
import codecs
import hashlib
import json
#import urllib.request
#import urllib.parse
import time
import datetime
import decimal
import uuid
from imp import find_module, load_module, acquire_lock, release_lock
from tornado.template import Template
# import psycopg2
# import psycopg2.extras
#
#
# class Postgres():
# DATABASE_NAME = "postgres"
# USER_NAME = "postgres"
# PASSWORD = ""
#
# @staticmethod
# #Connect to Postgres Database
# def open():
# return psycopg2.connect(database=Postgres.DATABASE_NAME,
# user=Postgres.USER_NAME,
# password=Postgres.PASSWORD)
#
# @staticmethod
# def update(sql, params, return_col_name=None):
#
# """
# for Insert,Update,Delete
# :param sql:
# :param params:
# :param return_col_name: last insert row id etc.
# :return:
# """
# if return_col_name:
# sql += " RETURNING %s" % return_col_name
# cnn = None
# try:
# cnn = Postgres.open()
# cur = cnn.cursor()
# cur.execute(sql, params)
# if return_col_name:
# result = cur.fetchone()[0]
# else:
# result = True
# cnn.commit()
# except psycopg2.DatabaseError as e:
# print("Error %s" % e)
# cnn.rollback()
# result = False
# finally:
# if cnn:
# cnn.close()
#
# return result
#
# @staticmethod
# #Batch Insert,Update,Delete
# def update_many(sql, arr_params):
# try:
# cnn = Postgres.open()
# cur = cnn.cursor()
# cur.executemany(sql, arr_params)
# cnn.commit()
# except psycopg2.DatabaseError as e:
# print("Error %s" % e)
# finally:
# if cnn:
# cnn.close()
#
# @staticmethod
# #Query DataSet
# def many(sql, params=(), callback=None):
# dataset = list()
# cnn = None
# try:
# cnn = Postgres.open()
# cur = cnn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# cur.execute(sql, params)
# rows = cur.fetchall()
# for row in rows:
# c = row.copy()
# if callback:
# callback(c)
# dataset.append(c)
# cur.close()
# except psycopg2.DatabaseError as e:
# print("Error %s" % e)
# finally:
# if cnn:
# cnn.close()
# return dataset
#
# @staticmethod
# #First Row Data
# def row(sql, params, callback=None):
# items = Postgres.many(sql, params, callback)
# if len(items) > 0:
# return items[0]
# return None
#
# @staticmethod
# #First Column Data
# def col(sql, params, callback=None):
# items = Postgres.many(sql, params, callback)
# cols = list()
# for item in items:
# values = [i for i in item.values()]
# if len(values) > 0:
# cols.append(values[0])
# return cols
#
# @staticmethod
# #First Row And First Column
# def one(sql, params=(), callback=None):
# first_col = Postgres.col(sql, params, callback)
# if len(first_col) > 0:
# return first_col[0]
# return None
#
# @staticmethod
# #Store Proc, Return Single Result
# def proc_one(name, params, callback=None):
# first_col = Postgres.proc_many(name, params, callback)
# if len(first_col) > 0:
# return first_col[0]
# return None
#
# @staticmethod
# #Store Proc, Return DataSet
# def proc_many(name, params, callback=None):
# dataset = list()
# cnn = None
# try:
# cnn = Postgres.open()
# cur = cnn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# rows = cur.callproc(name, params)
# for row in rows:
# c = row.copy()
# if callback:
# callback(c)
# dataset.append(c)
# cur.close()
# except psycopg2.DatabaseError as e:
# print("Error %s" % e)
# finally:
# if cnn:
# cnn.close()
# return dataset
#
# @staticmethod
# #Return all cols name from current Query cursor
# def col_names(cur):
# names = list()
# for col in cur.description:
# names.append(col.name)
# return names
class JsonEncoder(json.JSONEncoder):
    """JSON encoder for the extra types used by this app.

    Decimals are serialized as floats; dates and datetimes use
    ``Utils.format_datetime_short``; times use ``Utils.format_time``.
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        elif isinstance(o, datetime.date):
            # Note: datetime.datetime subclasses datetime.date, so this
            # branch also handles datetimes (same short format either way).
            return Utils.format_datetime_short(o)
        elif isinstance(o, datetime.datetime):
            return Utils.format_datetime_short(o)
        elif isinstance(o, datetime.time):
            return Utils.format_time(o)
        # Bug fix: the original fell through without returning, so the C
        # encoder received None and emitted ``null`` for unsupported types
        # instead of raising TypeError as JSONEncoder.default() must.
        return super(JsonEncoder, self).default(o)
class Utils():
    """Static helper collection: UTF-8 text file I/O, regex-based parsing
    of numbers/dates/times, JSON (de)serialization and date formatting."""

    def __init__(self):
        pass

    @staticmethod
    def text_read(f_name, join=True):
        """Read a UTF-8 text file.

        Returns the whole content as one string when ``join`` is true,
        otherwise the list of raw lines (with newlines).  A missing file
        yields "".
        """
        if not os.path.exists(f_name):
            return ""
        f = codecs.open(f_name, "r", "utf-8")
        all_ = f.readlines()
        f.close()
        if join:
            return "".join(all_)
        # Bug fix: the original returned the *builtin* ``all`` function
        # here instead of the lines that were just read.
        return all_

    @staticmethod
    def text_write(f_name, lines=list(), suffix="\n"):
        """Write ``lines`` (a list of strings or a single string) to a
        UTF-8 file, appending ``suffix`` after each element / the string.

        NOTE(review): the mutable default ``lines=list()`` is only read,
        never mutated, so it is harmless but unconventional.
        """
        f = codecs.open(f_name, "w+", "utf-8")
        if isinstance(lines, list):
            for line in lines:
                f.write(line + suffix)
        else:
            f.write(lines)
            f.write(suffix)
        f.close()

    @staticmethod
    def trim(text):
        """Remove *all* whitespace from ``text``, not just the ends."""
        return "".join(text.split())

    @staticmethod
    def uniq_index():
        """Return a time-based UUID (uuid1) usable as a unique index."""
        return uuid.uuid1()

    @staticmethod
    def render(tpl_text, context):
        """
        Render a tornado template.
        :param tpl_text: template text
        :param context: dict object
        :return: bytes produced by tornado's Template.generate()
        """
        tpl = Template(tpl_text)
        return tpl.generate(context)

    @staticmethod
    def md5(s):
        """Hex digest of ``s``.

        NOTE(review): despite the name this computes RIPEMD-160, not MD5;
        kept as-is for compatibility with already-stored digests.  The
        'ripemd160' algorithm's availability depends on the OpenSSL build.
        """
        h = hashlib.new('ripemd160')
        h.update(bytearray(s.encode("utf8")))
        return h.hexdigest()

    @staticmethod
    def current_datetime():
        """Return the current local datetime."""
        from datetime import datetime as tmp
        return tmp.now()

    @staticmethod
    def mkdirs(path):
        """Create ``path`` (and parents).  True if created, False if it
        already existed."""
        if not os.path.exists(path):
            os.makedirs(path)
            return True
        return False

    @staticmethod
    def parse_int(text):
        """Return the first (possibly negative) integer in ``text`` or None."""
        nums = Utils.parse_int_array(text)
        if len(nums) > 0:
            return int(nums[0])
        return None

    @staticmethod
    def parse_int_array(text):
        """Return every (possibly negative) integer found in ``text``."""
        arr = list()
        p = re.compile("[-]?\\d+", re.M)
        nums = p.findall(text)
        if len(nums) > 0:
            arr = [int(s) for s in nums]
        return arr

    @staticmethod
    def parse_time_text(text):
        """Return the first "HH:MM" substring of ``text``, or ""."""
        if not text:
            return ""
        p = re.compile("\\d{2}:\\d{2}")
        dates = p.findall(text)
        if len(dates) > 0:
            return dates[0]
        return ""

    @staticmethod
    def parse_time(text):
        """Parse the first "HH:MM" in ``text`` into datetime.time, or None."""
        time_text = Utils.parse_time_text(text)
        if not time_text:
            return None
        time_struct = time.strptime(time_text, "%H:%M")
        return datetime.time(time_struct.tm_hour, time_struct.tm_min)

    @staticmethod
    def parse_date_text(text):
        """Return the first "YYYY-MM-DD" substring of ``text``, or ""."""
        if not text:
            return ""
        p = re.compile("\\d{4}-\\d{2}-\\d{2}")
        dates = p.findall(text)
        if len(dates) > 0:
            return dates[0]
        return ""

    @staticmethod
    def parse_date(text):
        """Parse the first "YYYY-MM-DD" in ``text`` into datetime.date, or None."""
        date_text = Utils.parse_date_text(text)
        if not date_text:
            return None
        from datetime import datetime
        return datetime.strptime(date_text, "%Y-%m-%d").date()

    @staticmethod
    def parse_datetime_text(text):
        """Return the first "YYYY-MM-DD HH:MM" substring of ``text``, or ""."""
        if not text:
            return ""
        p = "\\d{4}-\\d{2}-\\d{2}\\s{1}\\d{2}:\\d{2}"
        r = re.compile(p)
        matchs = r.findall(text)
        if len(matchs) > 0:
            return matchs[0]
        return ""

    @staticmethod
    def parse_datetime(text):
        """Parse the first "YYYY-MM-DD HH:MM" in ``text`` into a datetime, or None."""
        datetime_text = Utils.parse_datetime_text(text)
        if not datetime_text:
            return None
        from datetime import datetime
        return datetime.strptime(datetime_text, "%Y-%m-%d %H:%M")

    @staticmethod
    def parse_float(text):
        """Return the first decimal number (e.g. "1.5") in ``text`` or None."""
        floats = Utils.parse_float_array(text)
        if len(floats) > 0:
            return float(floats[0])
        return None

    @staticmethod
    def parse_float_array(text):
        """Return every decimal number ("d.d" form) found in ``text``."""
        p = re.compile("[-]?\\d+\\.\\d+", re.M)
        return [float(s) for s in p.findall(text)]

    @staticmethod
    def parse_number_array(text):
        """
        Return every number, int or float, found in ``text`` (as floats).
        :param text:
        :return:
        """
        p = re.compile("[-]?\\d+[\\.]?[\\d]*", re.M)
        return [float(s) for s in p.findall(text)]

    @staticmethod
    def encode(obj):
        """Serialize ``obj`` to JSON using the app's JsonEncoder."""
        return json.dumps(obj, cls=JsonEncoder)

    @staticmethod
    def decode(text):
        """Deserialize a JSON string."""
        return json.loads(text)

    @staticmethod
    def matrix_reverse(arr):
        """
        Transpose a matrix (rows become columns).
        :param arr: non-empty list of equal-length rows
        :return: transposed list of lists
        """
        return [[r[col] for r in arr] for col in range(len(arr[0]))]

    @staticmethod
    def combine_text_files(folder, target_file_name):
        """Concatenate source files listed in ``folder/combine.list`` (a
        JSON mapping of file extension -> list of file names) into
        ``target_file_name.<ext>`` inside ``folder``."""
        text = Utils.text_read(os.path.join(folder, "combine.list"))
        cfg = json.loads(text)
        for key in cfg.keys():
            files = cfg[key]
            if len(files) > 0:
                combine_file = os.path.join(folder, target_file_name + "." + key)
                if os.path.exists(combine_file):
                    os.remove(combine_file)
                all_ = list()
                for file_ in files:
                    path = os.path.join(folder, file_)
                    all_.append(Utils.text_read(path))
                Utils.text_write(combine_file, all_)

    @staticmethod
    def is_email(s):
        """Loose check that ``s`` looks like an e-mail address."""
        p = r"[^@]+@[^@]+\.[^@]+"
        if re.match(p, s):
            return True
        return False

    @staticmethod
    def email_account_name(s):
        """Return the part of an e-mail address before the '@', or ""."""
        # Match everything before the '@'.
        p = r".*(?=@)"
        r = re.compile(p)
        matchs = r.findall(s)
        if len(matchs) > 0:
            return matchs[0]
        return ""

    @staticmethod
    def format_year_month(date_obj):
        """Format a date as 'YYYY-MM'; "" for falsy input."""
        if not date_obj:
            return ""
        return date_obj.strftime('%Y-%m')

    @staticmethod
    def format_datetime(date_obj):
        """Format a datetime as 'YYYY-MM-DD HH:MM:SS'; "" for falsy input."""
        if not date_obj:
            return ""
        return date_obj.strftime('%Y-%m-%d %H:%M:%S')

    @staticmethod
    def format_datetime_short(date_obj):
        """Format a datetime as 'YYYY-MM-DD HH:MM'; "" for falsy input."""
        if not date_obj:
            return ""
        return date_obj.strftime('%Y-%m-%d %H:%M')

    @staticmethod
    def format_date(date_obj):
        """Format a date as 'YYYY-MM-DD'; "" for falsy input."""
        if not date_obj:
            return ""
        return date_obj.strftime('%Y-%m-%d')

    @staticmethod
    def format_time(datetime_obj):
        """Format a time or datetime as 'HH:MM'; "" for falsy or other input."""
        if not datetime_obj:
            return ""
        if isinstance(datetime_obj, datetime.time):
            # Promote the bare time to a full datetime so strftime is uniform.
            curr_date = Utils.current_datetime()
            dt = datetime.datetime.combine(curr_date, datetime_obj)
            return dt.strftime('%H:%M')
        elif isinstance(datetime_obj, datetime.datetime):
            return datetime_obj.strftime('%H:%M')
        return ""
class Plugin():
    """Simple plugin registry.

    ``ObjectPool`` maps an interface (base class) to the list of plugin
    instances implementing it.  ``load()`` scans the ``config`` directory
    under the current working directory, (re)imports every module found
    and instantiates each class listed in a module's ``__export__``.
    """

    def __init__(self):
        pass

    # interface (base class) -> list of plugin instances
    ObjectPool = dict()

    @staticmethod
    def one(type_):
        """Return the first plugin registered for ``type_``, or None.

        Bug fix: ``get()`` returns None for an unregistered type; the
        original then crashed with ``len(None)`` instead of returning None.
        """
        plugins = Plugin.get(type_)
        if plugins:
            return plugins[0]
        return None

    @staticmethod
    def get(type_, class_full_name=""):
        """
        get plugin class object instance
        :param type_: extends plugin interface
        :param class_full_name: class name with module name
        :return: the list of instances when no name is given (None if the
                 type is unregistered), else the matching instance or None
        """
        if not class_full_name:
            return Plugin.ObjectPool.get(type_)
        # Robustness fix: tolerate unregistered types (None -> empty list)
        # instead of crashing while iterating None.
        arr = Plugin.ObjectPool.get(type_) or []
        for t in arr:
            name = "%s.%s" % (t.__class__.__module__, t.__class__.__name__)
            if name.lower() == class_full_name.lower():
                return t

    @staticmethod
    def load():
        """Scan ./config for plugin modules, import them and populate
        ``ObjectPool``.  ``__init__.py`` files and ``__pycache__`` entries
        are skipped; previously imported modules are re-imported.

        NOTE(review): this uses the deprecated ``imp`` API (removed in
        Python 3.12), and the file handle returned by ``find_module`` is
        never closed -- worth modernizing to ``importlib``.
        """
        Plugin.ObjectPool.clear()
        path = os.path.join(os.getcwd(), "config")
        wid = os.walk(path)
        plugins = []
        print("Search config modules..")
        for rootDir, pathList, fileList in wid:
            if rootDir.find("__pycache__") != -1:
                continue
            for file_ in fileList:
                if file_.find("__init__.py") != -1:
                    continue
                #re \\.py[c]?$
                if file_.endswith(".py") or file_.endswith(".pyc"):
                    plugins.append((os.path.splitext(file_)[0], rootDir))
        print(plugins)
        print("Instance all Config class.")
        for (name, dir_) in plugins:
            try:
                acquire_lock()
                file_, filename, desc = find_module(name, [dir_])
                prev = sys.modules.get(name)
                if prev:
                    # Force a fresh import of the module.
                    del sys.modules[name]
                module_ = load_module(name, file_, filename, desc)
            finally:
                release_lock()
            if hasattr(module_, "__export__"):
                attrs = [getattr(module_, x) for x in module_.__export__]
                for attr in attrs:
                    # Register one instance under the class's first base
                    # (the plugin interface it implements).
                    parents = attr.__bases__
                    if len(parents) > 0:
                        parent = parents[0]
                        if not Plugin.ObjectPool.get(parent):
                            Plugin.ObjectPool[parent] = [attr()]
                        else:
                            Plugin.ObjectPool[parent].append(attr())
        print("Config init completed.")
# from: http://variable-scope.com/posts/storing-and-verifying-passwords-with-sqlalchemy
import bcrypt
class PasswordHash:
    """Wrapper around a bcrypt hash that supports equality comparison with
    candidate plaintext passwords (``stored == 'secret'``) and transparent
    upgrading to a stronger work factor on successful comparison."""

    def __init__(self, hash_, rounds=None):
        # Validate that this at least looks like a bcrypt hash (bytes).
        if len(hash_) != 60:
            raise ValueError('bcrypt hash should be 60 chars.')
        elif hash_.count(b'$') != 3:
            raise ValueError('bcrypt hash should have 3x "$".')
        self.hash = hash_
        # The work factor lives in the third '$'-separated field.
        parts = str(self.hash).split('$')
        self.rounds = int(parts[2])
        # Target work factor, used to upgrade weaker hashes on login.
        self.desired_rounds = rounds or self.rounds

    def __eq__(self, candidate):
        """Hashes the candidate string and compares it to the stored hash."""
        if isinstance(candidate, str):
            # bcrypt operates on byte strings.
            candidate = candidate.encode('utf-8')
        if bcrypt.hashpw(candidate, self.hash) != self.hash:
            return False
        # Correct password: opportunistically upgrade the stored hash if
        # the desired work factor has been raised since it was created.
        if self.rounds < self.desired_rounds:
            self.rehash(candidate)
        return True

    def __repr__(self):
        """Simple object representation."""
        return f'<{type(self).__name__}: {self.hash}>'

    @classmethod
    def new(cls, password, rounds):
        """Creates a PasswordHash from the given plaintext password."""
        if isinstance(password, str):
            password = password.encode('utf-8')
        return cls(cls._new(password, rounds))

    @classmethod
    def coerce(cls, key, value):
        """Ensure that loaded values are PasswordHashes.

        NOTE(review): delegates to ``super().coerce`` -- this only works
        when mixed into a class that defines ``coerce`` (e.g. a SQLAlchemy
        TypeDecorator); on a bare instance it would raise AttributeError.
        """
        if isinstance(value, PasswordHash):
            return value
        return super(PasswordHash, cls).coerce(key, value)

    @staticmethod
    def _new(password, rounds):
        """
        Return a new bcrypt hash for the given password and rounds.
        note: implemented to avoid repetition between `new` and `rehash`.
        """
        return bcrypt.hashpw(password, bcrypt.gensalt(rounds))

    def rehash(self, password):
        """Recreates the internal hash at the desired work factor."""
        self.hash = self._new(password, self.desired_rounds)
        self.rounds = self.desired_rounds
# license: mit
import sys
import cherrypy
from cherrypy._cpcompat import basestring, ntou, json, json_encode, json_decode
def json_processor(entity):
    """Read application/json data into request.json."""
    length = entity.headers.get(ntou("Content-Length"), ntou(""))
    if not length:
        # No Content-Length header: refuse with 411 Length Required.
        raise cherrypy.HTTPError(411)
    raw = entity.fp.read()
    try:
        cherrypy.serving.request.json = json_decode(raw.decode('utf-8'))
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
            force=True, debug=False, processor = json_processor):
    """Add a processor to parse JSON request entities:
    The default processor places the parsed data into request.json.
    Incoming request entities which match the given content_type(s) will
    be deserialized from JSON to the Python equivalent, and the result
    stored at cherrypy.request.json. The 'content_type' argument may
    be a Content-Type string or a list of allowable Content-Type strings.
    If the 'force' argument is True (the default), then entities of other
    content types will not be allowed; "415 Unsupported Media Type" is
    raised instead.
    Supply your own processor to use a custom decoder, or to handle the parsed
    data differently.  The processor can be configured via
    tools.json_in.processor or via the decorator method.
    Note that the deserializer requires the client send a Content-Length
    request header, or it will raise "411 Length Required". If for any
    other reason the request entity cannot be deserialized from JSON,
    it will raise "400 Bad Request: Invalid JSON document".
    You must be using Python 2.6 or greater, or have the 'simplejson'
    package importable; otherwise, ValueError is raised during processing.
    """
    request = cherrypy.serving.request
    # NOTE(review): the mutable default 'content_type' list is only read,
    # never mutated, so sharing it across calls is harmless here.
    if isinstance(content_type, basestring):
        content_type = [content_type]
    if force:
        if debug:
            cherrypy.log('Removing body processors %s' %
                         repr(request.body.processors.keys()), 'TOOLS.JSON_IN')
        # With force=True, only the configured JSON content types may carry
        # a body; everything else is rejected with 415 by the default proc.
        request.body.processors.clear()
        request.body.default_proc = cherrypy.HTTPError(
            415, 'Expected an entity of content type %s' %
            ', '.join(content_type))
    for ct in content_type:
        if debug:
            cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')
        request.body.processors[ct] = processor
def json_handler(*args, **kwargs):
    """Invoke the original page handler and JSON-encode its return value."""
    result = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
    return json_encode(result)
def json_out(content_type='application/json', debug=False, handler=json_handler):
    """Wrap request.handler to serialize its output to JSON. Sets Content-Type.
    If the given content_type is None, the Content-Type response header
    is not set.
    Provide your own handler to use a custom encoder.  For example
    cherrypy.config['tools.json_out.handler'] = <function>, or
    @json_out(handler=function).
    You must be using Python 2.6 or greater, or have the 'simplejson'
    package importable; otherwise, ValueError is raised during processing.
    """
    request = cherrypy.serving.request
    if debug:
        cherrypy.log('Replacing %s with JSON handler' % request.handler,
                     'TOOLS.JSON_OUT')
    # Stash the original page handler so json_handler can call it later.
    request._json_inner_handler = request.handler
    request.handler = handler
    if content_type is not None:
        if debug:
            cherrypy.log('Setting Content-Type to %s' % content_type, 'TOOLS.JSON_OUT')
        cherrypy.serving.response.headers['Content-Type'] = content_type
# license: gpl-2.0
import sys
sys.path.insert(1, "../../")
import h2o, tests
def countmatches_check():
    """Exercise H2OFrame/vec countmatches with single and multiple targets."""
    # Connect to a pre-existing cluster
    frame = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))

    # Single target, applied to a one-column frame slice.
    result = frame["C5"].countmatches("o")
    assert result.nrow == 150 and result.ncol == 1
    assert result[0, 0] == 1 and result[50, 0] == 2 and result[100, 0] == 0, \
        "Expected 1, 2, 0 but got {0}, {1}, and {2}".format(result[0, 0], result[50, 0], result[100, 0])

    # Single target, applied to a vec.
    vec = frame["C5"]
    result = vec.countmatches("ic")
    assert result.nrow == 150 and result.ncol == 1
    assert result[0, 0] == 0 and result[50, 0] == 1 and result[100, 0] == 1, \
        "Expected 0, 1, 1 but got {0}, {1}, and {2}".format(result[0, 0], result[50, 0], result[100, 0])

    # A list of targets: matches are summed across all of them.
    vec = frame["C5"]
    result = vec.countmatches(["ic", "ri", "ca"])
    assert result.nrow == 150 and result.ncol == 1
    assert result[0, 0] == 1 and result[50, 0] == 2 and result[100, 0] == 3, \
        "Expected 1, 2, 3 but got {0}, {1}, and {2}".format(result[0, 0], result[50, 0], result[100, 0])


if __name__ == "__main__":
    tests.run_test(sys.argv, countmatches_check)
# license: apache-2.0
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
# Single shared WinTerm used to drive the Windows console API; stays None
# on platforms where ctypes' windll is unavailable (everything but Windows).
winterm = None
if windll is not None:
    winterm = WinTerm()
def is_a_tty(stream):
    """Return True when *stream* has an isatty() method that reports a tty."""
    isatty = getattr(stream, 'isatty', None)
    return isatty is not None and isatty()
class StreamWrapper(object):
    '''
    Transparent proxy around a stream (e.g. sys.stdout).  Every attribute
    is forwarded to the wrapped stream, except 'write()', which is routed
    through the converter so ANSI sequences can be stripped/translated.
    '''
    def __init__(self, wrapped, converter):
        # Name-mangled (double underscore) on purpose, so these can never
        # collide with attribute names on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter

    def __getattr__(self, name):
        # Only invoked for attributes not found on the proxy itself.
        return getattr(self.__wrapped, name)

    def write(self, text):
        # Route writes through the converter instead of the raw stream.
        self.__convertor.write(text)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # "\033[" <params> <letter> -- e.g. "\033[31;1m" (colors/styles, cursor moves)
    ANSI_CSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') # Control Sequence Introducer
    # "\033]" <params> BEL -- e.g. window title changes
    ANSI_OSC_RE = re.compile('\033\]((?:.|;)*?)(\x07)') # Operating System Command
    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped
        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset
        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)
        on_windows = os.name == 'nt'
        # A TERM variable on Windows suggests a unix-like terminal emulator
        # (e.g. cygwin/msys), which understands ANSI natively.
        on_emulated_windows = on_windows and 'TERM' in os.environ
        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows and not on_emulated_windows
        self.strip = strip
        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and not wrapped.closed and not on_emulated_windows and is_a_tty(wrapped)
        self.convert = convert
        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()
        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr
    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset
    def get_win32_calls(self):
        """Build the dispatch table mapping ANSI 'm' (SGR) codes to
        (winterm function, *args) tuples; empty when not converting."""
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
                AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
                AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
                AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
                AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
                AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
                AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
                AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
                AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
                AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
                AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
                AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
                AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
                AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
                AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
                AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
            }
        return dict()
    def write(self, text):
        """Write *text*, stripping/converting ANSI when configured, and
        optionally resetting colors afterwards (autoreset)."""
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()
    def reset_all(self):
        """Restore default console colors, via win32 when converting,
        otherwise by emitting the ANSI reset sequence."""
        if self.convert:
            self.call_win32('m', (0,))
        elif not self.wrapped.closed and is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)
    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        text = self.convert_osc(text)
        for match in self.ANSI_CSI_RE.finditer(text):
            start, end = match.span()
            self.write_plain_text(text, cursor, start)
            self.convert_ansi(*match.groups())
            cursor = end
        self.write_plain_text(text, cursor, len(text))
    def write_plain_text(self, text, start, end):
        """Write the [start:end) slice of *text* straight to the stream."""
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()
    def convert_ansi(self, paramstring, command):
        """Translate one CSI sequence into a win32 call (when converting)."""
        if self.convert:
            params = self.extract_params(command, paramstring)
            self.call_win32(command, params)
    def extract_params(self, command, paramstring):
        """Parse the ';'-separated CSI parameter string into an int tuple,
        filling in the per-command defaults for missing values."""
        if command in 'Hf':
            # Cursor position: missing params default to 1 (row/column).
            params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
            while len(params) < 2:
                # defaults:
                params = params + (1,)
        else:
            params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
            if len(params) == 0:
                # defaults:
                if command in 'JKm':
                    params = (0,)
                elif command in 'ABCD':
                    params = (1,)
        return params
    def call_win32(self, command, params):
        """Dispatch a parsed CSI command to the appropriate winterm call."""
        if command == 'm':
            # SGR: each parameter is looked up in the color/style table.
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in 'J':
            winterm.erase_screen(params[0], on_stderr=self.on_stderr)
        elif command in 'K':
            winterm.erase_line(params[0], on_stderr=self.on_stderr)
        elif command in 'Hf': # cursor position - absolute
            winterm.set_cursor_position(params, on_stderr=self.on_stderr)
        elif command in 'ABCD': # cursor position - relative
            n = params[0]
            # A - up, B - down, C - forward, D - back
            x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
            winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
    def convert_osc(self, text):
        """Remove OSC sequences from *text*, applying title changes via
        win32; returns the cleaned text."""
        for match in self.ANSI_OSC_RE.finditer(text):
            start, end = match.span()
            text = text[:start] + text[end:]
            paramstring, command = match.groups()
            if command in '\x07': # \x07 = BEL
                params = paramstring.split(";")
                # 0 - change title and icon (we will only change title)
                # 1 - change icon (we don't support this)
                # 2 - change title
                if params[0] in '02':
                    winterm.set_title(params[1])
        return text
# license: gpl-3.0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI SERVICEBUS - CRUD TEST DEFINITIONS
import time
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only)
from knack.util import CLIError
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
class SBSubscriptionCRUDScenarioTest(ScenarioTest):
    """End-to-end CRUD scenario for Service Bus topic subscriptions via the
    `az servicebus` CLI commands."""
    from azure_devtools.scenario_tests import AllowLargeResponse

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_sb_subscription')
    def test_sb_subscription(self, resource_group):
        """Creates namespace -> topic -> subscription, updates the
        subscription's delivery/TTL/idle settings, then deletes everything
        in reverse order."""
        # Values referenced as {placeholders} inside the cmd strings below.
        self.kwargs.update({
            'namespacename': self.create_random_name(prefix='sb-nscli', length=20),
            'tags': {'tag1: value1', 'tag2: value2'},
            'sku': 'Standard',
            'tier': 'Standard',
            'authoname': self.create_random_name(prefix='cliAutho', length=20),
            'defaultauthorizationrule': 'RootManageSharedAccessKey',
            'accessrights': 'Send, Listen',
            'primary': 'PrimaryKey',
            'secondary': 'SecondaryKey',
            'topicname': self.create_random_name(prefix='sb-topiccli', length=25),
            'topicauthoname': self.create_random_name(prefix='cliTopicAutho', length=25),
            'subscriptionname': self.create_random_name(prefix='sb-subscli', length=25),
            'lockduration': 'PT4M',
            'defaultmessagetimetolive': 'PT7M',
            'autodeleteonidle': 'P9D',
            'maxdelivery': '3',
            'false': 'false',
            'true': 'true'
        })
        # Create Namespace
        self.cmd(
            'servicebus namespace create --resource-group {rg} --name {namespacename} --tags {tags} --sku {sku}',
            checks=[self.check('sku.name', '{sku}')])
        # Get Created Namespace
        self.cmd('servicebus namespace show --resource-group {rg} --name {namespacename}',
                 checks=[self.check('sku.name', '{sku}')])
        # Create Topic
        self.cmd('servicebus topic create --resource-group {rg} --namespace-name {namespacename} --name {topicname}',
                 checks=[self.check('name', '{topicname}')])
        # Get Topic
        self.cmd('servicebus topic show --resource-group {rg} --namespace-name {namespacename} --name {topicname}',
                 checks=[self.check('name', '{topicname}')])
        # Create Subscription
        self.cmd(
            'servicebus topic subscription create --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}',
            checks=[self.check('name', '{subscriptionname}')])
        # Get Created Subscription
        self.cmd(
            'servicebus topic subscription show --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}',
            checks=[self.check('name', '{subscriptionname}')])
        # Get list of Subscriptions
        self.cmd(
            'servicebus topic subscription list --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname}')
        # Update Subscription and verify the ISO-8601 durations come back
        # as datetime-style strings (e.g. PT4M -> 0:04:00).
        self.cmd(
            'servicebus topic subscription update --resource-group {rg} --namespace-name {namespacename} --topic-name '
            '{topicname} --name {subscriptionname} --max-delivery {maxdelivery} '
            '--default-message-time-to-live {defaultmessagetimetolive} --dead-letter-on-filter-exceptions {false}'
            ' --enable-dead-lettering-on-message-expiration {false} --auto-delete-on-idle {autodeleteonidle}'
            ' --default-message-time-to-live {defaultmessagetimetolive} --lock-duration {lockduration}',
            checks=[self.check('name', '{subscriptionname}'),
                    self.check('lockDuration', '0:04:00'),
                    self.check('maxDeliveryCount', '3'),
                    self.check('defaultMessageTimeToLive', '0:07:00'),
                    self.check('autoDeleteOnIdle', '9 days, 0:00:00'),
                    self.check('deadLetteringOnFilterEvaluationExceptions', 'False')])
        # Delete Subscription
        self.cmd(
            'servicebus topic subscription delete --resource-group {rg} --namespace-name {namespacename} --topic-name {topicname} --name {subscriptionname}')
        # Delete Topic
        self.cmd('servicebus topic delete --resource-group {rg} --namespace-name {namespacename} --name {topicname}')
        # Delete Namespace
        self.cmd('servicebus namespace delete --resource-group {rg} --name {namespacename}')
# license: mit
# Copyright 2017 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from mycroft.configuration import ConfigurationManager
from mycroft.util.log import getLogger
from os.path import dirname, exists, join, abspath
import os
import time
import tempfile
__author__ = 'seanfitz, jdorleans, jarbas'
# Module-level logger for the hotword factory and engines below.
LOG = getLogger("HotwordFactory")
# Directory holding the bundled recognizer data (pocketsphinx HMM models).
RECOGNIZER_DIR = join(abspath(dirname(__file__)), "recognizer")
class HotWordEngine(object):
    """Base class for wake-word ("hot word") detection engines.

    Args:
        key_phrase: phrase to listen for (lower-cased internally).
        config: per-hotword settings dict; when None it is read from the
            "hot_words" section of the Mycroft configuration.
        lang: language code, e.g. "en-us" (lower-cased internally).
    """

    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        self.lang = str(lang).lower()
        self.key_phrase = str(key_phrase).lower()
        # Rough estimate: 1 phoneme per 2 chars.  Bug fix: use integer
        # (floor) division -- plain "/" produced a float under Python 3.
        self.num_phonemes = len(key_phrase) // 2 + 1
        if config is None:
            config = ConfigurationManager.get().get("hot_words", {})
            config = config.get(self.key_phrase, {})
        self.config = config
        # Listener settings (e.g. sample_rate) shared by all engines.
        self.listener_config = ConfigurationManager.get().get("listener", {})

    def found_wake_word(self, frame_data):
        """Return True if the wake word occurs in *frame_data*.

        Subclasses must override; this base implementation never triggers.
        """
        return False
class PocketsphinxHotWord(HotWordEngine):
    """Wake-word engine backed by CMU PocketSphinx keyphrase spotting."""

    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        super(PocketsphinxHotWord, self).__init__(key_phrase, config, lang)
        # Hotword module imports (deferred so the dependency stays optional)
        from pocketsphinx import Decoder
        # Sanity-check that this engine matches the configured module.
        module = self.config.get("module")
        if module != "pocketsphinx":
            LOG.warning(
                str(module) + " module does not match with "
                "Hotword class pocketsphinx")
        # Hotword module params
        self.phonemes = self.config.get("phonemes", "HH EY . M AY K R AO F T")
        self.num_phonemes = len(self.phonemes.split())
        self.threshold = self.config.get("threshold", 1e-90)
        # Bug fix: the fallback sample rate was 1600; the bundled acoustic
        # models expect 16 kHz audio, so the default should be 16000.
        self.sample_rate = self.listener_config.get("sample_rate", 16000)
        dict_name = self.create_dict(key_phrase, self.phonemes)
        config = self.create_config(dict_name, Decoder.default_config())
        self.decoder = Decoder(config)

    def create_dict(self, key_phrase, phonemes):
        """Write a temporary pocketsphinx dictionary mapping each word of
        *key_phrase* to its '.'-separated phoneme group; return its path.

        NOTE(review): the temp file is never removed -- acceptable for a
        long-lived daemon, but worth confirming.
        """
        (fd, file_name) = tempfile.mkstemp()
        words = key_phrase.split()
        phoneme_groups = phonemes.split('.')
        with os.fdopen(fd, 'w') as f:
            for word, phoneme in zip(words, phoneme_groups):
                f.write(word + ' ' + phoneme + '\n')
        return file_name

    def create_config(self, dict_name, config):
        """Populate a pocketsphinx Decoder config for keyphrase spotting."""
        model_file = join(RECOGNIZER_DIR, 'model', self.lang, 'hmm')
        if not exists(model_file):
            LOG.error('PocketSphinx model not found at ' + str(model_file))
        config.set_string('-hmm', model_file)
        config.set_string('-dict', dict_name)
        config.set_string('-keyphrase', self.key_phrase)
        config.set_float('-kws_threshold', float(self.threshold))
        config.set_float('-samprate', self.sample_rate)
        config.set_int('-nfft', 2048)
        # Silence pocketsphinx's own logging.
        config.set_string('-logfn', '/dev/null')
        return config

    def transcribe(self, byte_data, metrics=None):
        """Run one decoder pass over raw audio and return the hypothesis."""
        start = time.time()
        self.decoder.start_utt()
        self.decoder.process_raw(byte_data, False, False)
        self.decoder.end_utt()
        if metrics:
            metrics.timer("mycroft.stt.local.time_s", time.time() - start)
        return self.decoder.hyp()

    def found_wake_word(self, frame_data):
        """True when the key phrase appears in the decoded hypothesis."""
        hyp = self.transcribe(frame_data)
        return hyp and self.key_phrase in hyp.hypstr.lower()
class SnowboyHotWord(HotWordEngine):
    """Wake-word engine backed by the Snowboy personal hotword detector."""

    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        super(SnowboyHotWord, self).__init__(key_phrase, config, lang)
        # Hotword module imports
        from snowboydecoder import HotwordDetector
        # Hotword module config
        module = self.config.get("module")
        if module != "snowboy":
            LOG.warning(module + " module does not match with Hotword class "
                        "snowboy")
        # Hotword params: one model file per configured entry; all models
        # share a single sensitivity value.
        models = self.config.get("models", {})
        paths = []
        for key in models:
            paths.append(models[key])
        sensitivity = self.config.get("sensitivity", 0.5)
        self.snowboy = HotwordDetector(paths,
                                       sensitivity=[sensitivity] * len(paths))
        # NOTE(review): these two assignments look redundant with the base
        # class __init__ (which already lowercases key_phrase) — verify
        # against HotWordEngine before removing.
        self.lang = str(lang).lower()
        self.key_phrase = str(key_phrase).lower()

    def found_wake_word(self, frame_data):
        # NOTE(review): treats a detection result of exactly 1 (first
        # model) as a match; results for other model indices are ignored
        # — confirm intended for multi-model configs.
        wake_word = self.snowboy.detector.RunDetection(frame_data)
        return wake_word == 1
class HotWordFactory(object):
    """Instantiate the configured wake-word engine by module name."""

    CLASSES = {
        "pocketsphinx": PocketsphinxHotWord,
        "snowboy": SnowboyHotWord
    }

    @staticmethod
    def create_hotword(hotword="hey mycroft", config=None, lang="en-us"):
        """Create a HotWordEngine for `hotword`.

        Falls back to the pocketsphinx engine when the hotword has no
        configuration entry instead of crashing.
        """
        LOG.info("creating " + hotword)
        if not config:
            config = ConfigurationManager.get().get("hotwords", {})
        # BUGFIX: config.get(hotword) returned None (and then raised
        # AttributeError on .get) for hotwords without a config entry;
        # use an empty-dict default so the module fallback applies.
        module = config.get(hotword, {}).get("module", "pocketsphinx")
        config = config.get(hotword, {"module": module})
        clazz = HotWordFactory.CLASSES.get(module)
        return clazz(hotword, config, lang=lang)
| gpl-3.0 |
benranco/SNPpipeline | tools/vcflib/googletest/googletest/test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.

IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')

# Convenience aliases into the shared gtest test utilities.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  process = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    # 1 iff the child process was terminated by a signal.
    has_seg_fault = Run(command)

    # Restore the environment so later tests start from a clean slate.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # Catching SEH exceptions is a Windows-only concern, so this test
  # method only exists on Windows builds.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Entry point: delegate to Google Test's Python test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| mit |
flotre/Sick-Beard | lib/requests/compat.py | 49 | 2471 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
import sys

# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)

#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)

#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)

#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)

#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)

#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)

#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)

#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)

#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4)  # I'm assuming this is not by choice.

# ---------
# Platforms
# ---------

# Syntax sugar.
_ver = sys.version.lower()

is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)

# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))

# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()

# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower())  # Complete guess.
# BUGFIX: the check was ('solar==' in ...), which can never match since
# sys.platform contains no '=='. Solaris reports 'sunos5'.
is_solaris = ('sunos' in str(sys.platform).lower())
# Prefer simplejson (faster, C-accelerated) when installed; otherwise
# fall back to the standard-library json module.
try:
    import simplejson as json
except ImportError:
    import json
# ---------
# Specifics
# ---------

# Re-export URL parsing, cookie, and IO helpers under common names so the
# rest of the package can import them without version checks. The
# builtin-name rebindings (str/bytes/basestring) intentionally shadow the
# builtins so downstream code has uniform text/bytes semantics.
if is_py2:
    from urllib import quote, unquote, urlencode
    from urlparse import urlparse, urlunparse, urljoin, urlsplit
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    # Prefer the C-accelerated charset detector when available.
    try:
        import cchardet as chardet
    except ImportError:
        from .packages import chardet
    from .packages.urllib3.packages.ordered_dict import OrderedDict

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote
    from urllib.request import parse_http_list
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from .packages import chardet2 as chardet
    from collections import OrderedDict

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str,bytes)
    numeric_types = (int, float)
| gpl-3.0 |
aclifton/cpeg853-gem5 | tests/configs/pc-simple-timing.py | 52 | 2346 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from x86_generic import *

# Build the regression-test root: a single-CPU x86 full-system Linux
# config using timing-mode memory, DDR3-1600 DRAM, and TimingSimpleCPU.
root = LinuxX86FSSystemUniprocessor(mem_mode='timing',
                                    mem_class=DDR3_1600_x64,
                                    cpu_class=TimingSimpleCPU).create_root()
| bsd-3-clause |
beetbox/beets | beetsplug/replaygain.py | 1 | 49627 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import collections
import enum
import math
import os
import signal
import six
import subprocess
import sys
import warnings
from multiprocessing.pool import ThreadPool, RUN
from six.moves import zip, queue
from threading import Thread, Event
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import (syspath, command_output, displayable_path,
py3_path, cpu_count)
# Utilities.
class ReplayGainError(Exception):
    """Raised when a local (to a track or an album) error occurs in one
    of the backends. Processing can typically continue with other items.
    """
class FatalReplayGainError(Exception):
    """Raised when a fatal error occurs in one of the backends, e.g. a
    required external tool is missing or unusable.
    """
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
    """Raised when a fatal error occurs in the GStreamerBackend when
    loading the required plugins."""
def call(args, **kwargs):
    """Execute the command and return its output or raise a
    ReplayGainError on failure.
    """
    try:
        output = command_output(args, **kwargs)
    except subprocess.CalledProcessError as exc:
        raise ReplayGainError(
            u"{0} exited with status {1}".format(args[0], exc.returncode)
        )
    except UnicodeEncodeError:
        # Due to a bug in Python 2's subprocess on Windows, Unicode
        # filenames can fail to encode on that platform. See:
        # https://github.com/google-code-export/beets/issues/499
        raise ReplayGainError(u"argument encoding failed")
    else:
        return output
def after_version(version_a, version_b):
    """Return True iff dotted version string `version_a` is at least
    `version_b`, comparing numerically component by component.
    """
    def as_tuple(version):
        return tuple(int(part) for part in version.split('.'))

    return as_tuple(version_a) >= as_tuple(version_b)
def db_to_lufs(db):
    """Convert a dB value to LUFS.

    The ReplayGain 2.0 specification places the reference level 107
    below full scale, hence the fixed offset. See
    https://wiki.hydrogenaud.io/index.php?title=
    ReplayGain_2.0_specification#Reference_level
    """
    reference_offset = 107
    return db - reference_offset
def lufs_to_db(db):
    """Convert a LUFS value to dB (the inverse of `db_to_lufs`).

    See https://wiki.hydrogenaud.io/index.php?title=
    ReplayGain_2.0_specification#Reference_level
    """
    reference_offset = 107
    return db + reference_offset
# Backend base and plumbing classes.

# Result of analysing a single track:
# gain: in LU to reference level
# peak: part of full scale (FS is 1.0)
Gain = collections.namedtuple("Gain", "gain peak")

# Result of analysing a whole album:
# album_gain: Gain object
# track_gains: list of Gain objects
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Peak(enum.Enum):
    # Peak measurement method. The member names double as the value of
    # ffmpeg's ebur128 `peak=` filter argument (see FfmpegBackend).
    none = 0
    true = 1
    sample = 2
class Backend(object):
    """An abstract class representing engine for calculating RG values.
    """
    # Whether this backend's analyses may safely run concurrently —
    # presumably consumed by the plugin's thread pool; confirm at the
    # call site (outside this chunk).
    do_parallel = False

    def __init__(self, config, log):
        """Initialize the backend with the configuration view for the
        plugin.
        """
        self._log = log

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of Gain objects.
        """
        raise NotImplementedError()

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        raise NotImplementedError()
# ffmpeg backend
class FfmpegBackend(Backend):
    """A replaygain backend using ffmpeg's ebur128 filter.
    """
    do_parallel = True

    def __init__(self, config, log):
        super(FfmpegBackend, self).__init__(config, log)
        self._ffmpeg_path = "ffmpeg"

        # check that ffmpeg is installed
        try:
            ffmpeg_version_out = call([self._ffmpeg_path, "-version"])
        except OSError:
            raise FatalReplayGainError(
                u"could not find ffmpeg at {0}".format(self._ffmpeg_path)
            )
        # ebur128 support requires either a sufficiently new libavfilter
        # or an ffmpeg build configured with --enable-libebur128.
        incompatible_ffmpeg = True
        for line in ffmpeg_version_out.stdout.splitlines():
            if line.startswith(b"configuration:"):
                if b"--enable-libebur128" in line:
                    incompatible_ffmpeg = False
            if line.startswith(b"libavfilter"):
                version = line.split(b" ", 1)[1].split(b"/", 1)[0].split(b".")
                version = tuple(map(int, version))
                if version >= (6, 67, 100):
                    incompatible_ffmpeg = False
        if incompatible_ffmpeg:
            # BUGFIX: the original message concatenated adjacent string
            # literals without separating spaces ("ReplayGain.calculation",
            # "above orthe"), producing garbled user-facing output.
            raise FatalReplayGainError(
                u"Installed FFmpeg version does not support ReplayGain "
                u"calculation. Either libavfilter version 6.67.100 or above "
                u"or the --enable-libebur128 configuration option is "
                u"required."
            )

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of Gain objects (the track gains).
        """
        gains = []
        for item in items:
            gains.append(
                self._analyse_item(
                    item,
                    target_level,
                    peak,
                    count_blocks=False,
                )[0]  # take only the gain, discarding number of gating blocks
            )
        return gains

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        target_level_lufs = db_to_lufs(target_level)

        # analyse tracks
        # list of track Gain objects
        track_gains = []
        # maximum peak
        album_peak = 0
        # sum of BS.1770 gating block powers
        sum_powers = 0
        # total number of BS.1770 gating blocks
        n_blocks = 0

        for item in items:
            track_gain, track_n_blocks = self._analyse_item(
                item, target_level, peak
            )
            track_gains.append(track_gain)

            # album peak is maximum track peak
            album_peak = max(album_peak, track_gain.peak)

            # prepare album_gain calculation
            # total number of blocks is sum of track blocks
            n_blocks += track_n_blocks

            # convert `LU to target_level` -> LUFS
            track_loudness = target_level_lufs - track_gain.gain
            # This reverses ITU-R BS.1770-4 p. 6 equation (5) to convert
            # from loudness to power. The result is the average gating
            # block power.
            track_power = 10**((track_loudness + 0.691) / 10)

            # Weight that average power by the number of gating blocks to
            # get the sum of all their powers. Add that to the sum of all
            # block powers in this album.
            sum_powers += track_power * track_n_blocks

        # calculate album gain
        if n_blocks > 0:
            # compare ITU-R BS.1770-4 p. 6 equation (5)
            # Album gain is the replaygain of the concatenation of all tracks.
            album_gain = -0.691 + 10 * math.log10(sum_powers / n_blocks)
        else:
            album_gain = -70

        # convert LUFS -> `LU to target_level`
        album_gain = target_level_lufs - album_gain

        self._log.debug(
            u"{0}: gain {1} LU, peak {2}"
            .format(items, album_gain, album_peak)
        )

        return AlbumGain(Gain(album_gain, album_peak), track_gains)

    def _construct_cmd(self, item, peak_method):
        """Construct the shell command to analyse items."""
        return [
            self._ffmpeg_path,
            "-nostats",
            "-hide_banner",
            "-i",
            item.path,
            "-map",
            "a:0",
            "-filter",
            "ebur128=peak={0}".format(peak_method),
            "-f",
            "null",
            "-",
        ]

    def _analyse_item(self, item, target_level, peak, count_blocks=True):
        """Analyse item. Return a pair of a Gain object and the number
        of gating blocks above the threshold.

        If `count_blocks` is False, the number of gating blocks returned
        will be 0.
        """
        target_level_lufs = db_to_lufs(target_level)
        peak_method = peak.name

        # call ffmpeg
        self._log.debug(u"analyzing {0}".format(item))
        cmd = self._construct_cmd(item, peak_method)
        self._log.debug(
            u'executing {0}', u' '.join(map(displayable_path, cmd))
        )
        output = call(cmd).stderr.splitlines()

        # parse output (the summary is at the end, so search backwards)
        if peak == Peak.none:
            peak = 0
        else:
            line_peak = self._find_line(
                output,
                " {0} peak:".format(peak_method.capitalize()).encode(),
                start_line=len(output) - 1, step_size=-1,
            )
            peak = self._parse_float(
                output[self._find_line(
                    output, b" Peak:",
                    line_peak,
                )]
            )
            # convert TPFS -> part of FS
            peak = 10**(peak / 20)

        line_integrated_loudness = self._find_line(
            output, b" Integrated loudness:",
            start_line=len(output) - 1, step_size=-1,
        )
        gain = self._parse_float(
            output[self._find_line(
                output, b" I:",
                line_integrated_loudness,
            )]
        )
        # convert LUFS -> LU from target level
        gain = target_level_lufs - gain

        # count BS.1770 gating blocks
        n_blocks = 0
        if count_blocks:
            gating_threshold = self._parse_float(
                output[self._find_line(
                    output, b" Threshold:",
                    start_line=line_integrated_loudness,
                )]
            )
            for line in output:
                if not line.startswith(b"[Parsed_ebur128"):
                    continue
                if line.endswith(b"Summary:"):
                    continue
                line = line.split(b"M:", 1)
                if len(line) < 2:
                    continue
                if self._parse_float(b"M: " + line[1]) >= gating_threshold:
                    n_blocks += 1
            self._log.debug(
                u"{0}: {1} blocks over {2} LUFS"
                .format(item, n_blocks, gating_threshold)
            )

        self._log.debug(
            u"{0}: gain {1} LU, peak {2}"
            .format(item, gain, peak)
        )

        return Gain(gain, peak), n_blocks

    def _find_line(self, output, search, start_line=0, step_size=1):
        """Return index of line beginning with `search`.

        Begins searching at index `start_line` in `output`.
        """
        end_index = len(output) if step_size > 0 else -1
        for i in range(start_line, end_index, step_size):
            if output[i].startswith(search):
                return i
        raise ReplayGainError(
            u"ffmpeg output: missing {0} after line {1}"
            .format(repr(search), start_line)
        )

    def _parse_float(self, line):
        """Extract a float from a key value pair in `line`.

        This format is expected: /[^:]:[[:space:]]*value.*/, where `value` is
        the float.
        """
        # extract value
        value = line.split(b":", 1)
        if len(value) < 2:
            raise ReplayGainError(
                u"ffmpeg output: expected key value pair, found {0}"
                .format(line)
            )
        value = value[1].lstrip()
        # strip unit
        value = value.split(b" ", 1)[0]
        # cast value to float
        try:
            return float(value)
        except ValueError:
            raise ReplayGainError(
                u"ffmpeg output: expected float value, found {0}"
                .format(value)
            )
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """ReplayGain backend that shells out to the mp3gain/aacgain
    command-line tools.
    """
    do_parallel = True

    def __init__(self, config, log):
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })

        self.command = config["command"].as_str()

        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    u'replaygain command does not exist: {0}'.format(
                        self.command)
                )
        else:
            # Check whether the program is in $PATH.
            for cmd in ('mp3gain', 'aacgain'):
                try:
                    call([cmd, '-v'])
                    self.command = cmd
                except OSError:
                    # Not installed; try the next candidate.
                    pass
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install mp3gain or aacgain'
            )

        self.noclip = config['noclip'].get(bool)

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        supported_items = list(filter(self.format_supported, items))
        output = self.compute_gain(supported_items, target_level, False)
        return output

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?

        supported_items = list(filter(self.format_supported, items))
        if len(supported_items) != len(items):
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])

        output = self.compute_gain(supported_items, target_level, True)
        return AlbumGain(output[-1], output[:-1])

    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True

    def compute_gain(self, items, target_level, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain
        """
        if len(items) == 0:
            self._log.debug(u'no supported tracks to analyze')
            return []

        """Compute ReplayGain values and return a list of results
        dictionaries as given by `parse_tool_output`.
        """
        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, '-o', '-s', 's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + ['-k']
        else:
            # Disable clipping warning.
            cmd = cmd + ['-c']
        # mp3gain's internal reference level is 89 dB; "-d" supplies the
        # offset from that reference.
        cmd = cmd + ['-d', str(int(target_level - 89))]
        cmd = cmd + [syspath(i.path) for i in items]

        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd).stdout
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))

    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError(u'mp3gain failed')
            # Column layout: File, MP3 gain, dB gain, Max Amplitude,
            # Max global_gain, Min global_gain. Peak is normalized from
            # 16-bit sample amplitude to a 0..1 fraction of full scale.
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),

            }
            out.append(Gain(d['gain'], d['peak']))
        return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
def __init__(self, config, log):
super(GStreamerBackend, self).__init__(config, log)
self._import_gst()
# Initialized a GStreamer pipeline of the form filesrc ->
# decodebin -> audioconvert -> audioresample -> rganalysis ->
# fakesink The connection between decodebin and audioconvert is
# handled dynamically after decodebin figures out the type of
# the input file.
self._src = self.Gst.ElementFactory.make("filesrc", "src")
self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
self._res = self.Gst.ElementFactory.make("audioresample", "res")
self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
if self._src is None or self._decbin is None or self._conv is None \
or self._res is None or self._rg is None:
raise FatalGstreamerPluginReplayGainError(
u"Failed to load required GStreamer plugins"
)
# We check which files need gain ourselves, so all files given
# to rganalsys should have their gain computed, even if it
# already exists.
self._rg.set_property("forced", True)
self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
self._pipe = self.Gst.Pipeline()
self._pipe.add(self._src)
self._pipe.add(self._decbin)
self._pipe.add(self._conv)
self._pipe.add(self._res)
self._pipe.add(self._rg)
self._pipe.add(self._sink)
self._src.link(self._decbin)
self._conv.link(self._res)
self._res.link(self._rg)
self._rg.link(self._sink)
self._bus = self._pipe.get_bus()
self._bus.add_signal_watch()
self._bus.connect("message::eos", self._on_eos)
self._bus.connect("message::error", self._on_error)
self._bus.connect("message::tag", self._on_tag)
# Needed for handling the dynamic connection between decodebin
# and audioconvert
self._decbin.connect("pad-added", self._on_pad_added)
self._decbin.connect("pad-removed", self._on_pad_removed)
self._main_loop = self.GLib.MainLoop()
self._files = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
and `GObject` fields on this object.
"""
try:
import gi
except ImportError:
raise FatalReplayGainError(
u"Failed to load GStreamer: python-gi not found"
)
try:
gi.require_version('Gst', '1.0')
except ValueError as e:
raise FatalReplayGainError(
u"Failed to load GStreamer 1.0: {0}".format(e)
)
from gi.repository import GObject, Gst, GLib
# Calling GObject.threads_init() is not needed for
# PyGObject 3.10.2+
with warnings.catch_warnings():
warnings.simplefilter("ignore")
GObject.threads_init()
Gst.init([sys.argv[0]])
self.GObject = GObject
self.GLib = GLib
self.Gst = Gst
def compute(self, files, target_level, album):
self._error = None
self._files = list(files)
if len(self._files) == 0:
return
self._file_tags = collections.defaultdict(dict)
self._rg.set_property("reference-level", target_level)
if album:
self._rg.set_property("num-tracks", len(self._files))
if self._set_first_file():
self._main_loop.run()
if self._error is not None:
raise self._error
def compute_track_gain(self, items, target_level, peak):
self.compute(items, target_level, False)
if len(self._file_tags) != len(items):
raise ReplayGainError(u"Some tracks did not receive tags")
ret = []
for item in items:
ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
self._file_tags[item]["TRACK_PEAK"]))
return ret
def compute_album_gain(self, items, target_level, peak):
items = list(items)
self.compute(items, target_level, True)
if len(self._file_tags) != len(items):
raise ReplayGainError(u"Some items in album did not receive tags")
# Collect track gains.
track_gains = []
for item in items:
try:
gain = self._file_tags[item]["TRACK_GAIN"]
peak = self._file_tags[item]["TRACK_PEAK"]
except KeyError:
raise ReplayGainError(u"results missing for track")
track_gains.append(Gain(gain, peak))
# Get album gain information from the last track.
last_tags = self._file_tags[items[-1]]
try:
gain = last_tags["ALBUM_GAIN"]
peak = last_tags["ALBUM_PEAK"]
except KeyError:
raise ReplayGainError(u"results missing for album")
return AlbumGain(Gain(gain, peak), track_gains)
def close(self):
self._bus.remove_signal_watch()
def _on_eos(self, bus, message):
# A file finished playing in all elements of the pipeline. The
# RG tags have already been propagated. If we don't have a next
# file, we stop processing.
if not self._set_next_file():
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
def _on_error(self, bus, message):
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
err, debug = message.parse_error()
f = self._src.get_property("location")
# A GStreamer error, either an unsupported format or a bug.
self._error = ReplayGainError(
u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
)
def _on_tag(self, bus, message):
tags = message.parse_tag()
def handle_tag(taglist, tag, userdata):
# The rganalysis element provides both the existing tags for
# files and the new computes tags. In order to ensure we
# store the computed tags, we overwrite the RG values of
# received a second time.
if tag == self.Gst.TAG_TRACK_GAIN:
self._file_tags[self._file]["TRACK_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_TRACK_PEAK:
self._file_tags[self._file]["TRACK_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_GAIN:
self._file_tags[self._file]["ALBUM_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_PEAK:
self._file_tags[self._file]["ALBUM_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_REFERENCE_LEVEL:
self._file_tags[self._file]["REFERENCE_LEVEL"] = \
taglist.get_double(tag)[1]
tags.foreach(handle_tag, None)
def _set_first_file(self):
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.

        Returns False when the queue of files is exhausted.  Only the
        source side of the pipeline is re-targeted; the rganalysis
        element keeps its accumulated album state.  The order of the
        state transitions below is significant.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False
        self._file = self._files.pop(0)
        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)
        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)
        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", py3_path(syspath(self._file.path)))
        self._decbin.link(self._conv)
        self._pipe.set_state(self.Gst.State.READY)
        return True
    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.

        Returns False when there are no more files.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline (FLUSH discards buffered
            # data from the previous stream).
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)
        return ret
def _on_pad_added(self, decbin, pad):
sink_pad = self._conv.get_compatible_pad(pad, None)
assert(sink_pad is not None)
pad.link(sink_pad)
def _on_pad_removed(self, decbin, pad):
# Called when the decodebin element is disconnected from the
# rest of the pipeline while switching input files
peer = pad.get_peer()
assert(peer is None)
class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
    """
    def __init__(self, config, log):
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()
    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.
        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load audiotools: audiotools not found"
            )
        # Keep module references so the import happens only once.
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain
    def open_audio_file(self, item):
        """Open the file to read the PCM stream from, using ``item.path``.
        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
        file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(py3_path(syspath(item.path)))
        except IOError:
            raise ReplayGainError(
                u"File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                u"Unsupported file type {}".format(item.format)
            )
        return audiofile
    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.
        :return: initialized replaygain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            # Fix: an unreachable `return` that followed this raise in the
            # original has been removed.
            raise ReplayGainError(
                u"Unsupported sample rate {}".format(item.samplerate))
        return rg
    def compute_track_gain(self, items, target_level, peak):
        """Compute ReplayGain values for the requested items.
        ``peak`` is accepted for interface compatibility with the other
        backends but is not used here: Python Audio Tools always reports
        sample peaks.
        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item, target_level) for item in items]
    def _with_target_level(self, gain, target_level):
        """Return `gain` relative to `target_level`.
        Assumes `gain` is relative to 89 db.
        """
        return gain + (target_level - 89)
    def _title_gain(self, rg, audiofile, target_level):
        """Get the gain result pair from PyAudioTools using the `ReplayGain`
        instance `rg` for the given `audiofile`.
        Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
        `ReplayGainError` when the library fails.
        """
        try:
            # The method needs an audiotools.PCMReader instance that can
            # be obtained from an audiofile instance.
            gain, peak = rg.title_gain(audiofile.to_pcm())
        except ValueError as exc:
            # `audiotools.replaygain` can raise a `ValueError` if the sample
            # rate is incorrect.
            self._log.debug(u'error in rg.title_gain() call: {}', exc)
            raise ReplayGainError(u'audiotools audio data error')
        return self._with_target_level(gain, target_level), peak
    def _compute_track_gain(self, item, target_level):
        """Compute ReplayGain value for the requested item.
        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)
        # Each call to title_gain on a ReplayGain object returns peak and gain
        # of the track.
        rg_track_gain, rg_track_peak = self._title_gain(
            rg, audiofile, target_level
        )
        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)
    def compute_album_gain(self, items, target_level, peak):
        """Compute ReplayGain values for the requested album and its items.
        :rtype: :class:`AlbumGain`
        """
        # Materialize once so that any iterable works consistently.  (The
        # original consumed `list(items)` for the first element and then
        # iterated/indexed `items` again, which breaks for generators.)
        items = list(items)
        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        first_item = items[0]
        audiofile = self.open_audio_file(first_item)
        rg = self.init_replaygain(audiofile, first_item)
        track_gains = []
        for item in items:
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = self._title_gain(
                rg, audiofile, target_level
            )
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)
        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        rg_album_gain = self._with_target_level(rg_album_gain, target_level)
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        items[0].album, rg_album_gain, rg_album_peak)
        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )
class ExceptionWatcher(Thread):
    """Monitors a queue for exceptions asynchronously.
    Once an exception occurs, raise it and execute a callback.
    """
    def __init__(self, queue, callback):
        # `queue` is a queue.Queue into which worker threads push
        # `sys.exc_info()` triples.
        self._queue = queue
        self._callback = callback
        self._stopevent = Event()
        Thread.__init__(self)
    def run(self):
        while not self._stopevent.is_set():
            try:
                # Block briefly instead of spinning on `get_nowait`: the
                # original busy-wait loop pegged a CPU core while idle.
                # The short timeout keeps `join` responsive.
                exc = self._queue.get(timeout=0.1)
            except queue.Empty:
                # No exceptions yet, loop back to check
                # whether `_stopevent` is set
                continue
            # Re-raise outside the `try` so a watched exception that
            # happens to be `queue.Empty` is not swallowed by the
            # handler above.
            self._callback()
            six.reraise(exc[0], exc[1], exc[2])
    def join(self, timeout=None):
        self._stopevent.set()
        Thread.join(self, timeout)
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
    """Provides ReplayGain analysis.
    """
    # Maps the `backend` config value to the class implementing it.
    backends = {
        "command": CommandBackend,
        "gstreamer": GStreamerBackend,
        "audiotools": AudioToolsBackend,
        "ffmpeg": FfmpegBackend,
    }
    # Maps the `peak` config value to the corresponding `Peak` member.
    peak_methods = {
        "true": Peak.true,
        "sample": Peak.sample,
    }
    def __init__(self):
        super(ReplayGainPlugin, self).__init__()
        # default backend is 'command' for backward-compatibility.
        self.config.add({
            'overwrite': False,
            'auto': True,
            'backend': u'command',
            'threads': cpu_count(),
            'parallel_on_import': False,
            'per_disc': False,
            'peak': 'true',
            'targetlevel': 89,
            'r128': ['Opus'],
            'r128_targetlevel': lufs_to_db(-23),
        })
        self.overwrite = self.config['overwrite'].get(bool)
        self.per_disc = self.config['per_disc'].get(bool)
        # Remember which backend is used for CLI feedback
        self.backend_name = self.config['backend'].as_str()
        if self.backend_name not in self.backends:
            raise ui.UserError(
                u"Selected ReplayGain backend {0} is not supported. "
                u"Please select one of: {1}".format(
                    self.backend_name,
                    u', '.join(self.backends.keys())
                )
            )
        peak_method = self.config["peak"].as_str()
        if peak_method not in self.peak_methods:
            raise ui.UserError(
                u"Selected ReplayGain peak method {0} is not supported. "
                u"Please select one of: {1}".format(
                    peak_method,
                    u', '.join(self.peak_methods.keys())
                )
            )
        self._peak_method = self.peak_methods[peak_method]
        # On-import analysis.
        if self.config['auto']:
            self.register_listener('import_begin', self.import_begin)
            self.register_listener('import', self.import_end)
            self.import_stages = [self.imported]
        # Formats to use R128.
        self.r128_whitelist = self.config['r128'].as_str_seq()
        try:
            self.backend_instance = self.backends[self.backend_name](
                self.config, self._log
            )
        except (ReplayGainError, FatalReplayGainError) as e:
            raise ui.UserError(
                u'replaygain initialization failed: {0}'.format(e))
    def should_use_r128(self, item):
        """Checks the plugin setting to decide whether the calculation
        should be done using the EBU R128 standard and use R128_ tags instead.
        """
        return item.format in self.r128_whitelist
    def track_requires_gain(self, item):
        """Return True when `item` is missing gain data for its tag scheme
        (or `overwrite` forces recalculation).
        """
        return self.overwrite or \
            (self.should_use_r128(item) and not item.r128_track_gain) or \
            (not self.should_use_r128(item) and
             (not item.rg_track_gain or not item.rg_track_peak))
    def album_requires_gain(self, album):
        # Skip calculating gain only when *all* files don't need
        # recalculation. This way, if any file among an album's tracks
        # needs recalculation, we still get an accurate album gain
        # value.
        return self.overwrite or \
            any([self.should_use_r128(item) and
                 (not item.r128_track_gain or not item.r128_album_gain)
                 for item in album.items()]) or \
            any([not self.should_use_r128(item) and
                 (not item.rg_album_gain or not item.rg_album_peak)
                 for item in album.items()])
    def store_track_gain(self, item, track_gain):
        """Write rg_track_* fields to the database for `item`."""
        item.rg_track_gain = track_gain.gain
        item.rg_track_peak = track_gain.peak
        item.store()
        self._log.debug(u'applied track gain {0} LU, peak {1} of FS',
                        item.rg_track_gain, item.rg_track_peak)
    def store_album_gain(self, item, album_gain):
        """Write rg_album_* fields to the database for `item`."""
        item.rg_album_gain = album_gain.gain
        item.rg_album_peak = album_gain.peak
        item.store()
        self._log.debug(u'applied album gain {0} LU, peak {1} of FS',
                        item.rg_album_gain, item.rg_album_peak)
    def store_track_r128_gain(self, item, track_gain):
        """Write the R128 track gain field; R128 tags carry no peak."""
        item.r128_track_gain = track_gain.gain
        item.store()
        self._log.debug(u'applied r128 track gain {0} LU',
                        item.r128_track_gain)
    def store_album_r128_gain(self, item, album_gain):
        """Write the R128 album gain field; R128 tags carry no peak."""
        item.r128_album_gain = album_gain.gain
        item.store()
        self._log.debug(u'applied r128 album gain {0} LU',
                        item.r128_album_gain)
    def tag_specific_values(self, items):
        """Return some tag specific values.
        Returns a tuple (store_track_gain, store_album_gain, target_level,
        peak_method).
        """
        if any([self.should_use_r128(item) for item in items]):
            store_track_gain = self.store_track_r128_gain
            store_album_gain = self.store_album_r128_gain
            target_level = self.config['r128_targetlevel'].as_number()
            peak = Peak.none  # R128_* tags do not store the track/album peak
        else:
            store_track_gain = self.store_track_gain
            store_album_gain = self.store_album_gain
            target_level = self.config['targetlevel'].as_number()
            peak = self._peak_method
        return store_track_gain, store_album_gain, target_level, peak
    def handle_album(self, album, write, force=False):
        """Compute album and track replay gain and store it in all of the
        album's items.
        If ``write`` is truthy then ``item.write()`` is called for each
        item. If replay gain information is already present in all
        items, nothing is done.
        """
        if not force and not self.album_requires_gain(album):
            self._log.info(u'Skipping album {0}', album)
            return
        # Mixing R128 and non-R128 formats in one album cannot be
        # reconciled into a single album gain.
        if (any([self.should_use_r128(item) for item in album.items()]) and not
                all(([self.should_use_r128(item) for item in album.items()]))):
            self._log.error(
                u"Cannot calculate gain for album {0} (incompatible formats)",
                album)
            return
        self._log.info(u'analyzing {0}', album)
        tag_vals = self.tag_specific_values(album.items())
        store_track_gain, store_album_gain, target_level, peak = tag_vals
        # Optionally compute a separate album gain per disc.
        discs = {}
        if self.per_disc:
            for item in album.items():
                if discs.get(item.disc) is None:
                    discs[item.disc] = []
                discs[item.disc].append(item)
        else:
            discs[1] = album.items()
        for discnumber, items in discs.items():
            def _store_album(album_gain):
                # Callback invoked (possibly on a pool thread) with the
                # backend's AlbumGain result.
                if not album_gain or not album_gain.album_gain \
                        or len(album_gain.track_gains) != len(items):
                    # In some cases, backends fail to produce a valid
                    # `album_gain` without throwing FatalReplayGainError
                    # => raise non-fatal exception & continue
                    raise ReplayGainError(
                        u"ReplayGain backend `{}` failed "
                        u"for some tracks in album {}"
                        .format(self.backend_name, album)
                    )
                for item, track_gain in zip(items,
                                            album_gain.track_gains):
                    store_track_gain(item, track_gain)
                    store_album_gain(item, album_gain.album_gain)
                    if write:
                        item.try_write()
                    self._log.debug(u'done analyzing {0}', item)
            try:
                self._apply(
                    self.backend_instance.compute_album_gain, args=(),
                    kwds={
                        "items": list(items),
                        "target_level": target_level,
                        "peak": peak
                    },
                    callback=_store_album
                )
            except ReplayGainError as e:
                self._log.info(u"ReplayGain error: {0}", e)
            except FatalReplayGainError as e:
                raise ui.UserError(
                    u"Fatal replay gain error: {0}".format(e))
    def handle_track(self, item, write, force=False):
        """Compute track replay gain and store it in the item.
        If ``write`` is truthy then ``item.write()`` is called to write
        the data to disk. If replay gain information is already present
        in the item, nothing is done.
        """
        if not force and not self.track_requires_gain(item):
            self._log.info(u'Skipping track {0}', item)
            return
        tag_vals = self.tag_specific_values([item])
        store_track_gain, store_album_gain, target_level, peak = tag_vals
        def _store_track(track_gains):
            # Callback invoked with the backend's list of Gain objects.
            if not track_gains or len(track_gains) != 1:
                # In some cases, backends fail to produce a valid
                # `track_gains` without throwing FatalReplayGainError
                # => raise non-fatal exception & continue
                raise ReplayGainError(
                    u"ReplayGain backend `{}` failed for track {}"
                    .format(self.backend_name, item)
                )
            store_track_gain(item, track_gains[0])
            if write:
                item.try_write()
            self._log.debug(u'done analyzing {0}', item)
        try:
            self._apply(
                self.backend_instance.compute_track_gain, args=(),
                kwds={
                    "items": [item],
                    "target_level": target_level,
                    "peak": peak,
                },
                callback=_store_track
            )
        except ReplayGainError as e:
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(u"Fatal replay gain error: {0}".format(e))
    def _has_pool(self):
        """Check whether a `ThreadPool` is running instance in `self.pool`
        """
        # NOTE(review): relies on the private `ThreadPool._state` attribute.
        if hasattr(self, 'pool'):
            if isinstance(self.pool, ThreadPool) and self.pool._state == RUN:
                return True
        return False
    def open_pool(self, threads):
        """Open a `ThreadPool` instance in `self.pool`
        """
        if not self._has_pool() and self.backend_instance.do_parallel:
            self.pool = ThreadPool(threads)
            self.exc_queue = queue.Queue()
            signal.signal(signal.SIGINT, self._interrupt)
            self.exc_watcher = ExceptionWatcher(
                self.exc_queue,  # threads push exceptions here
                self.terminate_pool  # abort once an exception occurs
            )
            self.exc_watcher.start()
    def _apply(self, func, args, kwds, callback):
        """Run `func` (and then `callback` on its result), either on the
        thread pool when one is open or synchronously otherwise.
        """
        if self._has_pool():
            def catch_exc(func, exc_queue, log):
                """Wrapper to catch raised exceptions in threads
                """
                def wfunc(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except ReplayGainError as e:
                        log.info(e.args[0])  # log non-fatal exceptions
                    except Exception:
                        exc_queue.put(sys.exc_info())
                return wfunc
            # Wrap function and callback to catch exceptions
            func = catch_exc(func, self.exc_queue, self._log)
            callback = catch_exc(callback, self.exc_queue, self._log)
            self.pool.apply_async(func, args, kwds, callback)
        else:
            callback(func(*args, **kwds))
    def terminate_pool(self):
        """Terminate the `ThreadPool` instance in `self.pool`
        (e.g. stop execution in case of exception)
        """
        # Don't call self._as_pool() here,
        # self.pool._state may not be == RUN
        if hasattr(self, 'pool') and isinstance(self.pool, ThreadPool):
            self.pool.terminate()
            self.pool.join()
            # self.exc_watcher.join()
    def _interrupt(self, signal, frame):
        # SIGINT handler installed by `open_pool`.
        # NOTE(review): the `signal` parameter shadows the module-level
        # `signal` import within this method body.
        try:
            self._log.info('interrupted')
            self.terminate_pool()
            sys.exit(0)
        except SystemExit:
            # Silence raised SystemExit ~ exit(0)
            pass
    def close_pool(self):
        """Close the `ThreadPool` instance in `self.pool` (if there is one)
        """
        if self._has_pool():
            self.pool.close()
            self.pool.join()
            self.exc_watcher.join()
    def import_begin(self, session):
        """Handle `import_begin` event -> open pool
        """
        threads = self.config['threads'].get(int)
        if self.config['parallel_on_import'] \
                and self.config['auto'] \
                and threads:
            self.open_pool(threads)
    def import_end(self, paths):
        """Handle `import` event -> close pool
        """
        self.close_pool()
    def imported(self, session, task):
        """Add replay gain info to items or albums of ``task``.
        """
        if self.config['auto']:
            if task.is_album:
                self.handle_album(task.album, False)
            else:
                self.handle_track(task.item, False)
    def command_func(self, lib, opts, args):
        """Implementation of the `beet replaygain` CLI command."""
        try:
            write = ui.should_write(opts.write)
            force = opts.force
            # Bypass self.open_pool() if called with `--threads 0`
            if opts.threads != 0:
                threads = opts.threads or self.config['threads'].get(int)
                self.open_pool(threads)
            if opts.album:
                albums = lib.albums(ui.decargs(args))
                self._log.info(
                    "Analyzing {} albums ~ {} backend..."
                    .format(len(albums), self.backend_name)
                )
                for album in albums:
                    self.handle_album(album, write, force)
            else:
                items = lib.items(ui.decargs(args))
                self._log.info(
                    "Analyzing {} tracks ~ {} backend..."
                    .format(len(items), self.backend_name)
                )
                for item in items:
                    self.handle_track(item, write, force)
            self.close_pool()
        except (SystemExit, KeyboardInterrupt):
            # Silence interrupt exceptions
            pass
    def commands(self):
        """Return the "replaygain" ui subcommand.
        """
        cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
        cmd.parser.add_album_option()
        cmd.parser.add_option(
            "-t", "--threads", dest="threads", type=int,
            help=u'change the number of threads, \
            defaults to maximum available processors'
        )
        cmd.parser.add_option(
            "-f", "--force", dest="force", action="store_true", default=False,
            help=u"analyze all files, including those that "
                 "already have ReplayGain metadata")
        cmd.parser.add_option(
            "-w", "--write", default=None, action="store_true",
            help=u"write new metadata to files' tags")
        cmd.parser.add_option(
            "-W", "--nowrite", dest="write", action="store_false",
            help=u"don't write metadata (opposite of -w)")
        cmd.func = self.command_func
        return [cmd]
| mit |
abantam/pmtud | nsc/scons-local-1.2.0.d20090223/SCons/Scanner/D.py | 19 | 2560 | """SCons.Scanner.D
Scanner for the Digital Mars "D" programming language.
Coded by Andy Friesen
17 Nov 2003
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/D.py 4043 2009/02/23 09:06:45 scons"
import re
import string
import SCons.Scanner
def DScanner():
    """Return a prototype Scanner instance for scanning D source files"""
    return D()
class D(SCons.Scanner.Classic):
    """Scanner that finds the modules named in D ``import`` statements
    and resolves them to ``.d`` source (or ``.di`` interface) files on
    the D search path (``$DPATH``).
    """
    def __init__(self):
        # Raw strings so the regex escapes (\s) are not treated as
        # (invalid) Python string escapes.
        SCons.Scanner.Classic.__init__(
            self,
            name = "DScanner",
            suffixes = '$DSUFFIXES',
            path_variable = 'DPATH',
            regex = r'import\s+(?:[a-zA-Z0-9_.]+)\s*(?:,\s*(?:[a-zA-Z0-9_.]+)\s*)*;')
        # Secondary pattern: pull the individual module names out of a
        # (possibly comma-separated) import statement.
        self.cre2 = re.compile(r'(?:import\s)?\s*([a-zA-Z0-9_.]+)\s*(?:,|;)', re.M)
    def find_include(self, include, source_dir, path):
        """Map a module name to a file node, trying `.d` then `.di`."""
        # translate dots (package separators) to slashes; the str method
        # works on both Python 2 and 3 (string.replace() was 2.x-only).
        inc = include.replace('.', '/')
        i = SCons.Node.FS.find_file(inc + '.d', (source_dir,) + path)
        if i is None:
            i = SCons.Node.FS.find_file(inc + '.di', (source_dir,) + path)
        return i, include
    def find_include_names(self, node):
        """Return every module name imported by the node's contents."""
        includes = []
        for i in self.cre.findall(node.get_text_contents()):
            includes = includes + self.cre2.findall(i)
        return includes
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
renyi533/tensorflow | tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py | 8 | 4186 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
                                 parameterized.TestCase):
  # TODO(jsimsa): Break this down to multiple (parameterized) test cases.
  @combinations.generate(
      combinations.combine(tf_api_version=1, mode=["graph"]))
  def testFromSparseTensorSlices(self):
    """Test a dataset based on slices of a `tf.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_sparse_tensor_slices(st))
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    with self.cached_session() as sess:
      # Each inner list is one row of the sparse tensor and hence one
      # expected element of the dataset.
      slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
      # Test with sparse tensor in the appropriate order.
      indices = np.array(
          [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
      values = np.array([val for s in slices for val in s])
      # Dense shape is deliberately one wider than the longest row.
      dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
      sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
                                                    dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      for i, s in enumerate(slices):
        results = sess.run(get_next)
        self.assertAllEqual(s, results.values)
        expected_indices = np.array(
            [[j] for j in range(len(slices[i]))]).reshape([-1, 1])
        self.assertAllEqual(expected_indices, results.indices)
        self.assertAllEqual(dense_shape[1:], results.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Test with sparse tensor in the reverse order, which is not
      # currently supported.
      reverse_order_indices = indices[::-1, :]
      reverse_order_values = values[::-1]
      sparse_feed = sparse_tensor.SparseTensorValue(
          reverse_order_indices, reverse_order_values, dense_shape)
      with self.assertRaises(errors.UnimplementedError):
        sess.run(init_op, feed_dict={st: sparse_feed})
      # Test with an empty sparse tensor.
      empty_indices = np.empty((0, 4), dtype=np.int64)
      empty_values = np.empty((0,), dtype=np.float64)
      empty_dense_shape = [0, 4, 37, 9]
      sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
                                                    empty_dense_shape)
      sess.run(init_op, feed_dict={st: sparse_feed})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  @combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
  def testFromSparseTensorSlicesError(self):
    """The API is removed in TF2: accessing it must raise AttributeError."""
    with self.assertRaises(AttributeError):
      dataset_ops.Dataset.from_sparse_tensor_slices(None)
# Run the test suite when this module is executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
Kongsea/tensorflow | tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py | 35 | 4876 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing fused conv2d bias_add and relu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.fused_conv.ops import gen_fused_conv2d_bias_activation_op
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# Load the custom op's shared library at import time, registering its ops
# and kernels with the TensorFlow runtime.
_fused_conv2d_bias_activation_op_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_fused_conv2d_bias_activation_op.so"))
# pylint: disable=redefined-builtin
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 strides=None,
                                 padding=None,
                                 conv_input_scale=1.0,
                                 side_input_scale=0.0,
                                 side_input=None,
                                 activation_mode="Relu",
                                 data_format=None,
                                 filter_format=None,
                                 name=None):
  """Fused 2D conv, bias and activation with optional side input.
  Computes a fused 2-D convolution scaled by conv_input_scale,
  adds an optional side input scaled by side_input_scale, adds biases,
  and applies ReLU. As an equation:
  output = ReLU(conv_input_scale * Conv(conv_input, filter) +
                side_input_scale * side_input + bias)
  Note: In int8 mode, The ReLU will clip the output to the range [0..127].
  Args:
    conv_input: A `Tensor` of the format specified by `data_format`.
    filter: A `Tensor` whose format depends on `data_format`:
        if `data_format` is "NCHW_VECT_C", filter should be "OIHW_VECT_I"
        otherwise, it should be "HWIO" format.
    bias: A 1-D `Tensor` of type `float32`, and dimensions equal to the
        number of output channels.
    strides: A list of 4 `ints` specifying convolution strides.
        if `data_format` is "NCHW" or "NCHW_VECT_C", the order should be NCHW.
        if `data_format` is "NHWC", the order should be NHWC.
    padding: A `string` from: `"SAME", "VALID"`.
    conv_input_scale: A scalar `float32` that will be multiplied by conv_input.
        This is optional and defaults to 1. However it should be set to
        specify the quantization scale when `data_format` is "NCHW_VECT_C".
    side_input_scale: A scalar `float32` that will be multiplied by side_input.
        This is optional and defaults to 0.
    side_input: A `Tensor` of the format specified by `data_format`.
        This is useful for implementing ResNet blocks.
    activation_mode: (optional) currently must be the default "Relu".
        Note that in qint8 mode, it also clips to 127, so acts like ReluX.
    data_format: Specifies the data format.
        Possible values are:
        "NHWC" float [batch, height, width, channels]
        "NCHW" float [batch, channels, height, width]
        "NCHW_VECT_C" qint8 [batch, channels / 4, height, width, channels % 4]
        Defaults to `"NHWC"`.
        Performance is worst for `"NHWC"` and best for `"NCHW_VECT_C"`.
    filter_format: Specifies the filter format.
        Possible values are:
        "HWIO" float [kernel_height, kernel_width, input_channels,
                      output_channels ]
        "OIHW" float [output_channels, input_channels, kernel_height,
                      kernel_width ]
        "OIHW_VECT_I" qint8 [ output_channels, input_channels / 4,
                              kernel_height, kernel_width, input_channels % 4 ]
        Defaults to `"HWIO"`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of the format specified by `data_format`.
  """
  # Default stride of 1 in every dimension.
  if strides is None:
    strides = [1, 1, 1, 1]
  # The generated op expects a (possibly empty) list for the side input.
  if side_input is None:
    side_input = []
  return gen_fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
      conv_input,
      filter,
      bias,
      side_input,
      conv_input_scale,
      side_input_scale,
      padding=padding,
      strides=strides,
      activation_mode=activation_mode,
      data_format=data_format,
      filter_format=filter_format,
      name=name)
| apache-2.0 |
perfidia/regexpgen | tests/Date.py | 1 | 10848 | '''
Created on Mar 16, 2012
@author: Bartosz Alchimowicz
'''
import unittest
import regexpgen
import re
class Test(unittest.TestCase):
def testDefault(self):
regexp = regexpgen.date("%Y")
self.assertTrue(re.match(regexp, "1990"))
self.assertTrue(re.match(regexp, "2099"))
self.assertTrue(re.match(regexp, "1970"))
self.assertTrue(re.match(regexp, "1983"))
self.assertTrue(re.match(regexp, "2012"))
self.assertFalse(re.match(regexp, "1"))
self.assertFalse(re.match(regexp, "33"))
self.assertFalse(re.match(regexp, "0024"))
self.assertFalse(re.match(regexp, "99"))
self.assertFalse(re.match(regexp, "-17"))
self.assertFalse(re.match(regexp, "2100"))
self.assertFalse(re.match(regexp, "1969"))
regexp = regexpgen.date("%y")
self.assertTrue(re.match(regexp, "90"))
self.assertTrue(re.match(regexp, "99"))
self.assertTrue(re.match(regexp, "70"))
self.assertTrue(re.match(regexp, "83"))
self.assertTrue(re.match(regexp, "02"))
self.assertFalse(re.match(regexp, "1"))
self.assertFalse(re.match(regexp, "335"))
self.assertFalse(re.match(regexp, "0024"))
self.assertFalse(re.match(regexp, "9"))
self.assertFalse(re.match(regexp, "-17"))
self.assertFalse(re.match(regexp, "1ss"))
regexp = regexpgen.date("%m")
self.assertTrue(re.match(regexp, "12"))
self.assertTrue(re.match(regexp, "01"))
self.assertTrue(re.match(regexp, "11"))
self.assertTrue(re.match(regexp, "09"))
self.assertFalse(re.match(regexp, "1"))
self.assertFalse(re.match(regexp, "335"))
self.assertFalse(re.match(regexp, "13"))
self.assertFalse(re.match(regexp, "00"))
self.assertFalse(re.match(regexp, "-17"))
self.assertFalse(re.match(regexp, "1s"))
regexp = regexpgen.date("%d")
self.assertTrue(re.match(regexp, "12"))
self.assertTrue(re.match(regexp, "01"))
self.assertTrue(re.match(regexp, "31"))
self.assertTrue(re.match(regexp, "28"))
self.assertTrue(re.match(regexp, "09"))
self.assertFalse(re.match(regexp, "1"))
self.assertFalse(re.match(regexp, "335"))
self.assertFalse(re.match(regexp, "99"))
self.assertFalse(re.match(regexp, "00"))
self.assertFalse(re.match(regexp, "-17"))
self.assertFalse(re.match(regexp, "1ss"))
regexp = regexpgen.date("%d-%m")
self.assertTrue(re.match(regexp, "12-12"))
self.assertTrue(re.match(regexp, "01-01"))
self.assertTrue(re.match(regexp, "31-12"))
self.assertTrue(re.match(regexp, "28-02"))
self.assertTrue(re.match(regexp, "09-09"))
self.assertFalse(re.match(regexp, "1-10"))
self.assertFalse(re.match(regexp, "31-02"))
self.assertFalse(re.match(regexp, "99-92"))
self.assertFalse(re.match(regexp, "00-00"))
self.assertFalse(re.match(regexp, "-17-00"))
self.assertFalse(re.match(regexp, "1ss"))
regexp = regexpgen.date("%Y-%m")
self.assertTrue(re.match(regexp, "2012-12"))
self.assertTrue(re.match(regexp, "2001-01"))
self.assertTrue(re.match(regexp, "1991-12"))
self.assertTrue(re.match(regexp, "2050-02"))
self.assertTrue(re.match(regexp, "1999-09"))
self.assertFalse(re.match(regexp, "1955-10"))
self.assertFalse(re.match(regexp, "31-02"))
self.assertFalse(re.match(regexp, "3099-92"))
self.assertFalse(re.match(regexp, "0000-00"))
self.assertFalse(re.match(regexp, "-1700-00"))
self.assertFalse(re.match(regexp, "1sss-ss"))
regexp = regexpgen.date("%Y-%m-%d")
self.assertTrue(re.match(regexp, "2089-01-12"))
self.assertTrue(re.match(regexp, "2087-12-13"))
self.assertTrue(re.match(regexp, "2090-02-28"))
self.assertTrue(re.match(regexp, "2088-09-30"))
self.assertFalse(re.match(regexp, "1955-10-00"))
self.assertFalse(re.match(regexp, "31-02-04"))
self.assertFalse(re.match(regexp, "3099-92-19"))
self.assertFalse(re.match(regexp, "0000-00-00"))
self.assertFalse(re.match(regexp, "-1700-00-21"))
self.assertFalse(re.match(regexp, "1sss-ss-45"))
def testForMin(self):
    """Date regexps must accept values at/above an inclusive minimum."""
    # (format, minimum, accepted samples, rejected samples)
    cases = [
        ("%Y", "1990",
         ("1990", "2099", "1997"),
         ("1989", "1988", "0024", "1969")),
        ("%y", "85",
         ("99", "88", "85", "91"),
         ("01", "00", "84", "55")),
        ("%m", "06",
         ("12", "06", "08", "09"),
         ("01", "05", "13", "04")),
        ("%d", "13",
         ("13", "14", "31", "28"),
         ("01", "12", "99", "00")),
        ("%Y-%m-%d", "2072-12-01",
         ("2072-12-01", "2083-01-12", "2090-02-28", "2099-09-30"),
         ("1972-12-01", "2012-11-01", "1995-10-01", "1955-10-01")),
    ]
    for fmt, minimum, accepted, rejected in cases:
        regexp = regexpgen.date(fmt, minimum)
        for sample in accepted:
            self.assertTrue(re.match(regexp, sample))
        for sample in rejected:
            self.assertFalse(re.match(regexp, sample))
def testForMax(self):
    """Date regexps must reject values above an inclusive maximum."""
    # (format, maximum, rejected samples, accepted samples)
    cases = [
        ("%Y", "1990",
         ("1991", "2099", "1997"),
         ("1989", "1990", "1971")),
        ("%y", "85",
         ("99", "88", "86", "91"),
         ("01", "85", "84", "55")),
        ("%m", "06",
         ("12", "07", "08", "09"),
         ("01", "05", "06", "04")),
        ("%d", "13",
         ("14", "15", "31", "28"),
         ("01", "12", "13", "07")),
        ("%Y-%m-%d", "1980-12-01",
         ("2072-12-01", "2083-01-12", "2090-02-28", "1980-12-02",
          "1977-02-29"),
         ("1980-12-01", "1980-11-02", "1975-10-05", "1977-10-21",
          "1976-02-29", "1980-02-29")),
    ]
    for fmt, maximum, rejected, accepted in cases:
        regexp = regexpgen.date(fmt, None, maximum)
        for sample in rejected:
            self.assertFalse(re.match(regexp, sample))
        for sample in accepted:
            self.assertTrue(re.match(regexp, sample))
def testForMinMax(self):
    """Date regexps must honour inclusive lower *and* upper bounds.

    Fix: the original asserted ``assertFalse(re.match(regexp, "84"))``
    twice in a row for the "%y" case; the redundant duplicate is removed.
    """
    # (format, minimum, maximum, accepted samples, rejected samples)
    cases = [
        ("%Y", "1990", "2000",
         ("1990", "2000", "1997"),
         ("1989", "1988", "2001", "2011")),
        ("%y", "85", "95",
         ("95", "88", "85", "91"),
         ("01", "84", "99")),
        ("%m", "06", "10",
         ("10", "06", "08", "09"),
         ("11", "05", "13", "04")),
        ("%d", "13", "20",
         ("13", "14", "20", "15"),
         ("21", "12", "99", "00")),
        ("%Y-%m-%d", "2072-12-01", "2085-12-01",
         ("2072-12-01", "2083-01-12", "2073-02-28", "2085-12-01"),
         ("2085-12-02", "2072-11-30", "1995-10-01", "1955-10-01")),
    ]
    for fmt, lower, upper, accepted, rejected in cases:
        regexp = regexpgen.date(fmt, lower, upper)
        for sample in accepted:
            self.assertTrue(re.match(regexp, sample))
        for sample in rejected:
            self.assertFalse(re.match(regexp, sample))
def testForWrongFormat(self):
    """Unsupported or inconsistently mixed directives raise ValueError."""
    bad_formats = ("%wd %ay", "%Y:%y", "%y:%d", "%Y:%d", "%P")
    for fmt in bad_formats:
        self.assertRaises(ValueError, regexpgen.date, fmt)
def testForWrongInput(self):
    """Min/max bounds that contradict the format or each other must raise.

    NOTE(review): every call here targets ``regexpgen.time`` although the
    formats are date directives ("%d", "%Y-%m", ...); this looks copied
    from the time tests and probably should exercise ``regexpgen.date``
    instead -- confirm against the regexpgen API before relying on it.
    """
    self.assertRaises(ValueError,regexpgen.time, "%d:%m", "01:00", "00:00")
    self.assertRaises(ValueError,regexpgen.time, "%Y-%m", "99-03", "1998-03")
    self.assertRaises(ValueError,regexpgen.time, "%m-%d", "13-02", "02-02")
    self.assertRaises(ValueError,regexpgen.time, "%m", "12", "02")
    self.assertRaises(ValueError,regexpgen.time, "%d", "00", "100")
    self.assertRaises(ValueError,regexpgen.time, "%Y/%m/%d", "1990-02/02", "1992/03-03")
if __name__ == "__main__":
    # Run the whole regexpgen test-suite when executed directly.
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| mit |
YantaiGao/learn_Python_The-Hard-Way | No38_ListOp.py | 1 | 1059 | # -*- coding:utf-8 -*-
# Note how a class is declared:
class Thing(object):
    """Tiny example class used to demonstrate class declaration."""

    # ``self`` must be the first parameter, otherwise calling the
    # method on an instance raises a TypeError.
    def test(self,hi):
        print hi
a = Thing()  # instantiate an object of the class
a.test("hahaha")
print "---------------------------------"

# Demonstration of basic list operations (Python 2 syntax).
test_things = "Apple Orange Crows Telephone Light Suger"
print "There is not 10 things in that list,let's fix it."
stuff = test_things.split(' ')
more_stuff = ["Mon","Tues","Wed","Thris","Fir","Sat","Sun","MOON"]

# NOTE(review): indentation was lost in this copy; the loop body below is
# reconstructed from context -- confirm against the original exercise.
while len(stuff)!=10:
    # pop() removes from the END of the list, so the last item comes out first
    next = more_stuff.pop()
    print "Adding ", next
    # append() adds the item at the end of the list
    stuff.append(next)
    print "There are %d elements in list " %len(stuff)

print "Here we go: ",stuff
# indexes start at 0!
print stuff[1]
# -1 is the last element; negative indexes count from the end, backwards
print "stuff[-1] == ",stuff[-1]
print "stuff[-2] == ",stuff[-2]
print stuff.pop()
# join() does NOT modify the elements of the list itself
print ' '.join(stuff)
# stuff[3:5] slices like range(): start inclusive, stop exclusive
print '#'.join(stuff[3:5])
print stuff | gpl-3.0 |
jr-garcia/Engendro3D | e3d/model_management/interpolation.py | 1 | 1562 | # import numpy
# from cycgkit.cgtypes import vec3, quat
def getClosest(keys, time, chrid, sortedKeys):
    """Find the keyframe values bracketing *time* for one channel.

    ``keys`` maps a key time to a keyframe object exposing ``position``,
    ``scale`` and ``rotation``; ``sortedKeys`` is the ascending list of
    those key times.  ``chrid`` selects the channel: 'p' -> position,
    's' -> scale, anything else -> rotation.

    Returns ``(before, after, t)`` with ``t`` the normalized blend factor,
    or ``(value, None, time)`` when no key precedes *time*.
    NOTE(review): when a key precedes *time* but none follows it, ``t`` is
    still computed using the sentinel -1 -- callers presumably check for
    ``after is None``; confirm.
    """
    def _value_at(source, when, channel):
        # Missing key -> None (mirrors the original KeyError handling).
        try:
            key = source[when]
        except KeyError:
            return None
        if channel == 'p':
            return key.position
        if channel == 's':
            return key.scale
        return key.rotation

    before = after = None
    before_time = after_time = -1

    # Walk backwards for the closest key strictly before *time*.
    for idx in range(len(keys) - 1, -1, -1):
        if sortedKeys[idx] < time:
            before = _value_at(keys, sortedKeys[idx], chrid)
            before_time = sortedKeys[idx]
            break

    # Walk forwards for the closest key strictly after *time*.
    for idx in range(len(keys)):
        if sortedKeys[idx] > time:
            after = _value_at(keys, sortedKeys[idx], chrid)
            after_time = sortedKeys[idx]
            break

    if before is None:
        if after is not None:
            return after, None, time
        return _value_at(keys, 0, chrid), None, time

    blend = 1.0 - ((after_time - time) / (after_time - before_time))
    return before, after, blend
# -----------
# http:#keithmaggio.wordpress.com/2011/02/15/math-magician-lerp-slerp-and-nlerp/
def Lerp(percent, start, end):
    """Linear interpolation: *start* at percent == 0, *end* at percent == 1."""
    span = end - start
    return start + (percent * span)
# def Nlerp(percent, start, end):
# res = Lerp(percent, start, end)
# if res.shape[0] == 3:
# return numpy.array(vec3(res).normalize())
# else:
# na = numpy.zeros(shape=(4,))
# tres = quat(res).normalize()
# # na = res
# na[0] = tres.w
# na[1] = tres.x
# na[2] = tres.y
# na[3] = tres.z
# return na | mit |
NetApp/manila | manila/tests/share/drivers/emc/plugins/vnx/test_object_manager.py | 1 | 125573 | # Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
from lxml import builder
import mock
from oslo_concurrency import processutils
from manila.common import constants as const
from manila import exception
from manila.share.drivers.emc.plugins.vnx import connector
from manila.share.drivers.emc.plugins.vnx import constants
from manila.share.drivers.emc.plugins.vnx import object_manager as manager
from manila.share.drivers.emc.plugins.vnx import xml_api_parser as parser
from manila import test
from manila.tests.share.drivers.emc.plugins.vnx import fakes
from manila.tests.share.drivers.emc.plugins.vnx import utils
class StorageObjectManagerTestCase(test.TestCase):
    """Tests for StorageObjectManager's context lookup."""

    @mock.patch.object(connector, "XMLAPIConnector", mock.Mock())
    @mock.patch.object(connector, "SSHConnector", mock.Mock())
    def setUp(self):
        super(StorageObjectManagerTestCase, self).setUp()
        driver = fakes.FakeEMCShareDriver()
        self.manager = manager.StorageObjectManager(driver.configuration)

    def test_get_storage_context(self):
        """Every known type name maps to the matching context class."""
        expected_types = {
            'FileSystem': manager.FileSystem,
            'StoragePool': manager.StoragePool,
            'MountPoint': manager.MountPoint,
            'Mover': manager.Mover,
            'VDM': manager.VDM,
            'Snapshot': manager.Snapshot,
            'MoverInterface': manager.MoverInterface,
            'DNSDomain': manager.DNSDomain,
            'CIFSServer': manager.CIFSServer,
            'CIFSShare': manager.CIFSShare,
            'NFSShare': manager.NFSShare,
        }

        for type_name, context_class in expected_types.items():
            context = self.manager.getStorageContext(type_name)
            self.assertTrue(isinstance(context, context_class))

        # The manager must not register any context beyond the known set.
        for registered in self.manager.context.keys():
            self.assertTrue(registered in expected_types)

    def test_get_storage_context_invalid_type(self):
        """An unknown type name raises EMCVnxXMLAPIError."""
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.manager.getStorageContext,
                          'fake_type')
class StorageObjectTestCaseBase(test.TestCase):
    """Shared fixture: a StorageObjectManager plus canned fake test data.

    Subclasses use the ``self.pool``/``self.vdm``/... attributes to build
    expected backend requests and queued responses, so those attribute
    names are part of this class's interface.
    """

    @mock.patch.object(connector, "XMLAPIConnector", mock.Mock())
    @mock.patch.object(connector, "SSHConnector", mock.Mock())
    def setUp(self):
        super(StorageObjectTestCaseBase, self).setUp()
        emd_share_driver = fakes.FakeEMCShareDriver()
        self.manager = manager.StorageObjectManager(
            emd_share_driver.configuration)
        # Fake request/response builders, one per backend object kind.
        self.base = fakes.StorageObjectTestData()
        self.pool = fakes.PoolTestData()
        self.vdm = fakes.VDMTestData()
        self.mover = fakes.MoverTestData()
        self.fs = fakes.FileSystemTestData()
        self.mount = fakes.MountPointTestData()
        self.snap = fakes.SnapshotTestData()
        self.cifs_share = fakes.CIFSShareTestData()
        self.nfs_share = fakes.NFSShareTestData()
        self.cifs_server = fakes.CIFSServerTestData()
        self.dns = fakes.DNSDomainTestData()
class StorageObjectTestCase(StorageObjectTestCaseBase):
    """Tests for the generic StorageObject base behaviour."""

    def test_xml_api_retry(self):
        """A 'need retry' reply is retried and the second reply returned."""
        hook = utils.RequestSideEffect()
        hook.append(self.base.resp_need_retry())
        hook.append(self.base.resp_task_succeed())

        elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE})
        xml_parser = parser.XMLAPIParser()
        storage_object = manager.StorageObject(self.manager.connectors,
                                               elt_maker, xml_parser,
                                               self.manager)
        storage_object.conn['XML'].request = utils.EMCMock(side_effect=hook)

        fake_req = storage_object._build_task_package(
            elt_maker.StartFake(name='foo')
        )
        resp = storage_object._send_request(fake_req)
        self.assertEqual('ok', resp['maxSeverity'])

        # The identical request must have been sent twice (retry).
        expected_calls = [
            mock.call(self.base.req_fake_start_task()),
            mock.call(self.base.req_fake_start_task())
        ]
        storage_object.conn['XML'].request.assert_has_calls(expected_calls)
class FileSystemTestCase(StorageObjectTestCaseBase):
    """Tests for the FileSystem storage-object context.

    Each test queues canned backend replies on ``self.hook`` (XML API) or
    ``self.ssh_hook`` (SSH), drives one FileSystem operation, then asserts
    the exact request sequence sent to the backend.
    """

    def setUp(self):
        # Fix: ``super(self.__class__, self)`` recurses infinitely if this
        # class is ever subclassed -- name the class explicitly.
        super(FileSystemTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_create_file_system_on_vdm(self):
        """Create a file system on a VDM."""
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.fs.resp_task_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.fs.filesystem_name,
                       size=self.fs.filesystem_size,
                       pool_name=self.pool.pool_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.fs.req_create_on_vdm()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_file_system_on_mover(self):
        """Create a file system directly on a physical Data Mover."""
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.fs.resp_task_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.fs.filesystem_name,
                       size=self.fs.filesystem_size,
                       pool_name=self.pool.pool_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.fs.req_create_on_mover()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_file_system_but_already_exist(self):
        """An 'already exists' backend reply is treated as success."""
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.fs.resp_create_but_already_exist())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.fs.filesystem_name,
                       size=self.fs.filesystem_size,
                       pool_name=self.pool.pool_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.fs.req_create_on_vdm()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_file_system_invalid_mover_id(self, sleep_mock):
        """An invalid-mover-id reply triggers a mover refresh and a retry."""
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.fs.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.fs.resp_task_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.fs.filesystem_name,
                       size=self.fs.filesystem_size,
                       pool_name=self.pool.pool_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.fs.req_create_on_mover()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.fs.req_create_on_mover()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_file_system_with_error(self):
        """A task error from the backend raises EMCVnxXMLAPIError."""
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.fs.resp_task_error())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.fs.filesystem_name,
                          size=self.fs.filesystem_size,
                          pool_name=self.pool.pool_name,
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.fs.req_create_on_vdm()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_file_system(self):
        """A successful get caches the file system and exposes its id."""
        self.hook.append(self.fs.resp_get_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.fs.filesystem_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.fs.filesystem_name, context.filesystem_map)
        property_map = [
            'name',
            'pools_id',
            'volume_id',
            'size',
            'id',
            'type',
            'dataServicePolicies',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        # Renamed from ``id`` to avoid shadowing the builtin.
        fs_id = context.get_id(self.fs.filesystem_name)
        self.assertEqual(self.fs.filesystem_id, fs_id)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_file_system_but_not_found(self):
        """Missing/errored lookups report NOT_FOUND/ERROR; get_id raises."""
        self.hook.append(self.fs.resp_get_but_not_found())
        self.hook.append(self.fs.resp_get_without_value())
        self.hook.append(self.fs.resp_get_error())
        self.hook.append(self.fs.resp_get_but_not_found())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.fs.filesystem_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        status, out = context.get(self.fs.filesystem_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        status, out = context.get(self.fs.filesystem_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.fs.filesystem_name)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.fs.req_get()),
            mock.call(self.fs.req_get()),
            mock.call(self.fs.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_file_system_but_miss_property(self):
        """A reply missing dataServicePolicies still succeeds (value None)."""
        self.hook.append(self.fs.resp_get_but_miss_property())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.fs.filesystem_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.fs.filesystem_name, context.filesystem_map)
        property_map = [
            'name',
            'pools_id',
            'volume_id',
            'size',
            'id',
            'type',
            'dataServicePolicies',
        ]
        for prop in property_map:
            self.assertIn(prop, out)
        self.assertIsNone(out['dataServicePolicies'])

        fs_id = context.get_id(self.fs.filesystem_name)
        self.assertEqual(self.fs.filesystem_id, fs_id)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_file_system(self):
        """Deleting a file system removes it from the local cache."""
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.fs.resp_task_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.fs.filesystem_name)
        self.assertNotIn(self.fs.filesystem_name, context.filesystem_map)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.fs.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

        self.assertNotIn(self.fs.filesystem_name, context.filesystem_map)

    def test_delete_file_system_but_not_found(self):
        """Deleting a nonexistent file system is a silent no-op."""
        self.hook.append(self.fs.resp_get_but_not_found())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.fs.filesystem_name)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_file_system_but_get_file_system_error(self):
        """A failed lookup aborts the delete with EMCVnxXMLAPIError."""
        self.hook.append(self.fs.resp_get_error())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.fs.filesystem_name)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_file_system_with_error(self):
        """A failed delete keeps the cache entry and raises."""
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.fs.resp_delete_but_failed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.fs.filesystem_name)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.fs.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

        self.assertIn(self.fs.filesystem_name, context.filesystem_map)

    def test_extend_file_system(self):
        """Grow a file system to a larger size."""
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.fs.resp_task_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.extend(name=self.fs.filesystem_name,
                       pool_name=self.pool.pool_name,
                       new_size=self.fs.filesystem_new_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.pool.req_get()),
            mock.call(self.fs.req_extend()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_extend_file_system_but_not_found(self):
        """Extending a nonexistent file system raises."""
        self.hook.append(self.fs.resp_get_but_not_found())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.extend,
                          name=self.fs.filesystem_name,
                          pool_name=self.fs.pool_name,
                          new_size=self.fs.filesystem_new_size)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_extend_file_system_with_small_size(self):
        """Shrinking (new size below current size) is rejected."""
        self.hook.append(self.fs.resp_get_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.extend,
                          name=self.fs.filesystem_name,
                          pool_name=self.pool.pool_name,
                          new_size=1)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_extend_file_system_with_same_size(self):
        """Extending to the current size sends no extend request."""
        self.hook.append(self.fs.resp_get_succeed())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.extend(name=self.fs.filesystem_name,
                       pool_name=self.pool.pool_name,
                       new_size=self.fs.filesystem_size)

        expected_calls = [mock.call(self.fs.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_extend_file_system_with_error(self):
        """A backend error during extend raises EMCVnxXMLAPIError."""
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.pool.resp_get_succeed())
        self.hook.append(self.fs.resp_extend_but_error())

        context = self.manager.getStorageContext('FileSystem')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.extend,
                          name=self.fs.filesystem_name,
                          pool_name=self.pool.pool_name,
                          new_size=self.fs.filesystem_new_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.pool.req_get()),
            mock.call(self.fs.req_extend()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_filesystem_from_snapshot(self):
        """Build a new file system from a snapshot via SSH commands."""
        self.ssh_hook.append()
        self.ssh_hook.append()
        self.ssh_hook.append(self.fs.output_copy_ckpt)
        self.ssh_hook.append(self.fs.output_info())
        self.ssh_hook.append()
        self.ssh_hook.append()
        self.ssh_hook.append()

        context = self.manager.getStorageContext('FileSystem')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.create_from_snapshot(self.fs.filesystem_name,
                                     self.snap.src_snap_name,
                                     self.fs.src_fileystems_name,
                                     self.pool.pool_name,
                                     self.vdm.vdm_name,
                                     self.mover.interconnect_id,)

        ssh_calls = [
            mock.call(self.fs.cmd_create_from_ckpt(), False),
            mock.call(self.mount.cmd_server_mount('ro'), False),
            mock.call(self.fs.cmd_copy_ckpt(), True),
            mock.call(self.fs.cmd_nas_fs_info(), False),
            mock.call(self.mount.cmd_server_umount(), False),
            mock.call(self.fs.cmd_delete(), False),
            mock.call(self.mount.cmd_server_mount('rw'), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_create_filesystem_from_snapshot_with_error(self):
        """A failing copy step still runs the full cleanup sequence."""
        self.ssh_hook.append()
        self.ssh_hook.append()
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.fs.fake_output, stderr=None))
        self.ssh_hook.append(self.fs.output_info())
        self.ssh_hook.append()
        self.ssh_hook.append()
        self.ssh_hook.append()

        context = self.manager.getStorageContext('FileSystem')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.create_from_snapshot(
            self.fs.filesystem_name,
            self.snap.src_snap_name,
            self.fs.src_fileystems_name,
            self.pool.pool_name,
            self.vdm.vdm_name,
            self.mover.interconnect_id, )

        ssh_calls = [
            mock.call(self.fs.cmd_create_from_ckpt(), False),
            mock.call(self.mount.cmd_server_mount('ro'), False),
            mock.call(self.fs.cmd_copy_ckpt(), True),
            mock.call(self.fs.cmd_nas_fs_info(), False),
            mock.call(self.mount.cmd_server_umount(), False),
            mock.call(self.fs.cmd_delete(), False),
            mock.call(self.mount.cmd_server_mount('rw'), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
class MountPointTestCase(StorageObjectTestCaseBase):
def setUp(self):
    """Install the request hook used to queue canned XML-API replies."""
    # Fix: ``super(self.__class__, self)`` recurses infinitely if this
    # class is ever subclassed -- name the class explicitly.
    super(MountPointTestCase, self).setUp()
    self.hook = utils.RequestSideEffect()
def test_create_mount_point_on_vdm(self):
    """Create a mount point on a VDM and verify the request sequence."""
    self.hook.append(self.fs.resp_get_succeed())
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.create(mount_path=self.mount.path,
                   fs_name=self.fs.filesystem_name,
                   mover_name=self.vdm.vdm_name,
                   is_vdm=True)

    expected_calls = [
        mock.call(self.fs.req_get()),
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_create(self.vdm.vdm_id, True)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_create_mount_point_on_mover(self):
    """Create a mount point on a physical Data Mover."""
    self.hook.append(self.fs.resp_get_succeed())
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.create(mount_path=self.mount.path,
                   fs_name=self.fs.filesystem_name,
                   mover_name=self.mover.mover_name,
                   is_vdm=False)

    expected_calls = [
        mock.call(self.fs.req_get()),
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_create(self.mover.mover_id, False)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_create_mount_point_but_already_exist(self):
    """An 'already exists' backend reply is treated as success."""
    self.hook.append(self.fs.resp_get_succeed())
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_create_but_already_exist())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.create(mount_path=self.mount.path,
                   fs_name=self.fs.filesystem_name,
                   mover_name=self.vdm.vdm_name,
                   is_vdm=True)

    expected_calls = [
        mock.call(self.fs.req_get()),
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_create(self.vdm.vdm_id)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
@mock.patch('time.sleep')
def test_create_mount_point_invalid_mover_id(self, sleep_mock):
    """An invalid-mover-id reply triggers a mover refresh and a retry."""
    self.hook.append(self.fs.resp_get_succeed())
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_invalid_mover_id())
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.create(mount_path=self.mount.path,
                   fs_name=self.fs.filesystem_name,
                   mover_name=self.mover.mover_name,
                   is_vdm=False)

    expected_calls = [
        mock.call(self.fs.req_get()),
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_create(self.mover.mover_id, False)),
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_create(self.mover.mover_id, False)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
    self.assertTrue(sleep_mock.called)
def test_create_mount_point_with_error(self):
    """A task error from the backend raises EMCVnxXMLAPIError."""
    self.hook.append(self.fs.resp_get_succeed())
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_task_error())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    self.assertRaises(exception.EMCVnxXMLAPIError,
                      context.create,
                      mount_path=self.mount.path,
                      fs_name=self.fs.filesystem_name,
                      mover_name=self.vdm.vdm_name,
                      is_vdm=True)

    expected_calls = [
        mock.call(self.fs.req_get()),
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_create(self.vdm.vdm_id)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_delete_mount_point_on_vdm(self):
    """Delete a mount point hosted on a VDM."""
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.delete(mount_path=self.mount.path,
                   mover_name=self.vdm.vdm_name,
                   is_vdm=True)

    expected_calls = [
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_delete(self.vdm.vdm_id)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_delete_mount_point_on_mover(self):
    """Delete a mount point hosted on a physical Data Mover."""
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.delete(mount_path=self.mount.path,
                   mover_name=self.mover.mover_name,
                   is_vdm=False)

    expected_calls = [
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_delete(self.mover.mover_id, False)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_delete_mount_point_but_nonexistent(self):
    """Deleting a nonexistent mount point is a silent no-op."""
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_delete_but_nonexistent())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.delete(mount_path=self.mount.path,
                   mover_name=self.vdm.vdm_name,
                   is_vdm=True)

    expected_calls = [
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_delete(self.vdm.vdm_id)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
@mock.patch('time.sleep')
def test_delete_mount_point_invalid_mover_id(self, sleep_mock):
    """An invalid-mover-id reply triggers a mover refresh and a retry."""
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_invalid_mover_id())
    self.hook.append(self.mover.resp_get_ref_succeed())
    self.hook.append(self.mount.resp_task_succeed())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    context.delete(mount_path=self.mount.path,
                   mover_name=self.mover.mover_name,
                   is_vdm=False)

    expected_calls = [
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_delete(self.mover.mover_id, False)),
        mock.call(self.mover.req_get_ref()),
        mock.call(self.mount.req_delete(self.mover.mover_id, False)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
    self.assertTrue(sleep_mock.called)
def test_delete_mount_point_with_error(self):
    """A task error during delete raises EMCVnxXMLAPIError."""
    self.hook.append(self.vdm.resp_get_succeed())
    self.hook.append(self.mount.resp_task_error())

    context = self.manager.getStorageContext('MountPoint')
    context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    self.assertRaises(exception.EMCVnxXMLAPIError,
                      context.delete,
                      mount_path=self.mount.path,
                      mover_name=self.vdm.vdm_name,
                      is_vdm=True)

    expected_calls = [
        mock.call(self.vdm.req_get()),
        mock.call(self.mount.req_delete(self.vdm.vdm_id)),
    ]
    context.conn['XML'].request.assert_has_calls(expected_calls)
def test_get_mount_points(self):
self.hook.append(self.vdm.resp_get_succeed())
self.hook.append(self.mount.resp_get_succeed(self.vdm.vdm_id))
self.hook.append(self.mover.resp_get_ref_succeed())
self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id,
False))
context = self.manager.getStorageContext('MountPoint')
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)
status, out = context.get(self.vdm.vdm_name)
self.assertEqual(constants.STATUS_OK, status)
property_map = [
'path',
'mover',
'moverIdIsVdm',
'fileSystem',
]
for item in out:
for prop in property_map:
self.assertIn(prop, item)
status, out = context.get(self.mover.mover_name, False)
self.assertEqual(constants.STATUS_OK, status)
property_map = [
'path',
'mover',
'moverIdIsVdm',
'fileSystem',
]
for item in out:
for prop in property_map:
self.assertIn(prop, item)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mount.req_get(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mount.req_get(self.mover.mover_id, False)),
]
context.conn['XML'].request.assert_has_calls(expected_calls)
def test_get_mount_points_but_not_found(self):
self.hook.append(self.mover.resp_get_ref_succeed())
self.hook.append(self.mount.resp_get_without_value())
context = self.manager.getStorageContext('MountPoint')
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)
status, out = context.get(self.mover.mover_name, False)
self.assertEqual(constants.STATUS_NOT_FOUND, status)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.mount.req_get(self.mover.mover_id, False)),
]
context.conn['XML'].request.assert_has_calls(expected_calls)
    @mock.patch('time.sleep')
    def test_get_mount_points_invalid_mover_id(self, sleep_mock):
        # The first query fails with an "invalid mover id" error; the
        # context should refresh the mover reference and retry, so the same
        # request pair is queued (and asserted) twice.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mount.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id,
                                                     False))
        context = self.manager.getStorageContext('MountPoint')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)
        status, out = context.get(self.mover.mover_name, False)
        self.assertEqual(constants.STATUS_OK, status)
        # Every returned mount point must expose these properties.
        property_map = [
            'path',
            'mover',
            'moverIdIsVdm',
            'fileSystem',
        ]
        for item in out:
            for prop in property_map:
                self.assertIn(prop, item)
        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mount.req_get(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mount.req_get(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        # time.sleep is patched out; the retry path must have invoked it.
        self.assertTrue(sleep_mock.called)
def test_get_mount_points_with_error(self):
self.hook.append(self.mover.resp_get_ref_succeed())
self.hook.append(self.mount.resp_get_error())
context = self.manager.getStorageContext('MountPoint')
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)
status, out = context.get(self.mover.mover_name, False)
self.assertEqual(constants.STATUS_ERROR, status)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.mount.req_get(self.mover.mover_id, False)),
]
context.conn['XML'].request.assert_has_calls(expected_calls)
class VDMTestCase(StorageObjectTestCaseBase):
    """Tests for the VDM (Virtual Data Mover) storage context."""

    def setUp(self):
        # Name the parent class explicitly: super(self.__class__, self)
        # recurses infinitely if this test case is ever subclassed.
        super(VDMTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_create_vdm(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.vdm.resp_task_succeed())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(self.vdm.vdm_name, self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_vdm_but_already_exist(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.vdm.resp_create_but_already_exist())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create VDM which already exists.
        context.create(self.vdm.vdm_name, self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_vdm_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.vdm.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.vdm.resp_task_succeed())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create VDM with invalid mover ID; the context refreshes the mover
        # reference and retries, so the request pair appears twice.
        context.create(self.vdm.vdm_name, self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_vdm_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.vdm.resp_task_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create VDM and hit a backend task error (the original comment
        # here was a copy-paste of the invalid-mover-id case).
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.vdm.vdm_name,
                          mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_vdm(self):
        self.hook.append(self.vdm.resp_get_succeed())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.vdm_map)
        # Properties every VDM description must expose.
        property_map = [
            'name',
            'id',
            'state',
            'host_mover_id',
            'interfaces',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_vdm_with_error(self):
        self.hook.append(self.vdm.resp_get_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Get VDM with error
        status, out = context.get(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_vdm_but_not_found(self):
        self.hook.append(self.vdm.resp_get_without_value())
        self.hook.append(self.vdm.resp_get_succeed('fake'))

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Get VDM which does not exist: once via an empty response, once
        # via a response describing a different VDM.
        status, out = context.get(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        status, out = context.get(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.vdm.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_vdm_id_with_error(self):
        self.hook.append(self.vdm.resp_get_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.vdm.vdm_name)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.vdm.resp_task_succeed())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.vdm.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm_but_not_found(self):
        self.hook.append(self.vdm.resp_get_but_not_found())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Deleting a missing VDM is a no-op: only the lookup is issued.
        context.delete(self.vdm.vdm_name)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm_but_failed_to_get_vdm(self):
        self.hook.append(self.vdm.resp_get_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.vdm.vdm_name)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm_with_error(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.vdm.resp_task_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.vdm.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_attach_detach_nfs_interface(self):
        self.ssh_hook.append()
        self.ssh_hook.append()

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.attach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)
        context.detach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)

        ssh_calls = [
            mock.call(self.vdm.cmd_attach_nfs_interface(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_detach_nfs_interface_with_error(self):
        # First detach fails and the interface is still listed afterwards,
        # so an error is raised; the second detach also fails but the
        # interface is gone from the listing, so no error is raised.
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.vdm.fake_output))
        self.ssh_hook.append(self.vdm.output_get_interfaces(
            self.mover.interface_name2))
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.vdm.fake_output))
        self.ssh_hook.append(self.vdm.output_get_interfaces(
            nfs_interface=fakes.FakeData.interface_name1))

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.detach_nfs_interface,
                          self.vdm.vdm_name,
                          self.mover.interface_name2)

        context.detach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)

        ssh_calls = [
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
            mock.call(self.vdm.cmd_get_interfaces(), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_cifs_nfs_interface(self):
        self.ssh_hook.append(self.vdm.output_get_interfaces())

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        interfaces = context.get_interfaces(self.vdm.vdm_name)
        self.assertIsNotNone(interfaces['cifs'])
        self.assertIsNotNone(interfaces['nfs'])

        ssh_calls = [mock.call(self.vdm.cmd_get_interfaces(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
class StoragePoolTestCase(StorageObjectTestCaseBase):
    """Tests for the StoragePool storage context."""

    def setUp(self):
        # Name the parent class explicitly: super(self.__class__, self)
        # recurses infinitely if this test case is ever subclassed.
        super(StoragePoolTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_get_pool(self):
        self.hook.append(self.pool.resp_get_succeed())

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.pool.pool_name, context.pool_map)
        # Properties every pool description must expose.
        property_map = [
            'name',
            'movers_id',
            'total_size',
            'used_size',
            'diskType',
            'dataServicePolicies',
            'id',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [mock.call(self.pool.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_pool_with_error(self):
        # Three consecutive failure modes: a backend error, an empty
        # response, and a response describing a different pool.
        self.hook.append(self.pool.resp_get_error())
        self.hook.append(self.pool.resp_get_without_value())
        self.hook.append(self.pool.resp_get_succeed(name='other'))

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.pool.req_get()),
            mock.call(self.pool.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_pool_id_with_error(self):
        self.hook.append(self.pool.resp_get_error())

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.pool.pool_name)

        expected_calls = [mock.call(self.pool.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)
class MoverTestCase(StorageObjectTestCaseBase):
    """Tests for the physical Data Mover storage context."""

    def setUp(self):
        # Name the parent class explicitly: super(self.__class__, self)
        # recurses infinitely if this test case is ever subclassed.
        super(MoverTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_get_mover(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.mover_map)
        # Properties every mover description must expose.
        property_map = [
            'name',
            'id',
            'Status',
            'version',
            'uptime',
            'role',
            'interfaces',
            'devices',
            'dns_domain',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        # Only two request pairs are queued for three get() calls: the
        # second plain get issues no requests, while get(..., True)
        # re-queries the backend.
        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)

        status, out = context.get(self.mover.mover_name, True)
        self.assertEqual(constants.STATUS_OK, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_not_found(self):
        self.hook.append(self.mover.resp_get_ref_succeed(name='other'))

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_with_error(self):
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_and_mover(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # get_ref() returns only the name/id summary ...
        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        property_map = ['name', 'id']
        for prop in property_map:
            self.assertIn(prop, out)

        # ... while get() returns the full mover description.
        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.mover_map)
        property_map = [
            'name',
            'id',
            'Status',
            'version',
            'uptime',
            'role',
            'interfaces',
            'devices',
            'dns_domain',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_failed_to_get_mover_ref(self):
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get,
                          self.mover.mover_name)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_but_not_found(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_without_value())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.mover_name, force=True)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_interconnect_id(self):
        self.ssh_hook.append(self.mover.output_get_interconnect_id())

        context = self.manager.getStorageContext('Mover')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        conn_id = context.get_interconnect_id(self.mover.mover_name,
                                              self.mover.mover_name)
        self.assertEqual(self.mover.interconnect_id, conn_id)

        ssh_calls = [mock.call(self.mover.cmd_get_interconnect_id(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_physical_devices(self):
        self.ssh_hook.append(self.mover.output_get_physical_devices())

        context = self.manager.getStorageContext('Mover')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        devices = context.get_physical_devices(self.mover.mover_name)
        self.assertIn(self.mover.device_name, devices)

        ssh_calls = [mock.call(self.mover.cmd_get_physical_devices(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
class SnapshotTestCase(StorageObjectTestCaseBase):
    """Tests for the Snapshot (checkpoint) storage context."""

    def setUp(self):
        # Name the parent class explicitly: super(self.__class__, self)
        # recurses infinitely if this test case is ever subclassed.
        super(SnapshotTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_snapshot(self):
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_task_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.snap.snapshot_name,
                       fs_name=self.fs.filesystem_name,
                       pool_id=self.pool.pool_id)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_snapshot_but_already_exist(self):
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_create_but_already_exist())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Creating a snapshot that already exists must not raise.
        context.create(name=self.snap.snapshot_name,
                       fs_name=self.fs.filesystem_name,
                       pool_id=self.pool.pool_id,
                       ckpt_size=self.snap.snapshot_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create_with_size()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_snapshot_with_error(self):
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_task_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.snap.snapshot_name,
                          fs_name=self.fs.filesystem_name,
                          pool_id=self.pool.pool_id,
                          ckpt_size=self.snap.snapshot_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create_with_size()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot(self):
        self.hook.append(self.snap.resp_get_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.snap.snapshot_name, context.snap_map)
        # Properties every snapshot description must expose.
        property_map = [
            'name',
            'id',
            'checkpointOf',
            'state',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_but_not_found(self):
        self.hook.append(self.snap.resp_get_without_value())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_with_error(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot(self):
        self.hook.append(self.snap.resp_get_succeed())
        self.hook.append(self.snap.resp_task_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.snap.snapshot_name)
        # The cached entry must be dropped alongside the backend delete.
        self.assertNotIn(self.snap.snapshot_name, context.snap_map)

        expected_calls = [
            mock.call(self.snap.req_get()),
            mock.call(self.snap.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_failed_to_get_snapshot(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.snap.snapshot_name)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_but_not_found(self):
        self.hook.append(self.snap.resp_get_without_value())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Deleting a missing snapshot is a no-op: only the lookup is issued.
        context.delete(self.snap.snapshot_name)
        self.assertNotIn(self.snap.snapshot_name, context.snap_map)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_with_error(self):
        self.hook.append(self.snap.resp_get_succeed())
        self.hook.append(self.snap.resp_task_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.snap.snapshot_name)

        expected_calls = [
            mock.call(self.snap.req_get()),
            mock.call(self.snap.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_id(self):
        self.hook.append(self.snap.resp_get_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Local renamed from 'id' to avoid shadowing the builtin id().
        snap_id = context.get_id(self.snap.snapshot_name)
        self.assertEqual(self.snap.snapshot_id, snap_id)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_id_with_error(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.snap.snapshot_name)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)
@ddt.ddt
class MoverInterfaceTestCase(StorageObjectTestCaseBase):
    """Tests for the MoverInterface storage context."""

    def setUp(self):
        # Name the parent class explicitly: super(self.__class__, self)
        # recurses infinitely if this test case is ever subclassed.
        super(MoverInterfaceTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        # An over-long name is expected to be truncated to 31 characters
        # in the create request.
        interface['name'] = self.mover.long_interface_name
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_create_interface(
                self.mover.long_interface_name[:31])),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_mover_interface_name_already_exist(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_but_name_already_exist())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        # A duplicate-name response must not raise.
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_mover_interface_ip_already_exist(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_but_ip_already_exist())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        # A duplicate-IP response must not raise.
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @ddt.data(fakes.MoverTestData().resp_task_succeed(),
              fakes.MoverTestData().resp_task_error())
    def test_create_mover_interface_with_conflict_vlan_id(self, xml_resp):
        # On a VLAN id conflict the half-created interface is deleted; the
        # conflict error is raised regardless of whether that cleanup
        # delete succeeds or fails (both outcomes are data-driven here).
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_with_conflicted_vlan_id())
        self.hook.append(xml_resp)

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_mover_interface_invalid_mover_id(self, sleep_mock):
        # First create fails with an invalid mover id; the context
        # refreshes the mover reference and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_mover_interface_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_error())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        # Properties every interface description must expose.
        property_map = [
            'name',
            'device',
            'up',
            'ipVersion',
            'netMask',
            'ipAddress',
            'vlanid',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        # Lookups must also work with an over-long (truncated) name.
        context.get(name=self.mover.long_interface_name,
                    mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_interface_not_found(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_without_value())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface_but_nonexistent(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_delete_interface_but_nonexistent())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Deleting a nonexistent interface must not raise.
        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_mover_interface_invalid_mover_id(self, sleep_mock):
        # First delete fails with an invalid mover id; the context
        # refreshes the mover reference and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_mover_interface_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_error())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          ip_addr=self.mover.ip_address1,
                          mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
class DNSDomainTestCase(StorageObjectTestCaseBase):
    """Tests for the DNSDomain storage object through the XML API."""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this test case is ever subclassed.
        super(DNSDomainTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_dns_domain(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_dns_domain_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        # The retry path sleeps between attempts.
        self.assertTrue(sleep_mock.called)

    def test_create_dns_domain_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Use the DNS fake data for the domain name, consistent with the
        # other tests in this class (was self.mover.domain_name).
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          mover_name=self.mover.mover_name,
                          name=self.dns.domain_name,
                          servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_dns_domain(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # First delete succeeds; the second hits a task error, which the
        # context tolerates (no exception expected here).
        context.delete(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name)
        context.delete(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_dns_domain_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)
class CIFSServerTestCase(StorageObjectTestCaseBase):
    """Tests for the CIFSServer storage object through the XML API."""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this test case is ever subclassed.
        super(CIFSServerTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_cifs_server(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM again: the task error followed by a
        # successful get (domain joined) is handled without raising.
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id, False)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_cifs_server_already_exist(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # The original test queued responses but never exercised the
        # context; drive the create and verify the already-exists path
        # (task error + successful get with the domain joined).
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_cifs_server_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_cifs_server_with_error(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_all_cifs_server(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        # Get CIFS server from the cache
        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_get_all_cifs_server_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.mover.mover_name, False)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_get_cifs_server(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.cifs_server.cifs_server_name,
                                  mover_name=self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        property_map = {
            'name',
            'compName',
            'Aliases',
            'type',
            'interfaces',
            'domain',
            'domainJoined',
            'mover',
            'moverIdIsVdm',
        }
        for prop in property_map:
            self.assertIn(prop, out)

        context.get(name=self.cifs_server.cifs_server_name,
                    mover_name=self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.modify(cifs_server_args)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server_but_unjoin_domain(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_modify_but_unjoin_domain())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server_but_already_join_domain(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(
            self.cifs_server.resp_modify_but_already_join_domain())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_modify_cifs_server_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_modify_cifs_server_with_error(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.modify,
                          cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server_but_not_found(self):
        self.hook.append(self.mover.resp_get_without_value())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_without_value())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))
        self.hook.append(self.cifs_server.resp_task_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          computer_name=self.cifs_server.cifs_server_name,
                          mover_name=self.mover.mover_name,
                          is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
class CIFSShareTestCase(StorageObjectTestCaseBase):
    """Tests for the CIFSShare storage object (XML API plus SSH access)."""

    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this test case is ever subclassed.
        super(CIFSShareTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_create_cifs_share(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_cifs_share_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_cifs_share_with_error(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_error())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.cifs_share.share_name,
                          server_name=self.cifs_share.cifs_server_name[-14:],
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_share(self):
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())
        self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id,
                                                          False))
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
            mock.call(self.cifs_share.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_share_not_found(self):
        self.hook.append(self.cifs_share.resp_get_error())
        self.hook.append(self.cifs_share.resp_get_without_value())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # A get error raises; an empty get result is treated as already gone.
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.cifs_share.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_cifs_share_invalid_mover_id(self, sleep_mock):
        self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id,
                                                          False))
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_cifs_share_with_error(self):
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_error())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_cifs_share(self):
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.get(self.cifs_share.share_name)

        expected_calls = [mock.call(self.cifs_share.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_disable_share_access(self):
        self.ssh_hook.append('Command succeeded')

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.disable_share_access(share_name=self.cifs_share.share_name,
                                     mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_disable_share_access_with_error(self):
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.cifs_share.fake_output))

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.disable_share_access,
                          share_name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access(self):
        self.ssh_hook.append(self.cifs_share.output_allow_access())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.allow_share_access(mover_name=self.vdm.vdm_name,
                                   share_name=self.cifs_share.share_name,
                                   user_name=self.cifs_server.domain_user,
                                   domain=self.cifs_server.domain_name,
                                   access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access_duplicate_ACE(self):
        # A duplicate ACE reported by the array is tolerated (no exception).
        expt_dup_ace = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_allow_access_but_duplicate_ace())
        self.ssh_hook.append(ex=expt_dup_ace)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.allow_share_access(mover_name=self.vdm.vdm_name,
                                   share_name=self.cifs_share.share_name,
                                   user_name=self.cifs_server.domain_user,
                                   domain=self.cifs_server.domain_name,
                                   access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access_with_error(self):
        expt_err = processutils.ProcessExecutionError(
            self.cifs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.allow_share_access,
                          mover_name=self.vdm.vdm_name,
                          share_name=self.cifs_share.share_name,
                          user_name=self.cifs_server.domain_user,
                          domain=self.cifs_server.domain_name,
                          access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access(self):
        self.ssh_hook.append('Command succeeded')

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_no_ace(self):
        # Revoking an ACE that is absent is tolerated (no exception).
        expt_no_ace = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_deny_access_but_no_ace())
        self.ssh_hook.append(ex=expt_no_ace)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_but_no_user_found(self):
        # Revoking access for an unknown user is tolerated (no exception).
        expt_no_user = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_deny_access_but_no_user_found())
        self.ssh_hook.append(ex=expt_no_user)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_with_error(self):
        expt_err = processutils.ProcessExecutionError(
            self.cifs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.deny_share_access,
                          mover_name=self.vdm.vdm_name,
                          share_name=self.cifs_share.share_name,
                          user_name=self.cifs_server.domain_user,
                          domain=self.cifs_server.domain_name,
                          access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
class NFSShareTestCase(StorageObjectTestCaseBase):
    """Unit tests for the NFSShare storage context.

    Each test pre-loads ``self.ssh_hook`` with the canned CLI output (or
    exception) the fake SSH connection should produce, runs the operation
    under test, and then verifies the exact CLI command sequence that was
    issued over SSH.
    """
    def setUp(self):
        super(self.__class__, self).setUp()
        # Queue of canned SSH responses consumed in FIFO order by the mock.
        self.ssh_hook = utils.SSHSideEffect()
    # Creating a share issues a single create command.
    def test_create_nfs_share(self):
        self.ssh_hook.append(self.nfs_share.output_create())
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.create(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # A CLI failure during create is converted to EMCVnxXMLAPIError.
    def test_create_nfs_share_with_error(self):
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Deleting a share first queries it, then issues the delete command.
    def test_delete_nfs_share(self):
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_delete_succeed())
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Deleting a share that is already gone is a no-op (no delete issued).
    def test_delete_nfs_share_not_found(self):
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # A "locked" delete is retried (after a sleep) until it succeeds.
    @mock.patch('time.sleep')
    def test_delete_nfs_share_locked(self, sleep_mock):
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        expt_locked = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_delete_but_locked())
        self.ssh_hook.append(ex=expt_locked)
        self.ssh_hook.append(self.nfs_share.output_delete_succeed())
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
        self.assertTrue(sleep_mock.called)
    # Any other delete failure is converted to EMCVnxXMLAPIError.
    def test_delete_nfs_share_with_error(self):
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # A second get() for the same share is served from cache: only one
    # SSH call is expected.
    def test_get_nfs_share(self):
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)
        # Get NFS share from cache
        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Missing shares are not cached: both get() calls hit the backend.
    def test_get_nfs_share_not_found(self):
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        self.ssh_hook.append(self.nfs_share.output_get_but_not_found())
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)
        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_get(), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # An unrecognized get failure raises EMCVnxXMLAPIError.
    def test_get_nfs_share_with_error(self):
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Allowing RW, then RO, then RW again updates the host lists; the
    # final (already-granted) RW request issues only a get, no set.
    def test_allow_share_access(self):
        rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts)
        rw_hosts.append(self.nfs_share.nfs_host_ip)
        ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts)
        ro_hosts.append(self.nfs_share.nfs_host_ip)
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts))
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        context.allow_share_access(share_name=self.nfs_share.share_name,
                                   host_ip=self.nfs_share.nfs_host_ip,
                                   mover_name=self.vdm.vdm_name,
                                   access_level=const.ACCESS_LEVEL_RW)
        context.allow_share_access(share_name=self.nfs_share.share_name,
                                   host_ip=self.nfs_share.nfs_host_ip,
                                   mover_name=self.vdm.vdm_name,
                                   access_level=const.ACCESS_LEVEL_RO)
        context.allow_share_access(share_name=self.nfs_share.share_name,
                                   host_ip=self.nfs_share.nfs_host_ip,
                                   mover_name=self.vdm.vdm_name,
                                   access_level=const.ACCESS_LEVEL_RW)
        context.allow_share_access(share_name=self.nfs_share.share_name,
                                   host_ip=self.nfs_share.nfs_host_ip,
                                   mover_name=self.vdm.vdm_name,
                                   access_level=const.ACCESS_LEVEL_RW)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)),
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)),
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)),
            mock.call(self.nfs_share.cmd_get()),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Allowing access on a missing share raises EMCVnxXMLAPIError.
    def test_allow_share_access_not_found(self):
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.allow_share_access,
                          share_name=self.nfs_share.share_name,
                          host_ip=self.nfs_share.nfs_host_ip,
                          mover_name=self.vdm.vdm_name,
                          access_level=const.ACCESS_LEVEL_RW)
        ssh_calls = [mock.call(self.nfs_share.cmd_get())]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Revoking removes the host from the RW list via a set-access call.
    def test_deny_rw_share_access(self):
        rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts)
        rw_hosts.append(self.nfs_share.nfs_host_ip)
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        context.deny_share_access(share_name=self.nfs_share.share_name,
                                  host_ip=self.nfs_share.nfs_host_ip,
                                  mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts,
                                                    self.nfs_share.ro_hosts)),
            mock.call(self.nfs_share.cmd_get()),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Revoking an RO host works the same way; a second deny for a host
    # that is no longer present issues no additional set-access call.
    def test_deny_ro_share_access(self):
        ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts)
        ro_hosts.append(self.nfs_share.nfs_host_ip)
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        context.deny_share_access(share_name=self.nfs_share.share_name,
                                  host_ip=self.nfs_share.nfs_host_ip,
                                  mover_name=self.vdm.vdm_name)
        context.deny_share_access(share_name=self.nfs_share.share_name,
                                  host_ip=self.nfs_share.nfs_host_ip,
                                  mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts,
                                                    self.nfs_share.ro_hosts)),
            mock.call(self.nfs_share.cmd_get()),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # Denying access on a missing share raises EMCVnxXMLAPIError.
    def test_deny_share_not_found(self):
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.deny_share_access,
                          share_name=self.nfs_share.share_name,
                          host_ip=self.nfs_share.nfs_host_ip,
                          mover_name=self.vdm.vdm_name)
        ssh_calls = [mock.call(self.nfs_share.cmd_get())]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
    # A failure on the set-access step surfaces as EMCVnxXMLAPIError.
    def test_deny_rw_share_with_error(self):
        rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts)
        rw_hosts.append(self.nfs_share.nfs_host_ip)
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts))
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = utils.EMCNFSShareMock(
            side_effect=self.ssh_hook)
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.deny_share_access,
                          share_name=self.nfs_share.share_name,
                          host_ip=self.nfs_share.nfs_host_ip,
                          mover_name=self.vdm.vdm_name)
        ssh_calls = [
            mock.call(self.nfs_share.cmd_get()),
            mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts,
                                                    self.nfs_share.ro_hosts)),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
| apache-2.0 |
Agiliza/AgilizaFramework | tests/agiliza/core/utils/patterns/test_singleton.py | 1 | 1894 | """
This file is part of Agiliza.
Agiliza is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Agiliza is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Agiliza. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) 2012 Vicente Ruiz <vruiz2.0@gmail.com>
"""
import unittest
from agiliza.core.utils.patterns import Singleton
class SingletonTest(unittest.TestCase):
    """Behavioural tests for the Singleton base class."""

    def test_must_retrieve_the_same_instance(self):
        # Constructing the class and asking for the instance explicitly
        # must yield the same object.
        class SingletonExample(Singleton):
            pass

        first = SingletonExample()
        second = SingletonExample.getInstance()
        self.assertEqual(first, second,
                         "Singleton makes different instances")

    def test_must_retrieve_the_same_instance_multiple_times(self):
        # No matter how many times the constructor runs, every call
        # must hand back the original instance.
        class SingletonExample(Singleton):
            pass

        first = SingletonExample()
        SingletonExample()
        SingletonExample()
        second = SingletonExample()
        self.assertEqual(first, second,
                         "Singleton makes different instances")

    def test_must_invalidate_a_instance(self):
        # After invalidateInstance(), the next construction must produce
        # a brand-new object.
        class SingletonExample(Singleton):
            pass

        first = SingletonExample.getInstance()
        SingletonExample.invalidateInstance()
        second = SingletonExample()
        self.assertNotEqual(first, second,
                            "Singleton does not invalidate instances")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
Giftingnation/GN-Oscar-Custom | oscar/profiling/middleware.py | 6 | 3427 | import sys
import tempfile
import hotshot
import hotshot.stats
from cStringIO import StringIO
import cProfile
import pstats
def profile_this(fn):
def profiled_fn(*args, **kwargs):
filepath = "/tmp/%s.profile" % fn.__name__
prof = cProfile.Profile()
ret = prof.runcall(fn, *args, **kwargs)
print "Writing to %s" % filepath
prof.dump_stats(filepath)
print "Printing stats"
stats = pstats.Stats(filepath)
stats.sort_stats('cumulative')
stats.print_stats()
return ret
return profiled_fn
class BaseMiddleware(object):
    """Base for query-parameter-triggered profiling middleware.

    Subclasses set ``query_param`` and implement ``profiler()`` /
    ``stats()``.  When the parameter is present in the request's GET
    data, the view call is profiled and the response body is replaced
    with the formatted statistics.
    """
    # GET parameter that enables profiling; overridden by subclasses.
    query_param = None
    def show_profile(self, request):
        # Profiling is active only when the trigger parameter is present.
        return self.query_param in request.GET
    def process_request(self, request):
        if self.show_profile(request):
            if 'prof_file' in request.GET:
                # It's sometimes useful to generate a file of output that can
                # be converted for use with kcachegrind. To convert this file,
                # use:
                #
                #     pyprof2calltree -o /tmp/callgrind.stats -i /tmp/out.stats
                #
                # then open the file in kcachegrind.
                self.tmpfile = open('/tmp/out.stats', 'w')
            else:
                self.tmpfile = tempfile.NamedTemporaryFile()
            self.profile = self.profiler()
    def profiler(self):
        # Subclasses return a profiler object exposing runcall().
        return None
    def process_view(self, request, callback, callback_args, callback_kwargs):
        # We profile the view call - note that this misses the rest of Django's
        # request processing (eg middleware etc)
        if self.show_profile(request):
            return self.profile.runcall(
                callback, request, *callback_args, **callback_kwargs)
    def process_response(self, request, response):
        if self.show_profile(request):
            stats = self.stats()
            if 'prof_strip' in request.GET:
                stats.strip_dirs()
            if 'prof_sort' in request.GET:
                # See http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
                # for the fields you can sort on.
                stats.sort_stats(*request.GET['prof_sort'].split(','))
            else:
                stats.sort_stats('time', 'calls')
            # print_stats() writes to stdout, so swap stdout for a buffer
            # temporarily to capture the report as a string.
            old_stdout = sys.stdout
            out = StringIO()
            sys.stdout = out
            stats.print_stats()
            stats_str = out.getvalue()
            sys.stdout.close()
            sys.stdout = old_stdout
            # Replace the response body with the stats, wrapped in <pre>.
            if response and response.content and stats_str:
                response.content = "<pre>" + stats_str + "</pre>"
        return response
class ProfileMiddleware(BaseMiddleware):
    """cProfile-backed profiling, enabled by appending ``?cprofile``."""

    query_param = 'cprofile'

    def profiler(self):
        # Fresh profiler per request.
        return cProfile.Profile()

    def stats(self):
        # Persist the raw data to the temp file, then load it for display.
        self.profile.dump_stats(self.tmpfile.name)
        return pstats.Stats(self.tmpfile.name)
class HotshotMiddleware(BaseMiddleware):
    """
    Hotshot-based profiling for any view, enabled by appending
    ``?hotshot`` to the URL.

    Based on http://djangosnippets.org/snippets/186/
    """

    query_param = 'hotshot'

    def profiler(self):
        # Hotshot streams its samples straight into the temp file.
        return hotshot.Profile(self.tmpfile.name)

    def stats(self):
        # The profile must be closed before its data can be loaded.
        self.profile.close()
        return hotshot.stats.load(self.tmpfile.name)
| bsd-3-clause |
adamjmcgrath/glancydesign | src/django-nonrel/django/contrib/gis/maps/google/zoom.py | 327 | 6628 | from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from math import pi, sin, cos, log, exp, atan
# Conversion factors: DTOR converts degrees to radians, RTOD converts
# radians to degrees.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
    """
    GoogleZoom is a utility for performing operations related to the zoom
    levels on Google Maps.
    This class is inspired by the OpenStreetMap Mapnik tile generation routine
    `generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
    "Google Maps Hacks" by Rich Gibson and Schuyler Erle.
    `generate_tiles.py` may be found at:
    http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
    "Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
    """
    def __init__(self, num_zoom=19, tilesize=256):
        "Initializes the Google Zoom object."
        # Google's tilesize is 256x256, square tiles are assumed.
        self._tilesize = tilesize
        # The number of zoom levels
        self._nzoom = num_zoom
        # Initializing arrays to hold the parameters for each one of the
        # zoom levels.
        self._degpp = [] # Degrees per pixel
        self._radpp = [] # Radians per pixel
        self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
        # Incrementing through the zoom levels and populating the parameter arrays.
        z = tilesize # The number of pixels per zoom level.
        for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and 1/2 the number of
            # pixels, for every zoom level.
            self._degpp.append(z / 360.) # degrees per pixel
            self._radpp.append(z / (2 * pi)) # radians per pixel
            self._npix.append(z / 2) # number of pixels to center of tile
            # Multiplying `z` by 2 for the next iteration.
            z *= 2
    def __len__(self):
        "Returns the number of zoom levels."
        return self._nzoom
    def get_lon_lat(self, lonlat):
        "Unpacks longitude, latitude from GEOS Points and 2-tuples."
        if isinstance(lonlat, Point):
            lon, lat = lonlat.coords
        else:
            lon, lat = lonlat
        return lon, lat
    def lonlat_to_pixel(self, lonlat, zoom):
        "Converts a longitude, latitude coordinate pair for the given zoom level."
        # Setting up, unpacking the longitude, latitude values and getting the
        # number of pixels for the given zoom level.
        lon, lat = self.get_lon_lat(lonlat)
        npix = self._npix[zoom]
        # Calculating the pixel x coordinate by multiplying the longitude value
        # with the number of degrees/pixel at the given zoom level.
        px_x = round(npix + (lon * self._degpp[zoom]))
        # Creating the factor, and ensuring that 1 or -1 is not passed in as the
        # base to the logarithm. Here's why:
        # if fac = -1, we'll get log(0) which is undefined;
        # if fac = 1, our logarithm base will be divided by 0, also undefined.
        fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
        # Calculating the pixel y coordinate.
        px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
        # Returning the pixel x, y to the caller of the function.
        return (px_x, px_y)
    def pixel_to_lonlat(self, px, zoom):
        "Converts a pixel to a longitude, latitude pair at the given zoom level."
        if len(px) != 2:
            raise TypeError('Pixel should be a sequence of two elements.')
        # Getting the number of pixels for the given zoom level.
        npix = self._npix[zoom]
        # Calculating the longitude value, using the degrees per pixel.
        lon = (px[0] - npix) / self._degpp[zoom]
        # Calculating the latitude value (inverse of the Mercator projection
        # used in lonlat_to_pixel).
        lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
        # Returning the longitude, latitude coordinate pair.
        return (lon, lat)
    def tile(self, lonlat, zoom):
        """
        Returns a Polygon corresponding to the region represented by a fictional
        Google Tile for the given longitude/latitude pair and zoom level. This
        tile is used to determine the size of a tile at the given point.
        """
        # The given lonlat is the center of the tile.
        delta = self._tilesize / 2
        # Getting the pixel coordinates corresponding to the
        # the longitude/latitude.
        px = self.lonlat_to_pixel(lonlat, zoom)
        # Getting the lower-left and upper-right lat/lon coordinates
        # for the bounding box of the tile.
        ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
        ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
        # Constructing the Polygon, representing the tile and returning.
        return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
    def get_zoom(self, geom):
        "Returns the optimal Zoom level for the given geometry."
        # Checking the input type.
        if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
            raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
        # Getting the envelope for the geometry, and its associated width, height
        # and centroid.
        env = geom.envelope
        env_w, env_h = self.get_width_height(env.extent)
        center = env.centroid
        for z in xrange(self._nzoom):
            # Getting the tile at the zoom level.
            tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
            # When we span more than one tile, this is an approximately good
            # zoom level.
            if (env_w > tile_w) or (env_h > tile_h):
                if z == 0:
                    raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
                return z-1
        # Otherwise, we've zoomed in to the max.
        return self._nzoom-1
    def get_width_height(self, extent):
        """
        Returns the width and height for the given extent.
        """
        # Getting the lower-left, upper-left, and upper-right
        # coordinates from the extent.
        ll = Point(extent[:2])
        ul = Point(extent[0], extent[3])
        ur = Point(extent[2:])
        # Calculating the width and height.
        height = ll.distance(ul)
        width = ul.distance(ur)
        return width, height
| bsd-3-clause |
varunarya10/python-novaclient | novaclient/tests/v1_1/test_quota_classes.py | 5 | 1413 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests import utils
from novaclient.tests.v1_1 import fakes
cs = fakes.FakeClient()
class QuotaClassSetsTest(utils.TestCase):
    """Exercises the os-quota-class-sets API bindings."""

    def test_class_quotas_get(self):
        # Fetching a quota class issues a GET for that class name.
        class_name = 'test'
        cs.quota_classes.get(class_name)
        cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)

    def test_update_quota(self):
        # Updating a field PUTs back to the same resource.
        quota = cs.quota_classes.get('test')
        quota.update(cores=2)
        cs.assert_called('PUT', '/os-quota-class-sets/test')

    def test_refresh_quota(self):
        # get() on an existing object re-reads the server state,
        # overwriting any local modifications.
        quota = cs.quota_classes.get('test')
        other = cs.quota_classes.get('test')
        self.assertEqual(quota.cores, other.cores)
        other.cores = 0
        self.assertNotEqual(quota.cores, other.cores)
        other.get()
        self.assertEqual(quota.cores, other.cores)
| apache-2.0 |
dhalleine/tensorflow | tensorflow/contrib/learn/python/learn/experiment.py | 1 | 6100 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.platform import tf_logging as logging
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
"""
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None):
"""Constructor for `Experiment`.
Args:
estimator: `Estimator` object.
train_input_fn: function, returns features and targets for training.
eval_input_fn: function, returns features and targets for evaluation. If
`eval_steps` is `None`, this should be configured only to produce for a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
"""
super(Experiment, self).__init__()
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._train_monitors = train_monitors
def train(self, delay_secs=0):
"""Fit the estimator using the training data.
Train the estimator for `steps` steps, after waiting for `delay_secs`
seconds. If `steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
if delay_secs:
logging.info("Waiting %d secs before starting training.", delay_secs)
time.sleep(delay_secs)
return self._estimator.fit(input_fn=self._train_input_fn,
steps=self._train_steps,
monitors=self._train_monitors)
def evaluate(self, delay_secs=0):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. If `steps`
is given, only run for this many steps. Otherwise run until input is
exhausted, or another exception is raised. Start the evaluation after
`delay_secs` seconds.
Args:
delay_secs: Start evaluating after waiting for this many seconds.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass")
def local_run(self):
"""Run when called on local machine.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
# TODO(ipolosukhin): Add a ValidationMonitor to run in-training evaluation.
self.train()
return self.evaluate()
def _continuous_eval(self,
input_fn,
name,
delay_secs=0,
throttle_delay_secs=60):
"""Run continuous eval.
Run `steps` steps of evaluation on the evaluation data set. This function
starts evaluating after `delay_secs` seconds and then runs no more than one
evaluation per `throttle_delay_secs`. It never returns.
Args:
input_fn: The input to use for this eval.
name: A string appended to the folder name of evaluation results.
delay_secs: Start evaluating after this many seconds.
throttle_delay_secs: Do not re-evaluate unless the last evaluation was
started at least this many seconds ago.
"""
if delay_secs:
logging.info("Waiting %f secs before starting eval.", delay_secs)
time.sleep(delay_secs)
while True:
start = time.time()
self._estimator.evaluate(input_fn=input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=name)
duration = time.time() - start
if duration < throttle_delay_secs:
difference = throttle_delay_secs - duration
logging.info("Waiting %f secs before starting next eval run.",
difference)
time.sleep(difference)
def continuous_eval(self, delay_secs=0, throttle_delay_secs=60):
self._continuous_eval(self._eval_input_fn,
name="continuous",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def continuous_eval_on_train_data(self, delay_secs=0, throttle_delay_secs=60):
    """Continuously evaluate on the *training* data set (never returns)."""
    train_input = self._train_input_fn
    self._continuous_eval(train_input,
                          name="continuous_on_train_data",
                          delay_secs=delay_secs,
                          throttle_delay_secs=throttle_delay_secs)
| apache-2.0 |
chinesebear/NB-IOT | rtt2.1/bsp/smartloong/rtconfig.py | 3 | 1461 | import os
import os

# CPU options
ARCH = 'mips'
CPU = 'loongson_1c'

# toolchains options
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'D:\mgc\embedded\codebench\bin'
else:
    # Single-argument print() calls are valid in both Python 2 and 3,
    # unlike the Python-2-only ``print ...`` statement used previously.
    print('================ERROR===========================')
    print('Not support %s yet!' % CROSS_TOOL)
    print('=================================================')
    exit(0)

# The environment variable overrides the hard-coded toolchain location.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'

# Toolchain binary names, all sharing the cross-compiler prefix.
PREFIX = 'mips-sde-elf-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
READELF = PREFIX + 'readelf'

DEVICE = ' -mips32'
CFLAGS = DEVICE + ' -EL -G0 -mno-abicalls -fno-pic -fno-builtin -fno-exceptions -ffunction-sections -fomit-frame-pointer'
AFLAGS = ' -c' + DEVICE + ' -EL -fno-pic -fno-builtin -mno-abicalls -x assembler-with-cpp -DSYSTEM_STACK=0x80003fe8'
LFLAGS = DEVICE + ' -nostartfiles -EL -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T ls1c_ram.lds'
CPATH = ''
LPATH = ''

# Debug builds keep full DWARF info and disable optimization.
if BUILD == 'debug':
    CFLAGS += ' -O0 -gdwarf-2'
    AFLAGS += ' -gdwarf-2'
else:
    CFLAGS += ' -O2'

DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
READELF_ACTION = READELF + ' -a $TARGET > rtt.map\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
| mit |
kalvdans/scipy | scipy/special/__init__.py | 3 | 27099 | """
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings when an error
occurs. By default this is disabled; to enable it use `errprint`.
.. autosummary::
:toctree: generated/
errprint -- Set or return the error printing flag for special functions.
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jn -- Bessel function of the first kind of real order and complex argument
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- [+]Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- [+]Compute Ricatti-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative density function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
   btdtrib   -- Inverse of `btdtr` with respect to `b`.
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
gdtr -- Gamma distribution cumulative density function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
stdtr -- Student t distribution cumulative density function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to kolmogorov
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out])
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
lpn -- [+]Legendre function of the first kind.
lqn -- [+]Legendre function of the second kind.
lpmn -- [+]Sequence of associated Legendre functions of the first kind.
lqmn -- [+]Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial.
chebyt -- [+]Chebyshev polynomial of the first kind.
chebyu -- [+]Chebyshev polynomial of the second kind.
chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- [+]Jacobi polynomial.
laguerre -- [+]Laguerre polynomial.
genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
hermite -- [+]Physicist's Hermite polynomial.
hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
sh_legendre -- [+]Shifted Legendre polynomial.
sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- [+]Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate
hyp1f2 -- Hypergeometric function 1F2 and error estimate
hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
   mathieu_odd_coef  -- [+]Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
   pro_rad2   -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
   kei          -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]The number of combinations of N things taken k at a time.
perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- [+]Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from .basic import *
from ._logsumexp import logsumexp
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
                                spherical_kn)
# Public API: every name pulled in above that does not look private.
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
# Register scipy's i0 as the implementation used by numpy.dual consumers.
register_func('i0',i0)
del register_func
# Expose the test runner so users can call ``scipy.special.test()``.
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
                    n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
                    random_state=None, sample_fraction=.75, **params):
    """Run ``estimator_func`` on many randomized resamplings of (X, y).

    Each run fits on a random subsample of the rows (``sample_fraction``)
    with randomly re-weighted features, and the boolean "active" outputs
    are averaged into per-feature selection frequencies.

    Returns
    -------
    scores_ : ndarray
        Fraction of resamplings in which each feature was selected.
    """
    random_state = check_random_state(random_state)
    # We are generating 1 - weights, and not weights
    n_samples, n_features = X.shape

    if not (0 < scaling < 1):
        raise ValueError(
            "'scaling' should be between 0 and 1. Got %r instead." % scaling)

    scaling = 1. - scaling
    scores_ = 0.0
    for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
                               pre_dispatch=pre_dispatch)(
            delayed(estimator_func)(
                X, y,
                # randint(0, 2) draws from {0, 1}; it is the documented exact
                # replacement for the deprecated random_integers(0, 1).
                weights=scaling * random_state.randint(
                    0, 2, size=(n_features,)),
                mask=(random_state.rand(n_samples) < sample_fraction),
                verbose=max(0, verbose - 1),
                **params)
            for _ in range(n_resampling)):
        scores_ += active_set

    scores_ /= n_resampling
    return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
                                                   TransformerMixin)):
    """Base class to implement randomized linear models for feature selection

    This implements the strategy by Meinshausen and Buhlman:
    stability selection with randomized sampling, and random re-weighting of
    the penalty.
    """

    @abstractmethod
    def __init__(self):
        pass

    # Subclasses may override how data is centered/normalized before fitting.
    _center_data = staticmethod(center_data)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, sparse matrix shape = [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
                         ensure_min_samples=2)
        X = as_float_array(X, copy=False)
        n_samples, n_features = X.shape

        # Center (and optionally normalize) so no intercept has to be fit.
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize)

        # Subclasses supply the per-resampling estimator and its parameters.
        estimator_func, params = self._make_estimator_and_params(X, y)
        memory = self.memory
        if isinstance(memory, six.string_types):
            # A string is interpreted as a cache directory for joblib.Memory.
            memory = Memory(cachedir=memory)

        # The expensive resampling loop is memoized; verbosity/parallelism
        # options are ignored for cache-key purposes.
        scores_ = memory.cache(
            _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
        )(
            estimator_func, X, y,
            scaling=self.scaling, n_resampling=self.n_resampling,
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=self.pre_dispatch, random_state=self.random_state,
            sample_fraction=self.sample_fraction, **params)

        if scores_.ndim == 1:
            scores_ = scores_[:, np.newaxis]
        self.all_scores_ = scores_
        # Keep the best score across the (possibly several) regularization
        # values for each feature.
        self.scores_ = np.max(self.all_scores_, axis=1)
        return self

    def _make_estimator_and_params(self, X, y):
        """Return the parameters passed to the estimator"""
        raise NotImplementedError

    def get_support(self, indices=False):
        """Return a mask, or list, of the features/indices selected."""
        check_is_fitted(self, 'scores_')
        mask = self.scores_ > self.selection_threshold
        return mask if not indices else np.where(mask)[0]

    # XXX: the two function below are copy/pasted from feature_selection,
    # Should we add an intermediate base class?
    def transform(self, X):
        """Transform a new matrix using the selected features"""
        mask = self.get_support()
        X = check_array(X)
        if len(mask) != X.shape[1]:
            raise ValueError("X has a different shape than during fitting.")
        # NOTE(review): ``check_array`` runs a second time here; the already
        # validated ``X`` above could likely be reused -- confirm and simplify.
        return check_array(X)[:, safe_mask(X, mask)]

    def inverse_transform(self, X):
        """Transform a new matrix using the selected features"""
        support = self.get_support()
        if X.ndim == 1:
            X = X[None, :]
        # Scatter the reduced columns back into a zero-filled full-width array.
        Xt = np.zeros((X.shape[0], support.size))
        Xt[:, support] = X
        return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
                      precompute=False, eps=np.finfo(np.float64).eps,
                      max_iter=500):
    """Fit one randomized, subsampled Lasso and report which features are active.

    Rows are restricted to ``mask``, features are down-weighted by
    ``weights``, and the LARS path is used to decide activity at the
    requested ``alpha`` value(s).

    Returns
    -------
    scores : ndarray of bool
        Shape (n_features,) for a scalar ``alpha``, or
        (n_features, len(alpha)) when several values are given.
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]

    # Center X and y to avoid fit the intercept
    X -= X.mean(axis=0)
    y -= y.mean()

    # ``np.float``/``np.bool`` were deprecated aliases of the builtins and
    # are removed in recent NumPy; use the explicit dtypes instead.
    alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))

    # Randomized reweighting of the penalty: scale each column.
    X = (1 - weights) * X

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas_, _, coef_ = lars_path(X, y,
                                      Gram=precompute, copy_X=False,
                                      copy_Gram=False, alpha_min=np.min(alpha),
                                      method='lasso', verbose=verbose,
                                      max_iter=max_iter, eps=eps)

    if len(alpha) > 1:
        if len(alphas_) > 1:  # np.min(alpha) < alpha_min
            # Interpolate the coefficient path so activity can be read off
            # at every requested alpha, not just the knots LARS visited.
            interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
                                    bounds_error=False, fill_value=0.)
            scores = (interpolator(alpha) != 0.0)
        else:
            scores = np.zeros((X.shape[1], len(alpha)), dtype=bool)
    else:
        scores = coef_[:, -1] != 0.0

    return scores
class RandomizedLasso(BaseRandomizedLinearModel):
    """Randomized Lasso.
    Randomized Lasso works by resampling the train data and computing
    a Lasso on each resampling. In short, the features selected more
    often are good features. It is also known as stability selection.
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha parameter in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article which is scaling.
    scaling : float, optional
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    sample_fraction : float, optional
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    n_resampling : int, optional
        Number of randomized models.
    selection_threshold: float, optional
        The score above which features should be selected.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    precompute : True | False | 'auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform in the Lars algorithm.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the 'tol' parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.
    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max of \
        ``all_scores_``.
    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLasso
    >>> randomized_lasso = RandomizedLasso()
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x
    See also
    --------
    RandomizedLogisticRegression, LogisticRegression
    """
    def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
                 n_resampling=200, selection_threshold=.25,
                 fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto',
                 max_iter=500,
                 eps=np.finfo(float).eps, random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        # BUG FIX: the eps default used the deprecated np.float alias
        # (removed in NumPy 1.24); np.finfo(float) yields the same value.
        self.alpha = alpha
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.eps = eps
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Resolve alpha (possibly via AIC/BIC) and return the estimator."""
        assert self.precompute in (True, False, None, 'auto')
        alpha = self.alpha
        if alpha in ('aic', 'bic'):
            # Choose the regularization strength by information criterion
            # on the full data before any resampling.
            model = LassoLarsIC(precompute=self.precompute,
                                criterion=self.alpha,
                                max_iter=self.max_iter,
                                eps=self.eps)
            model.fit(X, y)
            self.alpha_ = alpha = model.alpha_
        return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
                                       eps=self.eps,
                                       precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    """Fit one randomized L1-logistic resample and report surviving features.

    Fits an L1-penalized LogisticRegression on the ``mask``-selected rows
    with columns rescaled by ``1 - weights`` and returns a boolean array of
    shape ``[n_features, len(C)]`` marking non-zero coefficients per C.

    BUG FIX: ``np.float``/``np.bool`` are deprecated aliases of the builtins
    (removed in NumPy 1.24); the builtins are used directly instead.
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        # Rescale a sparse design by multiplying with a diagonal matrix so
        # the sparsity structure is preserved.
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)
    C = np.atleast_1d(np.asarray(C, dtype=float))
    scores = np.zeros((X.shape[1], len(C)), dtype=bool)
    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        # A feature survives if any class coefficient is meaningfully
        # non-zero (threshold at 10 machine epsilons).
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(float).eps, axis=0)
    return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
    """Randomized Logistic Regression
    Randomized Regression works by resampling the train data and computing
    a LogisticRegression on each resampling. In short, the features selected
    more often are good features. It is also known as stability selection.
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    C : float, optional, default=1
        The regularization parameter C in the LogisticRegression.
    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    n_resampling : int, optional, default=200
        Number of randomized models.
    selection_threshold : float, optional, default=0.25
        The score above which features should be selected.
    fit_intercept : boolean, optional, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default=True
        If True, the regressors X will be normalized before regression.
    tol : float, optional, default=1e-3
        tolerance for stopping criteria of LogisticRegression
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.
    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max \
        of ``all_scores_``.
    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLogisticRegression
    >>> randomized_logistic = RandomizedLogisticRegression()
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x
    See also
    --------
    RandomizedLasso, Lasso, ElasticNet
    """
    def __init__(self, C=1, scaling=.5, sample_fraction=.75,
                 n_resampling=200,
                 selection_threshold=.25, tol=1e-3,
                 fit_intercept=True, verbose=False,
                 normalize=True,
                 random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        # Store all hyper-parameters verbatim (scikit-learn convention:
        # no validation or transformation in __init__).
        self.C = C
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Return the resampling estimator callable and its fixed parameters."""
        params = dict(C=self.C, tol=self.tol,
                      fit_intercept=self.fit_intercept)
        return _randomized_logistic, params
    def _center_data(self, X, y, fit_intercept, normalize=False):
        """Center the data in X but not in y"""
        # y holds class labels here, so only the design matrix is centered;
        # the y outputs of center_data are deliberately discarded.
        X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
                                            normalize=normalize)
        return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
    """Inner loop of lasso_stability_path.

    Fits one lars/lasso path on a column-reweighted, row-subsampled design
    and returns the rescaled, ascending alphas with the matching
    coefficient path.
    """
    X = X * weights[np.newaxis, :]
    X = X[safe_mask(X, mask), :]
    y = y[mask]
    # Largest alpha for which all coefficients would be zero.
    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    alpha_min = eps * alpha_max  # set for early stopping in path
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
                                     alpha_min=alpha_min)
    # Scale alpha by alpha_max
    alphas /= alphas[0]
    # Sort alphas in ascending order
    alphas = alphas[::-1]
    coefs = coefs[:, ::-1]
    # Get rid of the alphas that are too small
    mask = alphas >= eps
    # We also want to keep the first one: it should be close to the OLS
    # solution
    mask[0] = True
    alphas = alphas[mask]
    coefs = coefs[:, mask]
    return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
                         n_resampling=200, n_grid=100,
                         sample_fraction=0.75,
                         eps=4 * np.finfo(float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates
    Read more in the :ref:`User Guide <randomized_l1>`.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        training data.
    y : array-like, shape = [n_samples]
        target values.
    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.
    random_state : integer or numpy.random.RandomState, optional
        The generator used to randomize the design.
    n_resampling : int, optional, default=200
        Number of randomized models.
    n_grid : int, optional, default=100
        Number of grid points. The path is linearly reinterpolated
        on a grid between 0 and 1 before computing the scores.
    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.
    eps : float, optional
        Smallest value of alpha / alpha_max considered
    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs
    verbose : boolean or integer, optional
        Sets the verbosity amount
    Returns
    -------
    alphas_grid : array, shape ~ [n_grid]
        The grid points between 0 and 1: alpha/alpha_max
    scores_path : array, shape = [n_features, n_grid]
        The scores for each feature along the path.
    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    """
    rng = check_random_state(random_state)
    if not (0 < scaling < 1):
        raise ValueError("Parameter 'scaling' should be between 0 and 1."
                         " Got %r instead." % scaling)
    n_samples, n_features = X.shape
    paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_lasso_stability_path)(
            X, y, mask=rng.rand(n_samples) < sample_fraction,
            # BUG FIX: randint(0, 2) draws {0, 1} and replaces the
            # deprecated (and since-removed) random_integers(0, 1) with
            # identical behavior.
            weights=1. - scaling * rng.randint(0, 2,
                                               size=(n_features,)),
            eps=eps)
        for k in range(n_resampling))
    all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
    # Take approximately n_grid values
    stride = int(max(1, int(len(all_alphas) / float(n_grid))))
    all_alphas = all_alphas[::stride]
    if not all_alphas[-1] == 1:
        all_alphas.append(1.)
    all_alphas = np.array(all_alphas)
    scores_path = np.zeros((n_features, len(all_alphas)))
    for alphas, coefs in paths:
        if alphas[0] != 0:
            # Prepend alpha=0, where every feature is active (OLS end).
            alphas = np.r_[0, alphas]
            coefs = np.c_[np.ones((n_features, 1)), coefs]
        if alphas[-1] != all_alphas[-1]:
            # Extend to the largest grid alpha with all-zero coefficients.
            alphas = np.r_[alphas, all_alphas[-1]]
            coefs = np.c_[coefs, np.zeros((n_features, 1))]
        # Count, per grid point, whether each feature is selected.
        scores_path += (interp1d(alphas, coefs,
                                 kind='nearest', bounds_error=False,
                                 fill_value=0, axis=-1)(all_alphas) != 0)
    scores_path /= n_resampling
    return all_alphas, scores_path
| bsd-3-clause |
angelapper/edx-platform | cms/djangoapps/contentstore/views/preview.py | 6 | 12359 | from __future__ import absolute_import
import logging
from functools import partial
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchHandlerError
from xblock.fragment import Fragment
from xblock.runtime import KvsFieldData
import static_replace
from cms.lib.xblock.field_data import CmsFieldData
from contentstore.utils import get_visibility_partition_info
from contentstore.views.access import get_user_role
from edxmako.shortcuts import render_to_string
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from openedx.core.lib.license import wrap_with_license
from openedx.core.lib.xblock_utils import (
replace_static_urls,
request_token,
wrap_fragment,
wrap_xblock,
wrap_xblock_aside,
xblock_local_resource_url
)
from util.sandboxing import can_execute_unsafe_code, get_python_lib_zip
from xblock_config.models import StudioConfig
from xblock_django.user_service import DjangoXBlockUserService
from xmodule.contentstore.django import contentstore
from xmodule.error_module import ErrorDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.modulestore.django import ModuleI18nService, modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.services import SettingsService
from xmodule.studio_editable import has_author_view
from xmodule.x_module import AUTHOR_VIEW, PREVIEW_VIEWS, STUDENT_VIEW, ModuleSystem
from .helpers import render_from_lms
from .session_kv_store import SessionKeyValueStore
__all__ = ['preview_handler']
log = logging.getLogger(__name__)
@login_required
def preview_handler(request, usage_key_string, handler, suffix=''):
    """
    Dispatch an AJAX action to an xblock
    usage_key_string: The usage_key_string-id of the block to dispatch to, passed through `quote_slashes`
    handler: The handler to execute
    suffix: The remainder of the url to be passed to the handler

    Raises Http404 when the handler or target item does not exist; returns
    a 400 for processing errors and re-raises unexpected exceptions (500).
    """
    usage_key = UsageKey.from_string(usage_key_string)
    # Load the descriptor from the modulestore and bind it for preview use.
    descriptor = modulestore().get_item(usage_key)
    instance = _load_preview_module(request, descriptor)
    # Let the module handle the AJAX
    req = django_to_webob_request(request)
    try:
        resp = instance.handle(handler, req, suffix)
    except NoSuchHandlerError:
        log.exception("XBlock %s attempted to access missing handler %r", instance, handler)
        raise Http404
    except NotFoundError:
        log.exception("Module indicating to user that request doesn't exist")
        raise Http404
    except ProcessingError:
        # Expected, user-visible processing failures surface as a 400.
        log.warning("Module raised an error while processing AJAX request",
                    exc_info=True)
        return HttpResponseBadRequest()
    except Exception:
        # Unexpected errors: log with traceback and re-raise (becomes a 500).
        log.exception("error processing ajax call")
        raise
    return webob_to_django_response(resp)
class PreviewModuleSystem(ModuleSystem):  # pylint: disable=abstract-method
    """
    An XModule ModuleSystem for use in Studio previews
    """
    # xmodules can check for this attribute during rendering to determine if
    # they are being rendered for preview (i.e. in Studio)
    is_author_mode = True
    def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
        # Route handler callbacks through the Studio preview_handler view.
        # `thirdparty` is accepted for interface compatibility but unused here.
        return reverse('preview_handler', kwargs={
            'usage_key_string': unicode(block.scope_ids.usage_id),
            'handler': handler_name,
            'suffix': suffix,
        }) + '?' + query
    def local_resource_url(self, block, uri):
        # Delegate to the shared helper for XBlock-packaged static resources.
        return xblock_local_resource_url(block, uri)
    def applicable_aside_types(self, block):
        """
        Remove acid_aside and honor the config record
        """
        if not StudioConfig.asides_enabled(block.scope_ids.block_type):
            return []
        # TODO: aside_type != 'acid_aside' check should be removed once AcidBlock is only installed during tests
        # (see https://openedx.atlassian.net/browse/TE-811)
        return [
            aside_type
            for aside_type in super(PreviewModuleSystem, self).applicable_aside_types(block)
            if aside_type != 'acid_aside'
        ]
    def render_child_placeholder(self, block, view_name, context):
        """
        Renders a placeholder XBlock.
        """
        return self.wrap_xblock(block, view_name, Fragment(), context)
    def layout_asides(self, block, context, frag, view_name, aside_frag_fns):
        # Render each aside and splice its wrapped markup into the block
        # fragment at the footer marker, accumulating static resources.
        position_for_asides = '<!-- footer for xblock_aside -->'
        result = Fragment()
        result.add_frag_resources(frag)
        for aside, aside_fn in aside_frag_fns:
            aside_frag = aside_fn(block, context)
            if aside_frag.content != u'':
                aside_frag_wrapped = self.wrap_aside(block, aside, view_name, aside_frag, context)
                # Persist any state the aside mutated while rendering.
                aside.save()
                result.add_frag_resources(aside_frag_wrapped)
                replacement = position_for_asides + aside_frag_wrapped.content
                frag.content = frag.content.replace(position_for_asides, replacement)
        result.add_content(frag.content)
        return result
def _preview_module_system(request, descriptor, field_data):
    """
    Returns a ModuleSystem for the specified descriptor that is specialized for
    rendering module previews.
    request: The active django request
    descriptor: An XModuleDescriptor
    field_data: The wrapped field data the runtime exposes as the
        "field-data" service
    """
    course_id = descriptor.location.course_key
    display_name_only = (descriptor.category == 'static_tab')
    wrappers = [
        # This wrapper wraps the module in the template specified above
        partial(
            wrap_xblock,
            'PreviewRuntime',
            display_name_only=display_name_only,
            usage_id_serializer=unicode,
            request_token=request_token(request)
        ),
        # This wrapper replaces urls in the output that start with /static
        # with the correct course-specific url for the static content
        partial(replace_static_urls, None, course_id=course_id),
        # Adds the Studio chrome (header and action buttons) around the block.
        _studio_wrap_xblock,
    ]
    wrappers_asides = [
        partial(
            wrap_xblock_aside,
            'PreviewRuntime',
            usage_id_serializer=unicode,
            request_token=request_token(request)
        )
    ]
    if settings.FEATURES.get("LICENSING", False):
        # stick the license wrapper in front
        wrappers.insert(0, wrap_with_license)
    return PreviewModuleSystem(
        static_url=settings.STATIC_URL,
        # TODO (cpennington): Do we want to track how instructors are using the preview problems?
        track_function=lambda event_type, event: None,
        filestore=descriptor.runtime.resources_fs,
        get_module=partial(_load_preview_module, request),
        render_template=render_from_lms,
        debug=True,
        replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_id=course_id),
        user=request.user,
        can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
        get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),
        mixins=settings.XBLOCK_MIXINS,
        course_id=course_id,
        anonymous_student_id='student',
        # Set up functions to modify the fragment produced by student_view
        wrappers=wrappers,
        wrappers_asides=wrappers_asides,
        error_descriptor_class=ErrorDescriptor,
        get_user_role=lambda: get_user_role(request.user, course_id),
        # Get the raw DescriptorSystem, not the CombinedSystem
        descriptor_runtime=descriptor._runtime,  # pylint: disable=protected-access
        services={
            "field-data": field_data,
            "i18n": ModuleI18nService,
            "settings": SettingsService(),
            "user": DjangoXBlockUserService(request.user),
            "partitions": StudioPartitionService(course_id=course_id)
        },
    )
class StudioPartitionService(PartitionService):
    """Partition service used when rendering previews in Studio.

    Authors should always see every group's content, so partition lookups
    never resolve to a single user group here.
    """
    def get_user_group_id_for_partition(self, user, user_partition_id):
        """Always return None.

        split_test_module calls this to pick the group a user should see;
        callers are robust to None, which means "show all groups" — exactly
        what Studio previews want.
        """
        return None
def _load_preview_module(request, descriptor):
    """
    Return a preview XModule instantiated from the supplied descriptor. Will use mutable fields
    if XModule supports an author_view. Otherwise, will use immutable fields and student_view.
    request: The active django request
    descriptor: An XModuleDescriptor
    """
    student_data = KvsFieldData(SessionKeyValueStore(request))
    # Author views may edit fields, so give them the CMS (mutable) wrapper;
    # student views get the LMS (read-only overlay) wrapper instead.
    if has_author_view(descriptor):
        wrapper = partial(CmsFieldData, student_data=student_data)
    else:
        wrapper = partial(LmsFieldData, student_data=student_data)
    # wrap the _field_data upfront to pass to _preview_module_system
    wrapped_field_data = wrapper(descriptor._field_data)  # pylint: disable=protected-access
    preview_runtime = _preview_module_system(request, descriptor, wrapped_field_data)
    descriptor.bind_for_student(
        preview_runtime,
        request.user.id,
        [wrapper]
    )
    return descriptor
def _is_xblock_reorderable(xblock, context):
"""
Returns true if the specified xblock is in the set of reorderable xblocks
otherwise returns false.
"""
try:
return xblock.location in context['reorderable_items']
except KeyError:
return False
# pylint: disable=unused-argument
def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
    """
    Wraps the results of rendering an XBlock view in a div which adds a header and Studio action buttons.
    """
    # Only add the Studio wrapper when on the container page. The "Pages" page will remain as is for now.
    if not context.get('is_pages_view', None) and view in PREVIEW_VIEWS:
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and xblock.location == root_xblock.location
        is_reorderable = _is_xblock_reorderable(xblock, context)
        selected_groups_label = get_visibility_partition_info(xblock)['selected_groups_label']
        if selected_groups_label:
            selected_groups_label = _('Access restricted to: {list_of_groups}').format(list_of_groups=selected_groups_label)
        course = modulestore().get_course(xblock.location.course_key)
        # Context consumed by studio_xblock_wrapper.html; the permission
        # flags default to True when the caller does not provide them.
        template_context = {
            'xblock_context': context,
            'xblock': xblock,
            'show_preview': context.get('show_preview', True),
            'content': frag.content,
            'is_root': is_root,
            'is_reorderable': is_reorderable,
            'can_edit': context.get('can_edit', True),
            'can_edit_visibility': context.get('can_edit_visibility', True),
            'selected_groups_label': selected_groups_label,
            'can_add': context.get('can_add', True),
            'can_move': context.get('can_move', True),
            'language': getattr(course, 'language', None)
        }
        html = render_to_string('studio_xblock_wrapper.html', template_context)
        frag = wrap_fragment(frag, html)
    return frag
def get_preview_fragment(request, descriptor, context):
    """
    Returns the HTML returned by the XModule's student_view or author_view (if available),
    specified by the descriptor and idx.
    """
    module = _load_preview_module(request, descriptor)
    preview_view = AUTHOR_VIEW if has_author_view(module) else STUDENT_VIEW
    try:
        fragment = module.render(preview_view, context)
    except Exception as exc:  # pylint: disable=broad-except
        # Broad catch is deliberate: a broken block must not take down the
        # whole Studio page, so render an error fragment in its place.
        log.warning("Unable to render %s for %r", preview_view, module, exc_info=True)
        fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
    return fragment
| agpl-3.0 |
osuripple/pep.py | constants/packetIDs.py | 1 | 2252 | """Contain server and client packet IDs"""
# Bancho packet IDs. "client_" constants are packets received from the osu!
# client; "server_" constants are packets sent by this server. Gaps in the
# numbering correspond to packet IDs this server does not handle.

# --- Session, presence and chat ---
client_changeAction = 0
client_sendPublicMessage = 1
client_logout = 2
client_requestStatusUpdate = 3
server_userID = 5
server_sendMessage = 7
server_userStats = 11
server_userLogout = 12
# --- Spectating ---
server_spectatorJoined = 13
server_spectatorLeft = 14
server_spectateFrames = 15
client_startSpectating = 16
client_stopSpectating = 17
client_spectateFrames = 18
client_cantSpectate = 21
server_spectatorCantSpectate = 22
server_notification = 24
client_sendPrivateMessage = 25
# --- Multiplayer lobby and matches ---
server_updateMatch = 26
server_newMatch = 27
server_disposeMatch = 28
client_partLobby = 29
client_joinLobby = 30
client_createMatch = 31
client_joinMatch = 32
client_partMatch = 33
server_matchJoinSuccess = 36
server_matchJoinFail = 37
client_matchChangeSlot = 38
client_matchReady = 39
client_matchLock = 40
client_matchChangeSettings = 41
server_fellowSpectatorJoined = 42
server_fellowSpectatorLeft = 43
client_matchStart = 44
server_matchStart = 46
client_matchScoreUpdate = 47
server_matchScoreUpdate = 48
client_matchComplete = 49
server_matchTransferHost = 50
client_matchChangeMods = 51
client_matchLoadComplete = 52
server_matchAllPlayersLoaded = 53
client_matchNoBeatmap = 54
client_matchNotReady = 55
client_matchFailed = 56
server_matchPlayerFailed = 57
server_matchComplete = 58
client_matchHasBeatmap = 59
client_matchSkipRequest = 60
server_matchSkip = 61
# --- Chat channels ---
client_channelJoin = 63
server_channelJoinSuccess = 64
server_channelInfo = 65
server_channelKicked = 66
client_matchTransferHost = 70
server_supporterGMT = 71
# --- Friends, invites and misc ---
server_friendsList = 72
client_friendAdd = 73
client_friendRemove = 74
server_protocolVersion = 75
server_mainMenuIcon = 76
client_matchChangeTeam = 77
client_channelPart = 78
server_matchPlayerSkipped = 81
client_setAwayMessage = 82
server_userPanel = 83
client_userStatsRequest = 85
server_restart = 86
client_invite = 87
server_invite = 88
server_channelInfoEnd = 89
client_matchChangePassword = 90
server_matchChangePassword = 91
server_silenceEnd = 92
server_userSilenced = 94
server_userPresenceBundle = 96
client_userPanelRequest = 97
# NOTE(review): 93 is intentionally(?) listed out of numeric order here —
# presumably to keep the tournament packets grouped together; confirm.
client_tournamentMatchInfoRequest = 93
server_matchAbort = 106
server_switchServer = 107
client_tournamentJoinMatchChannel = 108
client_tournamentLeaveMatchChannel = 109 | agpl-3.0 |
nkfly/vm-hw1 | QMP/qmp.py | 13 | 2083 | # QEMU Monitor Protocol Python class
#
# Copyright (C) 2009 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import socket, json
class QMPError(Exception):
    """Base exception for QEMU Monitor Protocol errors."""
    pass
class QMPConnectError(QMPError):
    """Raised when the QMP greeting/handshake cannot be completed."""
    pass
class QEMUMonitorProtocol:
    """Client for the QEMU Monitor Protocol over a UNIX domain socket.

    Construct with the socket path, call connect() to perform the greeting
    handshake, then issue commands with send() or send_raw().

    BUG FIX: dict.has_key() was removed in Python 3; membership tests now
    use the ``in`` operator (which also works on Python 2), and identity
    comparison with None uses ``is None``.
    """
    def connect(self):
        """Connect to the monitor socket; return the capabilities list.

        Raises QMPConnectError if no greeting (or a malformed one) arrives.
        """
        self.sock.connect(self.filename)
        data = self.__json_read()
        if data is None:
            raise QMPConnectError
        if 'QMP' not in data:
            raise QMPConnectError
        return data['QMP']['capabilities']
    def close(self):
        """Close the underlying socket."""
        self.sock.close()
    def send_raw(self, line):
        """Send a pre-formatted line verbatim and return the parsed reply."""
        self.sock.send(str(line))
        return self.__json_read()
    def send(self, cmdline):
        """Send 'command key=value ...'; return its 'return' or 'error' payload."""
        cmd = self.__build_cmd(cmdline)
        self.__json_send(cmd)
        resp = self.__json_read()
        if resp is None:
            return
        elif 'error' in resp:
            return resp['error']
        else:
            return resp['return']
    def __build_cmd(self, cmdline):
        """Parse 'command key=value ...' into a QMP command dictionary.

        Values that look like integers are converted; everything else is
        kept as a string.
        """
        cmdargs = cmdline.split()
        qmpcmd = { 'execute': cmdargs[0], 'arguments': {} }
        for arg in cmdargs[1:]:
            opt = arg.split('=')
            try:
                value = int(opt[1])
            except ValueError:
                value = opt[1]
            qmpcmd['arguments'][opt[0]] = value
        return qmpcmd
    def __json_send(self, cmd):
        # XXX: We have to send any additional char, otherwise
        # the Server won't read our input
        self.sock.send(json.dumps(cmd) + ' ')
    def __json_read(self):
        # Read lines until a non-event message arrives; returns None when
        # the stream ends or yields unparseable JSON.
        try:
            while True:
                line = json.loads(self.sockfile.readline())
                if not 'event' in line:
                    return line
        except ValueError:
            return
    def __init__(self, filename):
        self.filename = filename
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sockfile = self.sock.makefile()
| gpl-2.0 |
faroit/loudness | python/tests/test_OME.py | 1 | 2084 | import numpy as np
import matplotlib.pyplot as plt
import loudness as ln
def plotResponse(freqPoints, dataPoints,
                 freqsInterp, responseInterp,
                 ylim=(-40, 10), title=""):
    """Plot an interpolated response curve on log-frequency axes.

    When measured data points are supplied they are overlaid as circle
    markers before the interpolated curve is drawn.
    """
    have_points = np.any(dataPoints)
    if have_points:
        plt.semilogx(freqPoints, dataPoints, 'o')
    plt.semilogx(freqsInterp, responseInterp)
    plt.xlim(20, 20e3)
    plt.ylim(ylim)
    plt.xlabel("Frequency, Hz")
    plt.ylabel("Response, dB")
    plt.title(title)
    plt.show()
def plotMiddleEar(filterType, ylim=(-40, 0)):
    """Plot the middle-ear response alone (outer-ear filter set to NONE)."""
    freqs = np.arange(20, 20000, 2)
    ome = ln.OME(filterType, ln.OME.NONE)
    ome.interpolateResponse(freqs)
    response = ome.getResponse()
    freqPoints = ome.getMiddleEarFreqPoints()
    dataPoints = ome.getMiddleEardB()
    plotResponse(freqPoints, dataPoints,
                 freqs, response, ylim)
def plotOuterEar(filterType, ylim=(-40, 0)):
    """Plot the outer-ear response alone (middle-ear filter set to NONE)."""
    freqs = np.arange(20, 20000, 2)
    ome = ln.OME(ln.OME.NONE, filterType)
    ome.interpolateResponse(freqs)
    response = ome.getResponse()
    freqPoints = ome.getOuterEarFreqPoints()
    dataPoints = ome.getOuterEardB()
    plotResponse(freqPoints, dataPoints,
                 freqs, response, ylim)
def plotCombined(middleFilterType, outerFilterType, ylim=(-40, 10)):
    """Plot the combined outer + middle ear response (no measured points)."""
    freqs = np.arange(20, 20000, 2)
    ome = ln.OME(middleFilterType, outerFilterType)
    ome.interpolateResponse(freqs)
    response = ome.getResponse()
    plotResponse(None, None,
                 freqs, response, ylim)
# Middle-ear responses in isolation.
plt.figure(1)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR, (-40, 0))
plt.figure(2)
plotMiddleEar(ln.OME.CHGM2011_MIDDLE_EAR, (-40, 10))
# BUG FIX: this plot previously reused figure number 2, overwriting the
# CHGM2011 plot above; every plot now gets its own figure number.
plt.figure(3)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR_HPF, (-40, 0))
# Outer-ear responses in isolation.
plt.figure(4)
plotOuterEar(ln.OME.ANSIS342007_FREEFIELD, (-5, 20))
plt.figure(5)
plotOuterEar(ln.OME.ANSIS342007_DIFFUSEFIELD, (-5, 20))
plt.figure(6)
plotOuterEar(ln.OME.BD_DT990, (-10, 10))
# Combined outer + middle ear responses.
plt.figure(7)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR,
             ln.OME.ANSIS342007_FREEFIELD, (-40, 10))
plt.figure(8)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.BD_DT990, (-40, 10))
| gpl-3.0 |
SteveXiSong/ECE757-SnoopingPredictions | src/mem/slicc/ast/MethodCallExprAST.py | 32 | 8372 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
class MethodCallExprAST(ExprAST):
    """AST node for a SLICC method call.

    Emits the C++ call into the code formatter and type-checks the actual
    arguments against the method's declared parameter types.  Subclasses
    supply generate_prefix() to resolve the callee.
    """

    def __init__(self, slicc, proc_name, expr_ast_vec):
        super(MethodCallExprAST, self).__init__(slicc)
        self.proc_name = proc_name
        self.expr_ast_vec = expr_ast_vec

    def generate(self, code):
        """Emit the call into ``code`` and return the method's return type.

        Raises a SLICC error if the method does not exist on the object's
        type or if the argument count/types do not match the declaration.
        """
        tmp = self.slicc.codeFormatter()
        paramTypes = []
        for expr_ast in self.expr_ast_vec:
            return_type = expr_ast.generate(tmp)
            paramTypes.append(return_type)

        # generate_prefix() is provided by the Member/Class subclasses and
        # resolves the object type, the method id and the C++ call prefix.
        obj_type, methodId, prefix = self.generate_prefix(paramTypes)

        # generate code
        params = []
        for expr_ast in self.expr_ast_vec:
            return_type, tcode = expr_ast.inline(True)
            params.append(str(tcode))
        fix = code.nofix()
        # The trailing ')' closes the '(' opened inside the prefix string.
        code("$prefix${{self.proc_name}}(${{', '.join(params)}}))")
        code.fix(fix)

        # Verify that this is a method of the object
        if methodId not in obj_type.methods:
            self.error("Invalid method call: Type '%s' does not have a method '%s'",
                       obj_type, methodId)

        if len(self.expr_ast_vec) != \
               len(obj_type.methods[methodId].param_types):
            # Wrong number of parameters.  BUGFIX: this message previously
            # referenced the undefined name ``proc_name`` (a NameError at
            # report time) and had no format specifiers for the two counts.
            self.error("Wrong number of parameters for function name: '%s', "
                       "expected: %d, actual: %d", self.proc_name,
                       len(obj_type.methods[methodId].param_types),
                       len(self.expr_ast_vec))

        for actual_type, expected_type in \
                zip(paramTypes, obj_type.methods[methodId].param_types):
            if actual_type != expected_type and \
                   str(actual_type["interface"]) != str(expected_type):
                self.error("Type mismatch: expected: %s actual: %s",
                           expected_type, actual_type)

        # Return the return type of the method
        return obj_type.methods[methodId].return_type

    def findResources(self, resources):
        # Method calls do not consume controller resources.
        pass
class MemberMethodCallExprAST(MethodCallExprAST):
    # Method call of the form obj.method(args): resolves the method on the
    # object's type (falling back to its declared "interface" super type)
    # and builds the C++ call prefix, e.g. "((obj)." or "((*(obj_ptr)).".
    def __init__(self, slicc, obj_expr_ast, proc_name, expr_ast_vec):
        s = super(MemberMethodCallExprAST, self)
        s.__init__(slicc, proc_name, expr_ast_vec)
        self.obj_expr_ast = obj_expr_ast

    def __repr__(self):
        return "[MethodCallExpr: %r%r %r]" % (self.proc_name,
                                              self.obj_expr_ast,
                                              self.expr_ast_vec)

    def generate_prefix(self, paramTypes):
        code = self.slicc.codeFormatter()

        # member method call
        obj_type = self.obj_expr_ast.generate(code)
        methodId = obj_type.methodId(self.proc_name, paramTypes)

        prefix = ""
        implements_interface = False

        if methodId in obj_type.methods:
            return_type = obj_type.methods[methodId].return_type
        else:
            #
            # Check whether the method is implemented by the super class
            if "interface" in obj_type:
                interface_type = self.symtab.find(obj_type["interface"]);
                if methodId in interface_type.methods:
                    return_type = interface_type.methods[methodId].return_type
                    # Treat the call as made on the interface type from here on.
                    obj_type = interface_type
                else:
                    self.error("Invalid method call: " \
                               "Type '%s' does not have a method %s, '%s'",
                               obj_type, self.proc_name, methodId)
            else:
                #
                # The initial method check has failed, but before generating an
                # error we must check whether any of the paramTypes implement
                # an interface. If so, we must check if the method ids using
                # the inherited types exist.
                #
                # This code is a temporary fix and only checks for the methodId
                # where all paramTypes are converted to their inherited type. The
                # right way to do this is to replace slicc's simple string
                # comparison for determining the correct overloaded method, with a
                # more robust param by param check.
                #
                implemented_paramTypes = []
                for paramType in paramTypes:
                    # NOTE(review): this aliases paramType rather than copying
                    # it, so abstract_ident is set on the original type object
                    # -- appears tolerated upstream, but confirm before reuse.
                    implemented_paramType = paramType
                    if paramType.isInterface:
                        implements_interface = True
                        implemented_paramType.abstract_ident = paramType["interface"]
                    else:
                        implemented_paramType.abstract_ident = paramType.c_ident
                    implemented_paramTypes.append(implemented_paramType)

                implementedMethodId = ""
                if implements_interface:
                    implementedMethodId = obj_type.methodIdAbstract(
                        self.proc_name, implemented_paramTypes)

                if implementedMethodId not in obj_type.methods:
                    self.error("Invalid method call: Type '%s' " \
                               "does not have a method %s, '%s' nor '%s'",
                               obj_type, self.proc_name, methodId,
                               implementedMethodId)

                # Replace the methodId with the implementedMethodId
                # found in the method list.
                methodId = implementedMethodId
                return_type = obj_type.methods[methodId].return_type

        # Interface-typed results need a cast back to the concrete C++ type.
        if return_type.isInterface:
            prefix = "static_cast<%s &>" % return_type.c_ident

        # Cache/TBE entries are handled by pointer, hence the dereference.
        if str(obj_type) == "AbstractCacheEntry" or \
           str(obj_type) == "AbstractEntry" or \
           ("interface" in obj_type and (
               obj_type["interface"] == "AbstractCacheEntry" or
               obj_type["interface"] == "AbstractEntry")):
            prefix = "%s((*(%s))." % (prefix, code)
        else:
            prefix = "%s((%s)." % (prefix, code)

        return obj_type, methodId, prefix
class ClassMethodCallExprAST(MethodCallExprAST):
    """Static method call made on a type rather than on an object instance."""

    def __init__(self, slicc, type_ast, proc_name, expr_ast_vec):
        super(ClassMethodCallExprAST, self).__init__(slicc, proc_name,
                                                     expr_ast_vec)
        self.type_ast = type_ast

    def __repr__(self):
        return "[MethodCallExpr: %r %r]" % (self.proc_name, self.expr_ast_vec)

    def generate_prefix(self, paramTypes):
        # Qualify the call with the type name: emits "(Type::method(...))".
        prefix = "(%s::" % self.type_ast
        owner_type = self.type_ast.type
        method_id = owner_type.methodId(self.proc_name, paramTypes)
        return owner_type, method_id, prefix
__all__ = [ "MemberMethodCallExprAST", "ClassMethodCallExprAST" ]
| bsd-3-clause |
ThinkOpen-Solutions/odoo | addons/survey/wizard/__init__.py | 385 | 1026 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
| agpl-3.0 |
nelsonsar/ansible | lib/ansible/module_utils/splitter.py | 372 | 8425 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation of shlex that has some more intelligence
    for how Ansible needs to use it.

    Raises Exception if the input has unbalanced quotes or jinja2 blocks.
    '''
    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []
    # here we encode the args, so we have a uniform charset to
    # work with, and split on white space
    args = args.strip()
    try:
        # NOTE: on Python 2, encoding a byte str triggers an implicit ASCII
        # decode first, which is what can raise UnicodeDecodeError here.
        args = args.encode('utf-8')
        do_decode = True
    except UnicodeDecodeError:
        do_decode = False
    items = args.split('\n')
    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together
    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.
    quote_char = None
    inside_quotes = False
    print_depth = 0 # used to count nested jinja2 {{ }} blocks
    block_depth = 0 # used to count nested jinja2 {% %} blocks
    comment_depth = 0 # used to count nested jinja2 {# #} blocks
    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for itemidx,item in enumerate(items):
        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.strip().split(' ')
        line_continuation = False
        for idx,token in enumerate(tokens):
            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue
            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None
            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False
            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes:
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and not inside_quotes and was_inside_quotes:
                    # the closing-quote token joins the previous param directly
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    # rejoin with the space this token was split on
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    # single token on this line: it was split on a newline
                    spacer = ''
                    if not params[-1].endswith('\n') and idx == 0:
                        spacer = '\n'
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                appended = True
            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True
            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True
            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True
            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
                params.append(token)
        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
            if not params[-1].endswith('\n') or item == '':
                params[-1] += '\n'
        # always clear the line continuation flag
        line_continuation = False
    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
    # finally, we decode each param back to the unicode it was in the arg string
    if do_decode:
        params = [x.decode('utf-8') for x in params]
    return params
def is_quoted(data):
    '''True if *data* both starts and ends with the same quote character.'''
    if not data:
        return False
    return data[0] == data[-1] and data[0] in ('"', "'")
def unquote(data):
    '''Strip one layer of matching surrounding quotes from *data*, if present.'''
    return data[1:-1] if is_quoted(data) else data
| gpl-3.0 |
moto-timo/ironpython3 | Src/StdLib/Lib/_dummy_thread.py | 106 | 4872 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
           'interrupt_main', 'LockType']

# A dummy value: without real locks there is no meaningful maximum timeout,
# but the name must exist for API compatibility with the _thread module.
TIMEOUT_MAX = 2**31

# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of _thread.start_new_thread().

    The callable is simply run synchronously in the current (and only)
    thread.  ``args`` must be a tuple and ``kwargs`` a dict, mirroring the
    real _thread API.  A SystemExit raised by the callable (e.g. via
    _thread.exit()) is swallowed; any other exception is printed using
    traceback.print_exc().  If the callable invoked interrupt_main(), a
    KeyboardInterrupt is raised once it returns.
    """
    if type(args) is not tuple:
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) is not dict:
        raise TypeError("3rd arg must be a dict")

    global _main, _interrupt
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        pass
    except BaseException:
        import traceback
        traceback.print_exc()
    _main = True

    if _interrupt:
        _interrupt = False
        raise KeyboardInterrupt
def exit():
    """Dummy implementation of _thread.exit().

    Raises SystemExit, which start_new_thread() treats as a normal
    "thread" termination.
    """
    raise SystemExit
def get_ident():
    """Dummy implementation of _thread.get_ident().

    When the real _thread module is unavailable the current process is the
    only "thread", so a fixed identifier is always valid.
    """
    return -1
def allocate_lock():
    """Dummy implementation of _thread.allocate_lock().

    Returns a fresh LockType instance (the no-op lock defined below).
    """
    return LockType()
def stack_size(size=None):
    """Dummy implementation of _thread.stack_size().

    Querying (no argument) reports 0; attempting to set a size raises
    ``error`` since there are no real thread stacks to configure.
    """
    if size is None:
        return 0
    raise error("setting thread stack size not supported")
def _set_sentinel():
    """Dummy implementation of _thread._set_sentinel().

    Returns a dummy lock; the real implementation returns a lock released
    when the calling thread exits.
    """
    return LockType()
class LockType(object):
    """Dummy implementation of _thread.LockType.

    Keeps a single boolean, ``locked_status``, mimicking a real lock's
    state.  Do not pickle these objects: an unpickled lock used with the
    real _thread module would lack atomic methods and misbehave.
    """

    def __init__(self):
        self.locked_status = False

    def acquire(self, waitflag=None, timeout=-1):
        """Dummy implementation of acquire().

        A blocking request (waitflag None or truthy) always "succeeds"
        immediately and marks the lock held -- this keeps assertions in
        threading.Condition happy.  A non-blocking request only succeeds
        when the lock is currently free; otherwise we optionally honour
        the timeout by sleeping, then report failure.
        """
        if waitflag is None or waitflag:
            self.locked_status = True
            return True
        if not self.locked_status:
            self.locked_status = True
            return True
        if timeout > 0:
            import time
            time.sleep(timeout)
        return False

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock; raises ``error`` if it was not held."""
        # XXX Perhaps shouldn't actually bother to test? Could lead
        # to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        return self.locked_status
# Set by interrupt_main() while a "thread" is executing; checked by
# start_new_thread() after the callable returns.
_interrupt = False
# True while running in the main pseudo-thread, False inside a "thread".
_main = True


def interrupt_main():
    """Set _interrupt flag to True to have start_new_thread raise
    KeyboardInterrupt upon exiting."""
    global _interrupt
    if not _main:
        _interrupt = True
    else:
        raise KeyboardInterrupt
| apache-2.0 |
amit0701/rally | rally/plugins/openstack/scenarios/ceilometer/events.py | 14 | 2958 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.ceilometer import utils as cutils
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.task import validation
class CeilometerEvents(cutils.CeilometerScenario, kutils.KeystoneScenario):
    """Benchmark scenarios for Ceilometer Events API."""

    # NOTE(idegtiarov): to work with events we need to create one first;
    # there is no direct way to do that, so we emit a suitable notification
    # from one of the services -- here, by creating a new user in keystone.

    @validation.required_services(consts.Service.CEILOMETER,
                                  consts.Service.KEYSTONE)
    @validation.required_openstack(admin=True)
    @scenario.configure(context={"admin_cleanup": ["keystone"],
                                 "cleanup": ["ceilometer"]})
    def create_user_and_list_events(self):
        """Create user and fetch all events.

        This scenario creates a user (which stores a new event) and
        fetches the list of all events using GET /v2/events.
        """
        self._user_create()
        self._list_events()

    @validation.required_services(consts.Service.CEILOMETER,
                                  consts.Service.KEYSTONE)
    @validation.required_openstack(admin=True)
    @scenario.configure(context={"admin_cleanup": ["keystone"],
                                 "cleanup": ["ceilometer"]})
    def create_user_and_list_event_types(self):
        """Create user and fetch all event types.

        This scenario creates a user (which stores a new event) and
        fetches the list of all event types using GET /v2/event_types.
        """
        self._user_create()
        self._list_event_types()

    @validation.required_services(consts.Service.CEILOMETER,
                                  consts.Service.KEYSTONE)
    @validation.required_openstack(admin=True)
    @scenario.configure(context={"admin_cleanup": ["keystone"],
                                 "cleanup": ["ceilometer"]})
    def create_user_and_get_event(self):
        """Create user and get one event.

        This scenario creates a user (which stores a new event) and
        fetches one event using GET /v2/events/<message_id>.
        """
        self._user_create()
        # The freshly emitted notification is expected to be first in the list.
        event = self._list_events()[0]
        self._get_event(event_id=event.message_id)
yestech/gae-django-template | djangoappengine/management/commands/runserver.py | 6 | 6549 | from optparse import make_option
import logging
import sys
from django.db import connections
from ...boot import PROJECT_DIR
from ...db.base import DatabaseWrapper, get_datastore_paths
from django.core.management.base import BaseCommand
from django.core.management.commands.runserver import BaseRunserverCommand
from django.core.exceptions import ImproperlyConfigured
from google.appengine.tools import dev_appserver_main
class Command(BaseRunserverCommand):
    """Overrides the default Django runserver command.

    Instead of starting the default Django development server this command
    fires up a copy of the full fledged App Engine dev_appserver that emulates
    the live environment your application will be deployed to.
    """
    # Extra flags mirroring dev_appserver's own command line; boolean flags
    # and string options are forwarded to dev_appserver_main in run().
    option_list = BaseCommand.option_list + (
        make_option('--debug', action='store_true', default=False,
            help='Prints verbose debugging messages to the console while running.'),
        make_option('--debug_imports', action='store_true', default=False,
            help='Prints debugging messages related to importing modules, including \
search paths and errors.'),
        make_option('-c', '--clear_datastore', action='store_true', default=False,
            help='Clears the datastore data and history files before starting the web server.'),
        make_option('--high_replication', action='store_true', default=False,
            help='Use the high replication datastore consistency model.'),
        make_option('--require_indexes', action='store_true', default=False,
            help="""Disables automatic generation of entries in the index.yaml file. Instead, when
the application makes a query that requires that its index be defined in the
file and the index definition is not found, an exception will be raised,
similar to what would happen when running on App Engine."""),
        make_option('--enable_sendmail', action='store_true', default=False,
            help='Uses the local computer\'s Sendmail installation for sending email messages.'),
        make_option('--datastore_path',
            help="""The path to use for the local datastore data file. The server creates this file
if it does not exist."""),
        make_option('--history_path',
            help="""The path to use for the local datastore history file. The server uses the query
history file to generate entries for index.yaml."""),
        make_option('--login_url',
            help='The relative URL to use for the Users sign-in page. Default is /_ah/login.'),
        make_option('--smtp_host',
            help='The hostname of the SMTP server to use for sending email messages.'),
        make_option('--smtp_port',
            help='The port number of the SMTP server to use for sending email messages.'),
        make_option('--smtp_user',
            help='The username to use with the SMTP server for sending email messages.'),
        make_option('--smtp_password',
            help='The password to use with the SMTP server for sending email messages.'),
    )
    help = 'Runs a copy of the App Engine development server.'
    args = '[optional port number, or ipaddr:port]'

    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``OptionParser`` which will be used to
        parse the arguments to this command.
        """
        # hack __main__ so --help in dev_appserver_main works OK.
        sys.modules['__main__'] = dev_appserver_main
        return super(Command, self).create_parser(prog_name, subcommand)

    def run_from_argv(self, argv):
        """
        Captures the program name, usually "manage.py"
        """
        self.progname = argv[0]
        super(Command, self).run_from_argv(argv)

    def run(self, *args, **options):
        """
        Starts the App Engine dev_appserver program for the Django project.

        The appserver is run with default parameters. If you need to pass any special
        parameters to the dev_appserver you will have to invoke it manually.

        Unlike the normal devserver, does not use the autoreloader as
        App Engine dev_appserver needs to be run from the main thread
        """
        args = []
        # Set bind ip/port if specified.
        if self.addr:
            args.extend(["--address", self.addr])
        if self.port:
            args.extend(["--port", self.port])

        # If runserver is called using handle(), progname will not be set
        if not hasattr(self, 'progname'):
            self.progname = "manage.py"

        # Add email settings: fall back to Django's SMTP configuration unless
        # an explicit SMTP host or sendmail was requested on the command line.
        from django.conf import settings
        if not options.get('smtp_host', None) and not options.get('enable_sendmail', None):
            args.extend(['--smtp_host', settings.EMAIL_HOST,
                         '--smtp_port', str(settings.EMAIL_PORT),
                         '--smtp_user', settings.EMAIL_HOST_USER,
                         '--smtp_password', settings.EMAIL_HOST_PASSWORD])

        # Pass the application specific datastore location to the server.
        # Only the first App Engine database connection is considered.
        for name in connections:
            connection = connections[name]
            if isinstance(connection, DatabaseWrapper):
                for key, path in get_datastore_paths(connection.settings_dict).items():
                    # XXX/TODO: Remove this when SDK 1.4.3 is released
                    if key == 'prospective_search_path':
                        continue
                    arg = '--' + key
                    if arg not in args:
                        args.extend([arg, path])
                break

        # Process the rest of the options here
        bool_options = ['debug', 'debug_imports', 'clear_datastore', 'require_indexes',
                        'high_replication', 'enable_sendmail', ]
        for opt in bool_options:
            if options[opt] != False:
                args.append("--%s" % opt)

        str_options = ['datastore_path', 'history_path', 'login_url', 'smtp_host', 'smtp_port',
                       'smtp_user', 'smtp_password',]
        for opt in str_options:
            if options.get(opt, None) != None:
                args.extend(["--%s" % opt, options[opt]])

        # Reset logging level to INFO as dev_appserver will spew tons of debug logs
        logging.getLogger().setLevel(logging.INFO)

        # Append the current working directory to the arguments.
        dev_appserver_main.main([self.progname] + args + [PROJECT_DIR])
| bsd-3-clause |
TRESCLOUD/odoo | openerp/report/render/rml2pdf/utils.py | 48 | 7022 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2003, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
# Matches dynamic [[ expression ]] placeholders embedded in report text.
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
    """Escape the XML special characters ``& < >`` in *s*.

    ``None``/empty input yields ''.  BUGFIX: the replacement targets had
    lost their entity forms (``.replace('&', '&')`` was a no-op), so no
    escaping happened at all; '&' must be escaped first to avoid
    double-escaping the other entities.
    """
    return (s or '').replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def xml2str(s):
    """Inverse of str2xml: turn ``&amp; &lt; &gt;`` entities back into
    ``& < >``.  ``None``/empty input yields ''.  BUGFIX: the entity forms
    had been lost from the replace() calls, making this a no-op.
    """
    return (s or '').replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
def _child_get(node, self=None, tagname=None):
    # Generator over the children of ``node`` matching ``tagname`` (all
    # children when tagname is None), honouring the dynamic rml_loop /
    # rml_except / rml_tag attributes evaluated against self.localcontext.
    # NOTE: ``eval`` here is openerp's safe_eval (aliased in the module
    # imports), not the builtin.
    for n in node:
        # rml_loop: repeat this node once per context dict yielded by the
        # loop expression, updating the shared localcontext each iteration.
        if self and self.localcontext and n.get('rml_loop'):
            for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
                self.localcontext.update(ctx)
                if (tagname is None) or (n.tag==tagname):
                    # rml_except: skip this iteration if the guard raises.
                    if n.get('rml_except', False):
                        try:
                            eval(n.get('rml_except'), {}, self.localcontext)
                        except GeneratorExit:
                            continue
                        except Exception, e:
                            _logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
                            continue
                    # rml_tag: expression yields (tag, attrs) used to build a
                    # substitute node; on failure the original node is yielded.
                    if n.get('rml_tag'):
                        try:
                            (tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
                            n2 = copy.deepcopy(n)
                            n2.tag = tag
                            n2.attrib.update(attr)
                            yield n2
                        except GeneratorExit:
                            yield n
                        except Exception, e:
                            _logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
                            yield n
                    else:
                        yield n
            continue
        # Non-looping path: same rml_except / rml_tag handling for a single
        # occurrence of the node.
        if self and self.localcontext and n.get('rml_except'):
            try:
                eval(n.get('rml_except'), {}, self.localcontext)
            except GeneratorExit:
                continue
            except Exception, e:
                _logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
                continue
        if self and self.localcontext and n.get('rml_tag'):
            try:
                (tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
                n2 = copy.deepcopy(n)
                n2.tag = tag
                n2.attrib.update(attr or {})
                yield n2
                # NOTE(review): clearing tagname here suppresses the plain
                # yield below for the rest of the iteration -- appears
                # intentional, but confirm before touching.
                tagname = ''
            except GeneratorExit:
                pass
            except Exception, e:
                _logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
                pass
        if (tagname is None) or (n.tag==tagname):
            yield n
def _process_text(self, txt):
    """Translate ``txt`` according to the language in the local context,
    replace dynamic ``[[expr]]`` with their real value, then escape
    the result for XML.

    :param str txt: original text to translate (must NOT be XML-escaped)
    :return: translated text, with dynamic expressions evaluated and
             with special XML characters escaped (``&,<,>``).
    """
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    # _regex.split alternates literal text and [[ expression ]] bodies.
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        to_translate = tools.ustr(sps.pop(0))
        result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
        if sps:
            try:
                txt = None
                expr = sps.pop(0)
                # ``eval`` is openerp's safe_eval (module imports), not the builtin.
                txt = eval(expr, self.localcontext)
                if txt and isinstance(txt, basestring):
                    txt = tools.ustr(txt)
            except Exception:
                # Broken expressions render as nothing; errors are ignored
                # on purpose so one bad placeholder can't kill the report.
                pass
            if isinstance(txt, basestring):
                result += txt
            elif txt and (txt is not None) and (txt is not False):
                result += ustr(txt)
    return str2xml(result)
def text_get(node):
    # Concatenate the text of all direct children of ``node`` into one
    # unicode-safe string.  Assumes each child exposes a ``.text``
    # attribute (lxml element) -- confirm against callers.
    return ''.join([ustr(n.text) for n in node])
# (pattern, multiplier) pairs mapping a unit suffix to reportlab points;
# tried in order by unit_get().  A bare number is taken as points (x1).
units = [
    (re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
    (re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
    (re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
    (re.compile('^(-?[0-9\.]+)\s*$'), 1)
]

def unit_get(size):
    # Convert a size string such as '2cm', '10 mm', '1.5in' or a bare
    # number into reportlab points.  Returns False when the string is
    # empty or cannot be parsed.
    global units
    if size:
        if size.find('.') == -1:
            # No '.' present: the value may use the locale's decimal
            # separator (e.g. ','), so normalise it to '.' first.
            decimal_point = '.'
            try:
                decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
            except Exception:
                # nl_langinfo is unavailable on some platforms (Windows).
                decimal_point = locale.localeconv()['decimal_point']
            size = size.replace(decimal_point, '.')
        for unit in units:
            res = unit[0].search(size, 0)
            if res:
                return unit[1]*float(res.group(1))
    return False
def tuple_int_get(node, attr_name, default=None):
    # Parse a comma-separated attribute of ``node`` into integers,
    # e.g. "1,2,3" -> [1, 2, 3]; missing/empty attribute yields ``default``.
    raw = node.get(attr_name)
    if not raw:
        return default
    return map(int, raw.split(','))
def bool_get(value):
    # Interpret an RML attribute as a boolean: "1" or any casing of
    # "yes" is True, everything else False.
    return str(value) == "1" or value.lower() == 'yes'
def attr_get(node, attrs, dict=None):
    # Extract attributes from ``node``: every name in ``attrs`` is parsed
    # as a unit value (see unit_get); ``dict`` maps additional attribute
    # names to a coercion type ('str', 'bool', 'int', 'unit' or 'float').
    # NOTE(review): the parameter name ``dict`` shadows the builtin; kept
    # as-is because keyword callers depend on it.
    if dict is None:
        dict = {}
    res = {}
    for name in attrs:
        if node.get(name):
            res[name] = unit_get(node.get(name))
    for key in dict:
        if node.get(key):
            if dict[key]=='str':
                res[key] = tools.ustr(node.get(key))
            elif dict[key]=='bool':
                res[key] = bool_get(node.get(key))
            elif dict[key]=='int':
                res[key] = int(node.get(key))
            elif dict[key]=='unit':
                res[key] = unit_get(node.get(key))
            elif dict[key] == 'float' :
                res[key] = float(node.get(key))
    return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lukeiwanski/tensorflow | tensorflow/contrib/copy_graph/__init__.py | 55 | 1187 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to copy elements between graphs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.copy_graph.python.util import copy_elements
# pylint: disable=wildcard-import
from tensorflow.contrib.copy_graph.python.util.copy_elements import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, doc_string_modules=[copy_elements])
| apache-2.0 |
amenonsen/ansible | lib/ansible/modules/cloud/misc/xenserver_facts.py | 52 | 5155 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: xenserver_facts
version_added: "2.0"
short_description: get facts reported on xenserver
description:
- Reads data out of XenAPI, can be used instead of multiple xe commands.
author:
- Andy Hill (@andyhky)
- Tim Rupp (@caphrim007)
- Robin Lee (@cheese)
options: {}
'''
EXAMPLES = '''
- name: Gather facts from xenserver
xenserver_facts:
- name: Print running VMs
debug:
msg: "{{ item }}"
with_items: "{{ xs_vms.keys() }}"
when: xs_vms[item]['power_state'] == "Running"
# Which will print:
#
# TASK: [Print running VMs] ***********************************************************
# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
# "item": "Control domain on host: 10.0.13.22",
# "msg": "Control domain on host: 10.0.13.22"
# }
'''
HAVE_XENAPI = False
try:
import XenAPI
HAVE_XENAPI = True
except ImportError:
pass
from ansible.module_utils import distro
from ansible.module_utils.basic import AnsibleModule
class XenServerFacts:
    """Resolve the XenServer version and release codename of this host."""

    def __init__(self):
        # XenServer release number -> marketing codename.
        self.codes = {
            '5.5.0': 'george',
            '5.6.100': 'oxford',
            '6.0.0': 'boston',
            '6.1.0': 'tampa',
            '6.2.0': 'clearwater'
        }

    @property
    def version(self):
        """Distribution version string of the XenServer dom0."""
        return distro.linux_distribution()[1]

    @property
    def codename(self):
        """Codename matching :attr:`version`, or ``None`` when unknown."""
        return self.codes.get(self.version)
def get_xenapi_session():
    """Log in to the local XenAPI instance and return the session."""
    local_session = XenAPI.xapi_local()
    # Local (unix-socket) sessions authenticate with empty credentials.
    local_session.xenapi.login_with_password('', '')
    return local_session
def get_networks(session):
    """Return every network record, keyed by its name label."""
    network_records = session.xenapi.network.get_all_records()
    return change_keys(network_records, key='name_label')
def get_pifs(session):
    """Return physical interfaces keyed by device name (ethN / bondN)."""
    recs = session.xenapi.PIF.get_all_records()
    pifs = change_keys(recs, key='uuid')
    xs_pifs = {}
    # Probe device names eth0..eth6 and their bond0..bond6 counterparts.
    # NOTE(review): range(0, 7) stops at eth6 -- confirm whether eth7 was
    # meant to be covered as well.
    devicenums = range(0, 7)
    for pif in pifs.values():
        for eth in devicenums:
            interface_name = "eth%s" % (eth)
            bond_name = interface_name.replace('eth', 'bond')
            if pif['device'] == interface_name:
                xs_pifs[interface_name] = pif
            elif pif['device'] == bond_name:
                xs_pifs[bond_name] = pif
    return xs_pifs
def get_vlans(session):
    """Return every VLAN record, keyed by its tag."""
    vlan_records = session.xenapi.VLAN.get_all_records()
    return change_keys(vlan_records, key='tag')
def change_keys(recs, key='uuid', filter_func=None):
    """Re-key a xapi record dict by ``rec[key]`` instead of the opaque ref.

    The original ref is preserved under ``rec['ref']``.  Records for which
    *filter_func* returns a falsy value are dropped.  Field values exposing a
    ``value`` attribute (e.g. ``xmlrpc.client.DateTime``, which is not simply
    convertible to ``str``) are flattened to that attribute.
    """
    rekeyed = {}
    for ref, rec in recs.items():
        if filter_func is not None and not filter_func(rec):
            continue
        # Flatten xmlrpc wrapper objects to their plain value in place.
        for field, field_value in rec.items():
            if hasattr(field_value, "value"):
                rec[field] = field_value.value
        rekeyed[rec[key]] = rec
        rec['ref'] = ref
    return rekeyed
def get_host(session):
    """Return the record of the single host this session talks to."""
    host_refs = session.xenapi.host.get_all()
    # Stand-alone host: the first (and only) ref is the one we want.
    return session.xenapi.host.get_record(host_refs[0])
def get_vms(session):
    """Return all VMs keyed by name label, or ``None`` when there are none."""
    vm_records = session.xenapi.VM.get_all_records()
    if not vm_records:
        return None
    return change_keys(vm_records, key='name_label')
def get_srs(session):
    """Return all storage repositories keyed by name label, or ``None``."""
    sr_records = session.xenapi.SR.get_all_records()
    if not sr_records:
        return None
    return change_keys(sr_records, key='name_label')
def main():
    """Ansible entry point: gather XenServer facts and exit with the result."""
    module = AnsibleModule({})
    if not HAVE_XENAPI:
        module.fail_json(changed=False, msg="python xen api required for this module")
    obj = XenServerFacts()
    try:
        session = get_xenapi_session()
    except XenAPI.Failure as e:
        module.fail_json(msg='%s' % e)
    # Version facts are always reported.
    data = {
        'xenserver_version': obj.version,
        'xenserver_codename': obj.codename
    }
    xs_networks = get_networks(session)
    xs_pifs = get_pifs(session)
    xs_vlans = get_vlans(session)
    xs_vms = get_vms(session)
    xs_srs = get_srs(session)
    # Only include fact groups that actually contain data.
    if xs_vlans:
        data['xs_vlans'] = xs_vlans
    if xs_pifs:
        data['xs_pifs'] = xs_pifs
    if xs_networks:
        data['xs_networks'] = xs_networks
    if xs_vms:
        data['xs_vms'] = xs_vms
    if xs_srs:
        data['xs_srs'] = xs_srs
    module.exit_json(ansible_facts=data)
if __name__ == '__main__':
main()
| gpl-3.0 |
sanyaade-iot/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/mbcssm.py | 168 | 17866 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa
| lgpl-2.1 |
benjaminpope/whisky | Seeing/ex_atmosphere.py | 2 | 4261 | # ---------------------------------------------------- ex_atmosphere.py ----------------------------------------------
# Author: Alexey Latyshev --------------------------------------------------------------------------------------------
# ------------------- This file contains an example of atmospheric seeing modelling ----------------------------------
# ====================================================================================================================
from phasescreen import phaseScreen
from phasescreen import getPhasesEvolution
from display import displayImages
from display import displayImage
from pupil import getNHolesMask
from pupil import getAnnulusMask
from pupil import getFullMask
from sim import getFocalImage
from sim import getFocalImages
from common_tasks import mas2rad
import numpy as np
# --- Simulation parameters -------------------------------------------------
pupilSize=5.0 # pupil diameter in meters (NB: MUST be smaller than phasescreen)
scale=100.0 # scale factor (pixels/m)
v=10.0 # wind velocity (m/s)
sfrq=2 # number of samples per second
stime=2.0 # desired sampling time (0 = maximum)
atmosphereSize=10.0 # atmosphere patch size in meters
plateScale=11.5 # plate scale (mas/pixel)
wl=1e-6 # base wavelength (m)
chip_px=512 # number of elements per chip (1dim)
exp_time=0.1 # exposure time in seconds
#----------old--------
# generating phaseScreen (kept for reference)
#wl_delay=phaseScreen(atmosphereSize,scale,r0=0.2,seed=0,ao=0,maxVar=1e-5)
#wl_delay=phaseScreen(atmosphereSize,scale,r0=0.2,seed=0,ao=0,maxVar=0)
# converting delay to phases from 0 to 2*pi
#phases=delayToPhase(wl_delay,wl=wl)
#---------------------
# --- simulating atmosphere
# actuators number is for scale=100.0
act_num=394 # number of actuators per aperture (strehl=0.6)
# act_num=193 # number of actuators per aperture (strehl=0.4)
# act_num=99 # number of actuators per aperture (strehl=0.2)
# act_num=65 # number of actuators per aperture (strehl=0.1)
# act_num=47 # number of actuators per aperture (strehl=0.05)
# act_num=34 # number of actuators per aperture (strehl=0.02)
# act_num=28 # number of actuators per aperture (strehl=0.01)
# NOTE(review): `ao` is computed but phaseScreen below is called with ao=0,
# i.e. without adaptive-optics correction -- confirm this is intended.
ao=np.sqrt(act_num)/pupilSize # actuators density
# generating phaseScreen
phases=phaseScreen(atmosphereSize,scale,r0=0.2,seed=0,ao=0,maxVar=0)
# generating the time evolution of the pupil-plane phase screens
pupilScreens=getPhasesEvolution(phases,pupilSize,scale,v,sfrq,stime,expTime=exp_time)
# input Image
pupil = np.ones((scale*pupilSize,scale*pupilSize),dtype='complex') + 0j # plane wave
# input N holes Mask
mask_n=getNHolesMask(diam=pupilSize,holeDiam=0.5*pupilSize,holeCoords=[0.,0.,0.2*pupilSize,0.2*pupilSize],border=0.0,scale=scale)
# input annulus Mask
mask_ann=getAnnulusMask(diam=pupilSize,innDiam=0.9*pupilSize,border=0.0,scale=scale)
# input full pupil Mask
mask_full=getFullMask(diam=pupilSize,border=0.0,scale=scale)
# load jwst mask (precomputed array on disk)
mask_jwst=np.load('jwst_1000.npy')
# golay 9 mask
mask_golay9=getNHolesMask(diam=pupilSize*2,holeDiam=0.01*pupilSize,
    holeCoords=[-2.7,-1.56,-2.7,1.56,-1.35,0.78,1.35,-3.9,1.35,-2.34,1.35,3.9,2.7,1.56,4.05,-2.34,4.05,2.34],border=0.0,scale=scale)
# active mask used in the propagation below
mask=mask_n
# scale factor for FFT
#scale_f=1.22*wl/(np.power(pupilSize,2)*mas2rad(plateScale)*scale)
scale_f=wl/(pupilSize*mas2rad(plateScale))
# propagating (1 image)
p=getFocalImage(pupil,mask,pupilScreens[0],scale=scale_f,cropPix=chip_px)
displayImage(p**0.1,
    axisSize=[-plateScale*len(p)/2,plateScale*len(p)/2,-plateScale*len(p)/2,plateScale*len(p)/2],
    xlabel='mas', ylabel='mas', title='Power Spectrum **0.1',showColorbar=True,flipY=True,cmap='gray')
# display atmosphere
displayImage(phases,axisSize=[-pupilSize/2,pupilSize/2,-pupilSize/2,pupilSize/2],xlabel='m', ylabel='m', title='Pupil Phase Screen',showColorbar=True,flipY=True)
# display mask
displayImage(mask, axisSize=[-pupilSize/2,pupilSize/2,-pupilSize/2,pupilSize/2],
    xlabel='m', ylabel='m', title='Pupil Mask',showColorbar=False,flipY=True, cmap='binary_r')
# propagating (all images)
ps=getFocalImages(pupil,mask,pupilScreens,scale=scale_f,cropPix=chip_px)
displayImages(ps**0.1,delay=0.3,
    axisSize=[-plateScale*len(p[0])/2,plateScale*len(p[0])/2,-plateScale*len(p[0])/2,plateScale*len(p[0])/2],
    xlabel='mas', ylabel='mas', title='Power Spectrum **0.1',figureNum=1,showColorbar=True,flipY=True,cmap='gray')
| gpl-3.0 |
Boussadia/SimpleScraper | scraper.py | 1 | 6078 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import logging
import sys
import urllib
import mechanize
import cookielib
import urlparse
class Singleton(object):
    """Base class whose subclasses only ever get one shared instance.

    The first instantiation of each subclass is cached in ``_instances``;
    subsequent calls return the cached object.  Note that ``__init__`` still
    runs on every call -- subclasses must guard re-initialisation if that
    matters to them.
    """
    _instances = {}

    def __new__(class_, *args, **kwargs):
        if class_ not in class_._instances:
            # BUGFIX: do not forward constructor args to object.__new__ --
            # on Python 3 that raises TypeError when __new__ is overridden
            # (the arguments are consumed by __init__ instead).
            class_._instances[class_] = super(Singleton, class_).__new__(class_)
        return class_._instances[class_]
class BaseScraper(Singleton):
"""
Base Crawler class.
The Crawler has to perform:
- GET methods
- POST methods
- handle cookies
In order to take into account for the network failures, it will handle a certain amount of retries.
Every time a page is fetched, it has to return a code as well, he are the definition of the codes:
- -1 : Network failure (after N number of attempts)
- 200 : every thing is ok
- 404 : page not found
- 500 : server error
"""
# The number of times the crawler has to retry to fetch html page when a network failure error occurs
MAX_NETWORK_FAILURE_TRIES = 10
INTERVAL = 2
# interval between 2 http request in seconds
def __init__(self):
# Mechanize Browser
self.browser = mechanize.Browser()
# Cookie Jar
self.jar = cookielib.LWPCookieJar()
self.browser.set_cookiejar(self.jar)
# Browser options
self.browser.set_handle_equiv(True)
# self.browser.set_handle_gzip(True)
self.browser.set_handle_redirect(True)
self.browser.set_handle_referer(True)
self.browser.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
self.browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
self.__network_failures_retry__ = 0
# time of last http request
self.last_time = 0
def do_request(self, url ='', data = {}, request = None, is_post = False, url_fix = True):
"""
Base method to perform a request to a url.
Input :
- url (string) : url of page to retrive
- data (hash {'param': 'value_param'}) : data to send to server, if an empty hash, it is not taken into account
Output:
- (html, code) : html as string and code as defined in the class docstring.
"""
if url_fix:
# Making sure it is utf8 encoded
url = self.url_fix(url)
# Request cannot happen inside a cetain lapse of time (INTERVAL seconds in between)
now = time.time()
if now-self.last_time<BaseScraper.INTERVAL:
print 'Waiting %d ms in order not to flood server'%((BaseScraper.INTERVAL+self.last_time-now)*1000)
time.sleep(BaseScraper.INTERVAL+self.last_time-now)
return self.do_request( url, data, request, is_post= is_post, url_fix = url_fix)
self.last_time = now
# Encapsulating request in try block in order to catch HTTPError
try:
if request is not None:
self.jar.add_cookie_header(request)
response = self.browser.open(request)
print "Fetching page from "+response.geturl()
print "Using personalized Request"
html = response.read()
elif not is_post:
print "Fetching page from "+url
print "GET method used"
response = self.browser.open(url)
html = response.read()
else:
print "Fetching page from "+url
print "POST method used"
form_data = urllib.urlencode(data)
response = self.browser.open(url, form_data)
html = response.read()
self.__network_failures_retry__ = 0 # Everything went OK, setting variable for network failure to 0
return html, 200
except mechanize.HTTPError, e:
if e.code == 404:
print "Error when retrieving "+url+" : page not found."
return None, 404
else:
print 'Error : %s'%(e)
self.__network_failures_retry__ = self.__network_failures_retry__ + 1
if self.__network_failures_retry__ < BaseScraper.MAX_NETWORK_FAILURE_TRIES:
print "Error occured, retrying in "+str(self.__network_failures_retry__)+" s"
time.sleep(self.__network_failures_retry__)
return self.do_request(url, data, is_post = is_post, url_fix = url_fix)
else:
print "Error when retrieving "+url
return None, e.code
except mechanize.URLError, e:
print 'Error : %s'%(e)
self.__network_failures_retry__ = self.__network_failures_retry__ + 1
if self.__network_failures_retry__ < BaseScraper.MAX_NETWORK_FAILURE_TRIES:
print "Error occured, retrying in "+str(self.__network_failures_retry__)+" s"
time.sleep(self.__network_failures_retry__)
return self.do_request(url, data, is_post = is_post, url_fix = url_fix)
else:
print "Error when retrieving "+url
return None, -1
except Exception, e:
print 'Unexpected error occured.'
print e
return None, -1
def get(self,url, url_fix = True):
"""
Executes a GET url fetch.
"""
return self.do_request(url, url_fix = url_fix)
def post(self, url, data = {}, url_fix = True):
"""
Executes a POST url fetch.
"""
return self.do_request(url, data = data, is_post=True, url_fix = url_fix)
def empty_cookie_jar(self):
"""
Removing all cookies from cookie jar
"""
self.jar.clear()
def get_cookie(self, name = None):
"""
Get cookie by name
Input :
- name (string) : name of cookie.
Output :
- hash : {
'name': ...,
'value': ...
}
"""
cookie = {}
if name:
for c in self.jar:
if name == c.name:
cookie['name'] = c.name
cookie['value'] = c.value
return cookie
def url_fix(self, s, charset='utf-8'):
"""
Sometimes you get an URL by a user that just isn't a real
URL because it contains unsafe characters like ' ' and so on. This
function can fix some of the problems in a similar way browsers
handle data entered by the user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
:param charset: The target charset for the URL if the url was
given as unicode string.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
| mit |
abhikumar22/MYBLOG | blg/Lib/site-packages/django-1.11.7-py3.6.egg/django/middleware/http.py | 64 | 1670 | from django.utils.cache import (
cc_delim_re, get_conditional_response, set_response_etag,
)
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import parse_http_date_safe
class ConditionalGetMiddleware(MiddlewareMixin):
"""
Handles conditional GET operations. If the response has an ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified. An ETag
header is added if needed.
"""
def process_response(self, request, response):
# It's too late to prevent an unsafe request with a 412 response, and
# for a HEAD request, the response body is always empty so computing
# an accurate ETag isn't possible.
if request.method != 'GET':
return response
if self.needs_etag(response) and not response.has_header('ETag'):
set_response_etag(response)
etag = response.get('ETag')
last_modified = response.get('Last-Modified')
if last_modified:
last_modified = parse_http_date_safe(last_modified)
if etag or last_modified:
return get_conditional_response(
request,
etag=etag,
last_modified=last_modified,
response=response,
)
return response
def needs_etag(self, response):
"""
Return True if an ETag header should be added to response.
"""
cache_control_headers = cc_delim_re.split(response.get('Cache-Control', ''))
return all(header.lower() != 'no-store' for header in cache_control_headers)
| gpl-3.0 |
lifenggg/assaydata | get_NDCG_test.py | 1 | 4798 | import math
import numpy
from os import path
def get_ndcg(test_path, pred_path):
    """Compute NDCG@10, NDCG@5 and NDCG@all for a ranking prediction.

    *test_path* holds one example per line, space-separated, with the true
    relevance score in the first field.  *pred_path* holds one predicted
    score per line, in the same example order.

    Returns ``[ndcg10, ndcg5, ndcg_all, n]`` where ``n`` is the number of
    examples.  Returns ``[0, 0, 0, 0]`` when the prediction file is missing
    or either file is empty.  A cut-off larger than ``n`` is reported as 0,
    matching the original behaviour.  (Per-iteration debug prints were
    removed; the length-mismatch warning is kept.)
    """
    if not path.isfile(pred_path):
        return [0, 0, 0, 0]

    test_rank = []
    with open(test_path) as fp:
        for line in fp:
            test_rank.append(float(line.split(' ')[0]))

    pred_rank = []
    with open(pred_path) as fp:
        for line in fp:
            pred_rank.append(float(line))

    # BUGFIX: empty input previously crashed with an IndexError below.
    if not test_rank or not pred_rank:
        return [0, 0, 0, 0]

    if len(test_rank) != len(pred_rank):
        print("prediction and test set should have the same length")

    # Indices sorted by descending score: the ideal ranking (from the test
    # labels) and the ranking produced by the model.
    index_test = sorted(range(len(test_rank)), key=lambda k: test_rank[k], reverse=True)
    index_pred = sorted(range(len(pred_rank)), key=lambda k: pred_rank[k], reverse=True)

    ndcg10 = 0
    ndcg5 = 0
    ndcgall = 0

    # True scores are linearly rescaled to the conventional 0..5 relevance
    # range (0 = bad, 5 = excellent).
    min_range = test_rank[index_test[-1]]
    max_range = test_rank[index_test[0]]
    spread = max_range - min_range
    # BUGFIX: identical labels previously raised ZeroDivisionError.  Any
    # ordering of identically-labelled examples is ideal, so NDCG is 1.
    if spread == 0:
        n = len(index_pred)
        return [1 if n >= 10 else 0, 1 if n >= 5 else 0, 1, n]

    DCG = 0
    best_DCG = 0
    for rank in range(len(index_pred)):  # renamed from `iter` (builtin shadow)
        i = index_pred[rank]   # example the model placed at this position
        j = index_test[rank]   # example the ideal ranking places here
        score = 5 * (test_rank[i] - min_range) / spread
        best_score = 5 * (test_rank[j] - min_range) / spread
        gain = 2 ** score - 1
        best_gain = 2 ** best_score - 1
        discount = 1 / math.log(rank + 2, 2)
        DCG += discount * gain
        best_DCG += discount * best_gain
        # best_DCG > 0 from the first iteration on, since best_score there
        # is the maximum (5) and spread > 0.
        ndcg = DCG / best_DCG
        if rank == 9:
            ndcg10 = ndcg
        if rank == 4:
            ndcg5 = ndcg
        if rank == len(index_pred) - 1:
            ndcgall = ndcg
    return [ndcg10, ndcg5, ndcgall, len(index_pred)]
if __name__ == "__main__":
for rank_function in [0]: # svm_rank:0 or svm_light:1
if rank_function == 0:
print 'for svm ranking \n'
output_path = 'NDCG_result/svm_rank.result'
else:
print 'for svm light \n'
output_path = 'NDCG_result/svm_light.result'
#f = open('assaylist', 'r')
#for line in f:
# for each assay in the assay list
#assayname = line[:-1]
assayname = '625248.csv.out.2'
ndcg10_this_assay = []
ndcg5_this_assay = []
ndcgall_this_assay = []
for fold_id in [4]:
mycase = assayname + '_' +str(fold_id)
test_path = 'testdata/' + mycase + '.test'
if rank_function == 0:
pred_path = 'svm_rank_pred/' + mycase + '.pred'
else:
pred_path = 'svm_light_pred/' + mycase + '.pred'
[ndcg10,ndcg5,ndcgall, rank_length] = get_ndcg(test_path, pred_path)
if ndcg10 != 0:
ndcg10_this_assay.append(ndcg10)
if ndcg5 != 0:
ndcg5_this_assay.append(ndcg5)
if ndcgall != 0:
ndcgall_this_assay.append(ndcgall)
# average of the ndcg
avg_ndcg10 = numpy.average(ndcg10_this_assay)
avg_ndcg5 = numpy.average(ndcg5_this_assay)
avg_ndcgall= numpy.average(ndcgall_this_assay)
#variance of the ndcg
var_ndcg10 = numpy.var(ndcg10_this_assay)
var_ndcg5 = numpy.var(ndcg5_this_assay)
var_ndcgall = numpy.var(ndcgall_this_assay)
print assayname + ' ' + str(rank_length)+ ' 5 ' + str(avg_ndcg5) + ' ' + str(var_ndcg5)
print assayname + ' ' + str(rank_length)+ ' 10 ' + str(avg_ndcg10) + ' ' + str(var_ndcg10)
print assayname + ' ' + str(rank_length)+ ' all ' + str(avg_ndcgall) + ' ' + str(var_ndcgall)
| gpl-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/test/test_threaded_import.py | 136 | 2578 | # This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import unittest
from test.test_support import verbose, TestFailed, import_module
# 'thread' is the Python 2 low-level threading module; import_module skips
# the whole test cleanly when it is unavailable.
thread = import_module('thread')
# Serialises updates to the shared counter N in task().
critical_section = thread.allocate_lock()
# Held by the main thread; released by the last worker to finish.
done = thread.allocate_lock()
def task():
    """Worker body: import a module while other workers do the same, then
    decrement the shared counter N, releasing `done` when it reaches zero."""
    global N, critical_section, done
    import random
    random.randrange(1, 3)
    # critical_section must be released before `done` is: once `done` is
    # released the main thread may exit and set module globals to None as
    # part of teardown, after which .release() would raise AttributeError.
    with critical_section:
        N -= 1
        last_worker = N == 0
    if last_worker:
        done.release()
def test_import_hangers():
    """Import test.threaded_import_hangers and fail if it recorded any
    errors in its module-level ``errors`` list during import."""
    import sys
    if verbose:
        print "testing import hangers ...",
    import test.threaded_import_hangers
    try:
        if test.threaded_import_hangers.errors:
            raise TestFailed(test.threaded_import_hangers.errors)
        elif verbose:
            print "OK."
    finally:
        # In case this test is run again, make sure the helper module
        # gets loaded from scratch again.
        del sys.modules['test.threaded_import_hangers']
# Tricky: When regrtest imports this module, the thread running regrtest
# grabs the import lock and won't let go of it until this module returns.
# All other threads attempting an import hang for the duration. Since
# this test spawns threads that do little *but* import, we can't do that
# successfully until after this module finishes importing and regrtest
# regains control. To make this work, a special case was added to
# regrtest to invoke a module's "test_main" function (if any) after
# importing it.
def test_main(): # magic name! see above
    """Spawn batches of N threads that all run task() (each importing the
    'random' module concurrently), waiting for each batch to finish."""
    global N, done
    import imp
    if imp.lock_held():
        # This triggers on, e.g., from test import autotest.
        raise unittest.SkipTest("can't run when import lock is held")

    # `done` starts held; the last worker of each batch releases it.
    done.acquire()
    for N in (20, 50) * 3:
        if verbose:
            print "Trying", N, "threads ...",
        for i in range(N):
            thread.start_new_thread(task, ())
        # Block until the last worker in this batch releases `done`.
        done.acquire()
        if verbose:
            print "OK."
    done.release()

    test_import_hangers()

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
awemulya/fieldsight-kobocat | onadata/apps/fsforms/models.py | 1 | 37515 | from __future__ import unicode_literals
import datetime
import os
import json
import re
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Max
from django.db.models.signals import post_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from jsonfield import JSONField
from pyxform import create_survey_from_xls, SurveyElementBuilder
from pyxform.xform2json import create_survey_element_from_xml
from xml.dom import Node
from onadata.apps.fieldsight.models import Site, Project, Organization
from onadata.apps.fsforms.fieldsight_models import IntegerRangeField
from onadata.apps.fsforms.utils import send_message, send_message_project_form, check_version
from onadata.apps.logger.models import XForm, Instance
from onadata.apps.logger.xform_instance_parser import clean_and_parse_xml
from onadata.apps.viewer.models import ParsedInstance
from onadata.apps.fsforms.fsxform_responses import get_instances_for_field_sight_form
from onadata.settings.local_settings import XML_VERSION_MAX_ITER
#To get domain to give complete url for app devs to make them easier.
from django.contrib.sites.models import Site as DjangoSite
from onadata.libs.utils.model_tools import set_uuid
# (value, label) choice tuples shared by the model fields below.
SHARED_LEVEL = [(0, 'Global'), (1, 'Organization'), (2, 'Project'),]
SCHEDULED_LEVEL = [(0, 'Daily'), (1, 'Weekly'), (2, 'Monthly'),]
FORM_STATUS = [(0, 'Pending'), (1, 'Rejected'), (2, 'Flagged'), (3, 'Approved'), ]
class FormGroup(models.Model):
    """Named grouping of forms, scoped globally, to an organization or to a
    project."""
    name = models.CharField(max_length=256, unique=True)
    description = models.TextField(blank=True, null=True)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    creator = models.ForeignKey(User, related_name="form_group")
    # Scope flags: is_global, or bound to an organization and/or project.
    is_global = models.BooleanField(default=False)
    organization = models.ForeignKey(Organization, null=True, blank=True)
    project = models.ForeignKey(Project, null=True, blank=True)
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        db_table = 'fieldsight_forms_group'
        verbose_name = _("FieldSight Form Group")
        verbose_name_plural = _("FieldSight Form Groups")
        ordering = ("-date_modified",)

    def __unicode__(self):
        return getattr(self, "name", "")
class Stage(models.Model):
    """A stage or (when ``stage`` is set) a sub-stage grouping staged forms
    for a site or a project."""
    name = models.CharField(max_length=256)
    description = models.TextField(blank=True, null=True)
    group = models.ForeignKey(FormGroup,related_name="stage", null=True, blank=True)
    order = IntegerRangeField(min_value=0, max_value=30,default=0)
    # Parent stage; NULL for a main stage, set for a sub-stage.
    stage = models.ForeignKey('self', blank=True, null=True, related_name="parent")
    shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    site = models.ForeignKey(Site, related_name="stages", null=True, blank=True)
    project = models.ForeignKey(Project, related_name="stages", null=True, blank=True)
    ready = models.BooleanField(default=False)
    project_stage_id = models.IntegerField(default=0)
    weight = models.IntegerField(default=0)
    # Bug fix: ``default=[]`` creates ONE list at class-definition time that
    # every instance shares (and may mutate); Django requires a callable
    # such as ``list`` for mutable field defaults.
    tags = ArrayField(models.IntegerField(), default=list)
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        db_table = 'fieldsight_forms_stage'
        verbose_name = _("FieldSight Form Stage")
        verbose_name_plural = _("FieldSight Form Stages")
        ordering = ("order",)

    def save(self, *args, **kwargs):
        # A sub-stage always inherits its parent's form group.
        if self.stage:
            self.group = self.stage.group
        super(Stage, self).save(*args, **kwargs)

    def get_display_name(self):
        return "Stage" if not self.stage else "SubStage"

    def is_main_stage(self):
        return True if not self.stage else False

    def sub_stage_count(self):
        if not self.stage:
            return Stage.objects.filter(stage=self).count()
        return 0

    def form_exists(self):
        return True if FieldSightXF.objects.filter(stage=self).count() > 0 else False

    def form_name(self):
        if not FieldSightXF.objects.filter(stage=self).count():
            return ""
        return FieldSightXF.objects.filter(stage=self)[0].xf.title

    def form(self):
        if not FieldSightXF.objects.filter(stage=self).count():
            return None
        return FieldSightXF.objects.filter(stage=self)[0]

    def active_substages(self):
        # 'parent' is the reverse FK from sub-stages back to this stage.
        return self.parent.filter(stage_forms__isnull=False)

    def get_sub_stage_list(self):
        if not self.stage:
            return Stage.objects.filter(stage=self).values('stage_forms__id','name','stage_id')
        return []

    @property
    def xf(self):
        # Underlying XForm pk of the attached form, or None when unattached.
        return FieldSightXF.objects.filter(stage=self)[0].xf.pk if self.form_exists() else None

    @property
    def form_status(self):
        # 1 when at least one Approved (status 3) submission exists.
        status = 0
        if self.stage_forms.site_form_instances.filter(form_status=3).exists():
            status = 1
        return status

    @property
    def form_count(self):
        return self.stage_forms.site_form_instances.all().count()

    @staticmethod
    def site_submission_count(id, site_id):
        return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(site_id=site_id).count()

    @staticmethod
    def rejected_submission_count(id, site_id):
        return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=1, site_id=site_id).count()

    @staticmethod
    def flagged_submission_count(id, site_id):
        return Stage.objects.get(pk=id).stage_forms.project_form_instances.filter(form_status=2, site_id=site_id).count()

    @classmethod
    def get_order(cls, site, project, stage):
        """Next 1-based order value for a new (sub-)stage under ``site`` or
        ``project``; ``stage`` is the parent when creating a sub-stage."""
        if site:
            if not Stage.objects.filter(site=site).exists():
                return 1
            elif stage is not None:
                if not Stage.objects.filter(stage=stage).exists():
                    return 1
                else:
                    mo = Stage.objects.filter(stage=stage).aggregate(Max('order'))
                    order = mo.get('order__max', 0)
                    return order + 1
            else:
                mo = Stage.objects.filter(site=site, stage__isnull=True).aggregate(Max('order'))
                order = mo.get('order__max', 0)
                return order + 1
        else:
            if not Stage.objects.filter(project=project).exists():
                return 1
            elif stage is not None:
                if not Stage.objects.filter(stage=stage).exists():
                    return 1
                else:
                    mo = Stage.objects.filter(stage=stage).aggregate(Max('order'))
                    order = mo.get('order__max', 0)
                    return order + 1
            else:
                mo = Stage.objects.filter(project=project, stage__isnull=True).aggregate(Max('order'))
                order = mo.get('order__max', 0)
                return order + 1

    def __unicode__(self):
        return getattr(self, "name", "")
class Days(models.Model):
    """Weekday lookup row referenced by Schedule.selected_days."""
    day = models.CharField(max_length=9)
    index = models.IntegerField()  # position of the day within the week

    def __unicode__(self):
        return getattr(self, "day", "")
class Schedule(models.Model):
    """Recurring data-collection schedule for a site or a project."""
    name = models.CharField("Schedule Name", max_length=256, blank=True, null=True)
    site = models.ForeignKey(Site, related_name="schedules", null=True, blank=True)
    project = models.ForeignKey(Project, related_name="schedules", null=True, blank=True)
    date_range_start = models.DateField(default=datetime.date.today)
    date_range_end = models.DateField(default=datetime.date.today)
    # Weekdays on which the schedule applies.
    selected_days = models.ManyToManyField(Days, related_name='days', blank=True,)
    shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
    schedule_level_id = models.IntegerField(default=0, choices=SCHEDULED_LEVEL)
    date_created = models.DateTimeField(auto_now_add=True)
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        db_table = 'fieldsight_forms_schedule'
        verbose_name = _("Form Schedule")
        verbose_name_plural = _("Form Schedules")
        ordering = ('-date_range_start', 'date_range_end')

    def form_exists(self):
        # True when a FieldSightXF has been attached to this schedule.
        return True if FieldSightXF.objects.filter(schedule=self).count() > 0 else False

    def form(self):
        return FieldSightXF.objects.filter(schedule=self)[0] if self.form_exists() else None

    @property
    def xf(self):
        # Underlying XForm pk of the attached form, or None when unattached.
        return FieldSightXF.objects.filter(schedule=self)[0].xf.pk if self.form_exists() else None

    def __unicode__(self):
        return getattr(self, "name", "")
class DeletedXForm(models.Model):
    """Marker row recording that an XForm has been (soft-)deleted."""
    xf = models.OneToOneField(XForm, related_name="deleted_xform")
    # NOTE(review): auto_now refreshes this on every save; auto_now_add may
    # have been intended for a creation timestamp -- confirm.
    date_created = models.DateTimeField(auto_now=True)
class FieldSightXF(models.Model):
    """Binds an XForm to a FieldSight site or project as one of four kinds
    of form: general, staged, scheduled or survey (see form_type())."""
    xf = models.ForeignKey(XForm, related_name="field_sight_form")
    site = models.ForeignKey(Site, related_name="site_forms", null=True, blank=True)
    project = models.ForeignKey(Project, related_name="project_forms", null=True, blank=True)
    is_staged = models.BooleanField(default=False)
    is_scheduled = models.BooleanField(default=False)
    # NOTE(review): auto_now makes date_created change on every save;
    # auto_now_add was probably intended (cf. FormGroup). Left unchanged
    # because the Meta ordering below depends on this field -- confirm.
    date_created = models.DateTimeField(auto_now=True)
    date_modified = models.DateTimeField(auto_now=True)
    schedule = models.OneToOneField(Schedule, blank=True, null=True, related_name="schedule_forms")
    stage = models.OneToOneField(Stage, blank=True, null=True, related_name="stage_forms")
    shared_level = models.IntegerField(default=2, choices=SHARED_LEVEL)
    form_status = models.IntegerField(default=0, choices=FORM_STATUS)
    # Project-level parent form when this row is a site-level copy.
    fsform = models.ForeignKey('self', blank=True, null=True, related_name="parent")
    is_deployed = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False)
    is_survey = models.BooleanField(default=False)
    from_project = models.BooleanField(default=True)
    default_submission_status = models.IntegerField(default=0, choices=FORM_STATUS)
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        db_table = 'fieldsight_forms_data'
        # unique_together = (("xf", "site"), ("xf", "is_staged", "stage"),("xf", "is_scheduled", "schedule"))
        verbose_name = _("XForm")
        verbose_name_plural = _("XForms")
        ordering = ("-date_created",)

    def url(self):
        # NOTE(review): ``self.id_string`` is not defined on this model as
        # written -- confirm whether this method is still used anywhere.
        return reverse(
            "download_fild_sight_form",
            kwargs={
                "site": self.site.username,
                "id_string": self.id_string
            }
        )

    def getname(self):
        """Human-readable name: '<form type> form <title>'."""
        return '{0} form {1}'.format(self.form_type(),
                                     self.xf.title,)

    def getresponces(self):
        return get_instances_for_field_sight_form(self.pk)

    def getlatestsubmittiondate(self):
        # Most recent submission date, site- or project-level.
        if self.site is not None:
            return self.site_form_instances.order_by('-pk').values('date')[:1]
        else:
            return self.project_form_instances.order_by('-pk').values('date')[:1]

    def get_absolute_url(self):
        if self.project:
            # return reverse('forms:project_html_export', kwargs={'fsxf_id': self.pk})
            return reverse('forms:setup-forms', kwargs={'is_project': 1, 'pk': self.project_id})
        else:
            # return reverse('forms:formpack_html_export', kwargs={'fsxf_id': self.pk})
            return reverse('forms:setup-forms', kwargs={'is_project': 0, 'pk': self.site_id})

    def form_type(self):
        """Return 'scheduled' | 'staged' | 'survey' | 'general'."""
        if self.is_scheduled:
            return "scheduled"
        if self.is_staged:
            return "staged"
        if self.is_survey:
            return "survey"
        if not self.is_scheduled and not self.is_staged:
            return "general"

    def form_type_id(self):
        # Id of the schedule/stage this form is attached to, if any.
        if self.is_scheduled and self.schedule:
            return self.schedule.id
        if self.is_staged and self.stage:
            return self.stage.id
        return None

    def stage_name(self):
        if self.stage:
            return self.stage.name

    def schedule_name(self):
        if self.schedule:
            return self.schedule.name

    def clean(self):
        """Validation preventing duplicate stage/schedule/general bindings
        of the same XForm."""
        if self.is_staged:
            if FieldSightXF.objects.filter(stage=self.stage).exists():
                # Bug fix: read pk from the first matching row -- a QuerySet
                # has no .pk, so the original raised AttributeError here.
                if not FieldSightXF.objects.filter(stage=self.stage)[0].pk == self.pk:
                    raise ValidationError({
                        'xf': ValidationError(_('Duplicate Stage Data')),
                    })
        if self.is_scheduled:
            if FieldSightXF.objects.filter(schedule=self.schedule).exists():
                if not FieldSightXF.objects.filter(schedule=self.schedule)[0].pk == self.pk:
                    raise ValidationError({
                        'xf': ValidationError(_('Duplicate Schedule Data')),
                    })
        if not self.is_scheduled and not self.is_staged:
            if self.site:
                if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False, project=self.site.project).exists():
                    raise ValidationError({
                        'xf': ValidationError(_('Form Already Used in Project Level')),
                    })
            else:
                if FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,
                                               site=self.site, project=self.project).exists():
                    if not FieldSightXF.objects.filter(xf=self.xf, is_scheduled=False, is_staged=False,
                                                       site=self.site, project=self.project)[0].pk == self.pk:
                        raise ValidationError({
                            'xf': ValidationError(_('Duplicate General Form Data')),
                        })

    @staticmethod
    def get_xform_id_list(site_id):
        fs_form_list = FieldSightXF.objects.filter(site__id=site_id).order_by('xf__id').distinct('xf__id')
        return [fsform.xf.pk for fsform in fs_form_list]

    @property
    def site_name(self):
        # (stray trailing line-continuation backslash removed; implicitly
        # returns None when no site is set)
        if self.site is not None:
            return u'{}'.format(self.site.name)

    @property
    def site_or_project_display(self):
        if self.site is not None:
            return u'{}'.format(self.site.name)
        return u'{}'.format(self.project.name)

    @property
    def project_info(self):
        # Bug fix: the parent form's pk was computed but never returned.
        if self.fsform:
            return self.fsform.pk
        return None

    @property
    def has_versions(self):
        return self.xf.fshistory.exists()

    def __unicode__(self):
        return u'{}- {}- {}'.format(self.xf, self.site, self.is_staged)
@receiver(post_save, sender=FieldSightXF)
def create_messages(sender, instance, created, **kwargs):
    # Notify on creation of non-staged forms: project-level first, then
    # site-level.
    if instance.project is not None and created and not instance.is_staged:
        send_message_project_form(instance)
    elif created and instance.site is not None and not instance.is_staged:
        send_message(instance)

@receiver(pre_delete, sender=FieldSightXF)
def send_delete_message(sender, instance, using, **kwargs):
    # Only plain site-level forms (not project-level, not staged) trigger a
    # deletion notification.
    if instance.project is not None:
        pass
    elif instance.is_staged:
        pass
    else:
        fxf = instance
        send_message(fxf)

# NOTE(review): redundant -- @receiver above already connected
# create_messages, and Signal.connect de-duplicates identical
# receiver/sender registrations, so this line is a no-op.
post_save.connect(create_messages, sender=FieldSightXF)
class FieldSightParsedInstance(ParsedInstance):
    """Proxy over ParsedInstance that merges extra FieldSight key/values
    into the document written to Mongo."""
    # Extra key/values merged into the mongo dict during the next save().
    _update_fs_data = None

    class Meta:
        proxy = True

    def save(self, *args, **kwargs):
        # Pop our private kwarg so the parent save() never sees it.
        self._update_fs_data = kwargs.pop('update_fs_data', {})
        super(FieldSightParsedInstance, self).save(*args, **kwargs)

    def to_dict_for_mongo(self):
        mongo_dict = super(FieldSightParsedInstance, self).to_dict_for_mongo()
        mongo_dict.update(self._update_fs_data)
        return mongo_dict

    @staticmethod
    def get_or_create(instance, update_data=None):
        """Fetch or create the parsed record for ``instance``, merging
        ``update_data`` into the mongo document either way.

        Returns (fspi, created)."""
        if update_data is None:
            update_data = {}
        created = False
        try:
            fspi = FieldSightParsedInstance.objects.get(instance__pk=instance.pk)
            # NOTE(review): ``async`` became a reserved word in Python 3.7;
            # this kwarg (here and below) makes the module Python-2-only.
            fspi.save(update_fs_data=update_data, async=False)
        except FieldSightParsedInstance.DoesNotExist:
            created = True
            fspi = FieldSightParsedInstance(instance=instance)
            fspi.save(update_fs_data=update_data, async=False)
        return fspi, created
class FInstanceManager(models.Manager):
    """Default manager for FInstance: excludes soft-deleted submissions."""
    def get_queryset(self):
        base = super(FInstanceManager, self).get_queryset()
        return base.filter(is_deleted=False)
class FInstanceDeletedManager(models.Manager):
    """Manager exposing only soft-deleted FInstance rows."""
    def get_queryset(self):
        base = super(FInstanceDeletedManager, self).get_queryset()
        return base.filter(is_deleted=True)
class FInstance(models.Model):
    """FieldSight wrapper around a raw logger Instance: links a submission
    to its site/project and form binding, and tracks its review status."""
    instance = models.OneToOneField(Instance, related_name='fieldsight_instance')
    site = models.ForeignKey(Site, null=True, related_name='site_instances')
    project = models.ForeignKey(Project, null=True, related_name='project_instances')
    site_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='site_form_instances', on_delete=models.SET_NULL)
    project_fxf = models.ForeignKey(FieldSightXF, null=True, related_name='project_form_instances')
    form_status = models.IntegerField(null=True, blank=True, choices=FORM_STATUS)
    date = models.DateTimeField(auto_now=True)
    submitted_by = models.ForeignKey(User, related_name="supervisor")
    is_deleted = models.BooleanField(default=False)
    version = models.CharField(max_length=255, default=u'')
    objects = FInstanceManager()  # default: hides soft-deleted rows
    deleted_objects = FInstanceDeletedManager()  # soft-deleted rows only
    logs = GenericRelation('eventlog.FieldSightLog')

    @property
    def get_version(self):
        # Form version recorded inside the submission payload.
        return self.instance.json['__version__']

    def save(self, *args, **kwargs):
        self.version = self.get_version
        # Staged project forms advance the site's stage progress; any other
        # site-bound submission just refreshes the site status.
        if self.project_fxf is not None and self.project_fxf.is_staged and self.site is not None:
            self.site.update_current_progress()
        elif self.site is not None:
            self.site.update_status()
        if self.form_status is None:
            # Inherit the default status from whichever form was filled.
            if self.site_fxf:
                self.form_status = self.site_fxf.default_submission_status
            else:
                self.form_status = self.project_fxf.default_submission_status
        super(FInstance, self).save(*args, **kwargs)  # Call the "real" save() method.

    @property
    def fsxfid(self):
        """Pk of the FieldSightXF this submission belongs to."""
        # NOTE(review): the trailing backslash below continues onto the
        # following blank line; harmless but fragile.
        if self.project_fxf:
            return self.project_fxf.id
        else:
            return self.site_fxf.id\

    @property
    def fsxf(self):
        """The FieldSightXF (project-level takes precedence)."""
        if self.project_fxf:
            return self.project_fxf
        else:
            return self.site_fxf

    def get_absolute_url(self):
        if self.site_fxf:
            fxf_id = self.site_fxf_id
        else:
            fxf_id = self.project_fxf_id
        return "/forms/forms/" + str(fxf_id) + "#/" + str(self.instance.id)

    def get_abr_form_status(self):
        # Human-readable label for the numeric form_status.
        return dict(FORM_STATUS)[self.form_status]

    def getname(self):
        if self.site_fxf is None:
            return '{0} form {1}'.format(self.project_fxf.form_type(), self.project_fxf.xf.title,)
        return '{0} form {1}'.format(self.site_fxf.form_type(),
                                     self.site_fxf.xf.title,)

    def __unicode__(self):
        if self.site_fxf is None:
            return u"%s" % str(self.submitted_by) + "---" + self.project_fxf.xf.title
        return u"%s" % str(self.submitted_by) + "---" + self.site_fxf.xf.title

    def instance_json(self):
        return json.dumps(self.instance.json)

    def get_responces(self):
        """Flatten this submission's answers against the form definition
        into a list of {'type', 'question', 'answer'} rows for display."""
        data=[]
        json_answer = self.instance.json
        json_question = json.loads(self.instance.xform.json)
        base_url = DjangoSite.objects.get_current().domain
        media_folder = self.instance.xform.user.username
        # NOTE(review): the attachment URLs below join media_folder directly
        # to 'attachments/' with no '/' between them -- looks like a missing
        # separator; confirm against the real attachment storage layout.
        def parse_repeat(r_object):
            # Emit the repeat-group name, then rows for each answered set;
            # unanswered repeats still emit empty rows for their children.
            r_question = r_object['name']
            data.append(r_question)
            if r_question in json_answer:
                for gnr_answer in json_answer[r_question]:
                    for first_children in r_object['children']:
                        question_type = first_children['type']
                        question = first_children['name']
                        group_answer = json_answer[r_question]
                        answer = ''
                        if r_question+"/"+question in gnr_answer:
                            if first_children['type'] == 'note':
                                answer= ''
                            elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video':
                                answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+gnr_answer[r_question+"/"+question]
                            else:
                                answer = gnr_answer[r_question+"/"+question]
                        if 'label' in first_children:
                            question = first_children['label']
                        row={'type':question_type, 'question':question, 'answer':answer}
                        data.append(row)
            else:
                for first_children in r_object['children']:
                    question_type = first_children['type']
                    question = first_children['name']
                    answer = ''
                    if 'label' in first_children:
                        question = first_children['label']
                    row={'type':question_type, 'question':question, 'answer':answer}
                    data.append(row)
        def parse_group(prev_groupname, g_object):
            # Group answers are keyed "<parent>/<child>"; recurse for nested
            # groups.
            g_question = prev_groupname+g_object['name']
            for first_children in g_object['children']:
                question = first_children['name']
                question_type = first_children['type']
                if question_type == 'group':
                    parse_group(g_question+"/",first_children)
                    continue
                answer = ''
                if g_question+"/"+question in json_answer:
                    if question_type == 'note':
                        answer= ''
                    elif question_type == 'photo' or question_type == 'audio' or question_type == 'video':
                        answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[g_question+"/"+question]
                    else:
                        answer = json_answer[g_question+"/"+question]
                if 'label' in first_children:
                    question = first_children['label']
                row={'type':question_type, 'question':question, 'answer':answer}
                data.append(row)
        def parse_individual_questions(parent_object):
            # Walk top-level questions, dispatching repeats and groups, then
            # append submission metadata rows at the end.
            for first_children in parent_object:
                if first_children['type'] == "repeat":
                    parse_repeat(first_children)
                elif first_children['type'] == 'group':
                    parse_group("",first_children)
                else:
                    question = first_children['name']
                    question_type = first_children['type']
                    answer= ''
                    if question in json_answer:
                        if first_children['type'] == 'note':
                            answer= ''
                        elif first_children['type'] == 'photo' or first_children['type'] == 'audio' or first_children['type'] == 'video':
                            answer = 'http://'+base_url+'/attachment/medium?media_file=/'+ media_folder +'attachments/'+json_answer[question]
                        else:
                            answer = json_answer[question]
                    if 'label' in first_children:
                        question = first_children['label']
                    row={"type":question_type, "question":question, "answer":answer}
                    data.append(row)
            submitted_by={'type':'submitted_by','question':'Submitted by', 'answer':json_answer['_submitted_by']}
            submittion_time={'type':'submittion_time','question':'Submittion Time', 'answer':json_answer['_submission_time']}
            data.append(submitted_by)
            data.append(submittion_time)
        parse_individual_questions(json_question['children'])
        return data
class InstanceStatusChanged(models.Model):
    """Audit/feedback entry recording one change of a submission's status."""
    finstance = models.ForeignKey(FInstance, related_name="comments")
    message = models.TextField(null=True, blank=True)
    date = models.DateTimeField(auto_now=True)
    old_status = models.IntegerField(default=0, choices=FORM_STATUS)
    new_status = models.IntegerField(default=0, choices=FORM_STATUS)
    user = models.ForeignKey(User, related_name="submission_comments")
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        ordering = ['-date']

    def get_absolute_url(self):
        return reverse('forms:alter-status-detail', kwargs={'pk': self.pk})

    def getname(self):
        # NOTE(review): assumes a site-level form; raises AttributeError for
        # project-level submissions where site_fxf is None -- confirm callers.
        return '{0} form {1}'.format(self.finstance.site_fxf.form_type(), self.finstance.site_fxf.xf.title)
class InstanceImages(models.Model):
    """Image attached to a status-change feedback entry."""
    instance_status = models.ForeignKey(InstanceStatusChanged, related_name="images")
    image = models.ImageField(upload_to="submission-feedback-images",
                              verbose_name='Status Changed Images',)
class FieldSightFormLibrary(models.Model):
    """An XForm shared into the library, globally or scoped to an
    organization/project."""
    xf = models.ForeignKey(XForm)
    is_global = models.BooleanField(default=False)
    shared_date = models.DateTimeField(auto_now=True)
    organization = models.ForeignKey(Organization, null=True, blank=True)
    project = models.ForeignKey(Project, null=True, blank=True)
    logs = GenericRelation('eventlog.FieldSightLog')

    class Meta:
        verbose_name = _("Library")
        verbose_name_plural = _("Library")
        ordering = ("-shared_date",)
class EducationMaterial(models.Model):
    """Training material (PDF or text) attached to a stage or a form."""
    is_pdf = models.BooleanField(default=False)  # selects pdf vs. title/text
    pdf = models.FileField(upload_to="education-material-pdf", null=True, blank=True)
    title = models.CharField(max_length=31, blank=True, null=True)
    text = models.TextField(blank=True, null=True)
    stage = models.OneToOneField(Stage, related_name="em", null=True, blank=True)
    fsxf = models.OneToOneField(FieldSightXF, related_name="em", null=True, blank=True)
class EducationalImages(models.Model):
    """Image belonging to a piece of education material."""
    educational_material = models.ForeignKey(EducationMaterial, related_name="em_images")
    image = models.ImageField(upload_to="education-material-images",
                              verbose_name='Education Images',)
# @receiver(post_save, sender=Site)
# def copy_stages_from_project(sender, **kwargs):
# site = kwargs.get('instance')
# created = kwargs.get('created')
# if created:
# project = site.project
# project_main_stages = project.stages.filter(stage__isnull=True)
# for pms in project_main_stages:
# project_sub_stages = Stage.objects.filter(stage__id=pms.pk, stage_forms__is_deleted=False, stage_forms__is_deployed=True)
# if not project_sub_stages:
# continue
# site_main_stage = Stage(name=pms.name, order=pms.order, site=site, description=pms.description,
# project_stage_id=pms.id, weight=pms.weight)
# site_main_stage.save()
# for pss in project_sub_stages:
# if pss.tags and site.type:
# if site.type.id not in pss.tags:
# continue
# site_sub_stage = Stage(name=pss.name, order=pss.order, site=site,
# description=pss.description, stage=site_main_stage, project_stage_id=pss.id, weight=pss.weight)
# site_sub_stage.save()
# if FieldSightXF.objects.filter(stage=pss).exists():
# fsxf = pss.stage_forms
# site_form = FieldSightXF(is_staged=True, default_submission_status=fsxf.default_submission_status, xf=fsxf.xf, site=site,fsform=fsxf, stage=site_sub_stage, is_deployed=True)
# site_form.save()
# general_forms = project.project_forms.filter(is_staged=False, is_scheduled=False, is_deployed=True, is_deleted=False)
# for general_form in general_forms:
# FieldSightXF.objects.create(is_staged=False, default_submission_status=general_form.default_submission_status, is_scheduled=False, is_deployed=True, site=site,
# xf=general_form.xf, fsform=general_form)
#
# schedule_forms = project.project_forms.filter(is_scheduled=True, is_deployed=True, is_deleted=False)
# for schedule_form in schedule_forms:
# schedule = schedule_form.schedule
# selected_days = tuple(schedule.selected_days.all())
# s = Schedule.objects.create(name=schedule.name, site=site, date_range_start=schedule.date_range_start,
# date_range_end=schedule.date_range_end)
# s.selected_days.add(*selected_days)
# s.save()
# FieldSightXF.objects.create(is_scheduled=True, default_submission_status=schedule_form.default_submission_status, xf=schedule_form.xf, site=site, fsform=schedule_form,
# schedule=s, is_deployed=True)
class DeployEvent(models.Model):
    """Snapshot of a (re)deployment of forms to a site or a project."""
    form_changed = models.BooleanField(default=True)
    # Bug fix: ``default={}`` creates one dict shared by every instance;
    # Django field defaults for mutable values must be callables.
    data = JSONField(default=dict)
    date = models.DateTimeField(auto_now=True)
    site = models.ForeignKey(Site, related_name="deploy_data", null=True)
    project = models.ForeignKey(Project, related_name="deploy_data", null=True)
def upload_to(instance, filename):
    """Storage path for an XformHistory XLS: versions/<pk>/xls/<basename>."""
    base_name = os.path.basename(filename)
    return os.path.join('versions', str(instance.pk), 'xls', base_name)
class XformHistory(models.Model):
class Meta:
unique_together = ('xform', 'version')
def _set_uuid_in_xml(self, file_name=None):
"""
Add bind to automatically set UUID node in XML.
"""
if not file_name:
file_name = self.file_name()
file_name, file_ext = os.path.splitext(file_name)
doc = clean_and_parse_xml(self.xml)
model_nodes = doc.getElementsByTagName("model")
if len(model_nodes) != 1:
raise Exception(u"xml contains multiple model nodes")
model_node = model_nodes[0]
instance_nodes = [node for node in model_node.childNodes if
node.nodeType == Node.ELEMENT_NODE and
node.tagName.lower() == "instance" and
not node.hasAttribute("id")]
if len(instance_nodes) != 1:
raise Exception(u"Multiple instance nodes without the id "
u"attribute, can't tell which is the main one")
instance_node = instance_nodes[0]
# get the first child whose id attribute matches our id_string
survey_nodes = [node for node in instance_node.childNodes
if node.nodeType == Node.ELEMENT_NODE and
(node.tagName == file_name or
node.attributes.get('id'))]
if len(survey_nodes) != 1:
raise Exception(
u"Multiple survey nodes with the id '%s'" % self.id_string)
survey_node = survey_nodes[0]
formhub_nodes = [n for n in survey_node.childNodes
if n.nodeType == Node.ELEMENT_NODE and
n.tagName == "formhub"]
if len(formhub_nodes) > 1:
raise Exception(
u"Multiple formhub nodes within main instance node")
elif len(formhub_nodes) == 1:
formhub_node = formhub_nodes[0]
else:
formhub_node = survey_node.insertBefore(
doc.createElement("formhub"), survey_node.firstChild)
uuid_nodes = [node for node in formhub_node.childNodes if
node.nodeType == Node.ELEMENT_NODE and
node.tagName == "uuid"]
if len(uuid_nodes) == 0:
formhub_node.appendChild(doc.createElement("uuid"))
if len(formhub_nodes) == 0:
# append the calculate bind node
calculate_node = doc.createElement("bind")
calculate_node.setAttribute(
"nodeset", "/%s/formhub/uuid" % file_name)
calculate_node.setAttribute("type", "string")
calculate_node.setAttribute("calculate", "'%s'" % self.uuid)
model_node.appendChild(calculate_node)
self.xml = doc.toprettyxml(indent=" ", encoding='utf-8')
# hack
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-\
# and-silly-whitespace/
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
output_re = re.compile('\n.*(<output.*>)\n( )*')
prettyXml = text_re.sub('>\g<1></', self.xml.decode('utf-8'))
inlineOutput = output_re.sub('\g<1>', prettyXml)
inlineOutput = re.compile('<label>\s*\n*\s*\n*\s*</label>').sub(
'<label></label>', inlineOutput)
self.xml = inlineOutput
xform = models.ForeignKey(XForm, related_name="fshistory")
date = models.DateTimeField(auto_now=True)
xls = models.FileField(upload_to=upload_to, null=True)
json = models.TextField(default=u'')
description = models.TextField(default=u'', null=True)
xml = models.TextField()
id_string = models.CharField(editable=False, max_length=255)
title = models.CharField(editable=False, max_length=255)
uuid = models.CharField(max_length=32, default=u'')
version = models.CharField(max_length=255, default=u'')
@property
def get_version(self):
import re
n = XML_VERSION_MAX_ITER
xml = self.xml
p = re.compile('version="(.*)">')
m = p.search(xml)
if m:
return m.group(1)
version = check_version(xml)
if version:
return version
else:
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version_" """)
m = p.search(xml)
if m:
return m.group(1)
p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/_version_" """)
m1 = p.search(xml)
if m1:
return m1.group(1)
p1 = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
return m1.group(1)
p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
return m1.group(1)
return None
def check_version(xml, n):
    """Scan *xml* for the highest-numbered ``_version__00i`` calculate bind
    (``i`` counting down from *n* to 1) and return its value, or ``None``.

    For each index the single-quoted form is tried before the bare form,
    matching the original search order.
    """
    quoted = """<bind calculate="\'(.*)\'" nodeset="/(.*)/_version__00{0}" """
    unquoted = """<bind calculate="(.*)" nodeset="/(.*)/_version__00{0}" """
    for index in range(n, 0, -1):
        for template in (quoted, unquoted):
            match = re.search(template.format(index), xml)
            if match:
                return match.group(1)
    return None
def save(self, *args, **kwargs):
if self.xls and not self.xml:
survey = create_survey_from_xls(self.xls)
self.json = survey.to_json()
self.xml = survey.to_xml()
self._mark_start_time_boolean()
# set_uuid(self)
# self._set_uuid_in_xml()
if not self.version:
self.version = self.get_version
super(XformHistory, self).save(*args, **kwargs)
def file_name(self):
return os.path.split(self.xls.name)[-1]
def _mark_start_time_boolean(self):
starttime_substring = 'jr:preloadParams="start"'
if self.xml.find(starttime_substring) != -1:
self.has_start_time = True
else:
self.has_start_time = False
def get_survey(self):
if not hasattr(self, "_survey"):
try:
builder = SurveyElementBuilder()
self._survey = \
builder.create_survey_element_from_json(self.json)
except ValueError:
xml = bytes(bytearray(self.xml, encoding='utf-8'))
self._survey = create_survey_element_from_xml(xml)
return self._survey
survey = property(get_survey)
class SubmissionOfflineSite(models.Model):
    """Maps a submission collected offline to its temporary site."""

    # Identifier of the site as known to the offline client.
    offline_site_id = models.CharField(max_length=20)
    temporary_site = models.ForeignKey(Site, related_name="offline_submissions")
    instance = models.OneToOneField(FInstance, blank=True, null=True, related_name="offline_submission")
    # NOTE(review): related_name "offline_submissiob" looks like a typo for
    # "offline_submissions", but renaming it changes the ORM/API surface —
    # confirm against callers and migrations before fixing.
    fieldsight_form = models.ForeignKey(FieldSightXF, related_name="offline_submissiob" , null=True, blank=True)

    def __unicode__(self):
        # Python 2 display string: "<instance id> ----- <offline site id>".
        if self.instance:
            return u"%s ---------------%s" % (str(self.instance.id) ,self.offline_site_id)
        return u"%s" % str(self.offline_site_id)
| bsd-2-clause |
marcel-dancak/QGIS | python/plugins/processing/tests/ToolsTest.py | 11 | 3325 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ToolsTest
---------------------
Date : July 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'July 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import shutil
from qgis.core import NULL, QgsVectorLayer
from qgis.testing import start_app, unittest
from processing.tests.TestData import points
from processing.tools import vector
# Directory containing the fixture data used by these tests.
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')

# A QGIS application instance must exist before any QGIS API calls.
start_app()
class VectorTest(unittest.TestCase):
    """Tests for the ``processing.tools.vector`` helpers."""

    @classmethod
    def setUpClass(cls):
        # Directories appended here are deleted after the whole class runs.
        cls.cleanup_paths = []

    @classmethod
    def tearDownClass(cls):
        for folder in cls.cleanup_paths:
            shutil.rmtree(folder)

    def testValues(self):
        """vector.values() accepts field indices and field names alike."""
        layer = QgsVectorLayer(points(), 'test', 'ogr')

        expected_id = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        expected_id2 = [2, 1, 0, 2, 1, 0, 0, 0, 0]

        # field by index
        self.assertEqual(vector.values(layer, 1)[1], expected_id)
        # field by name
        self.assertEqual(vector.values(layer, 'id')['id'], expected_id)
        # two fields
        res = vector.values(layer, 1, 2)
        self.assertEqual(res[1], expected_id)
        self.assertEqual(res[2], expected_id2)
        # two fields by name
        res = vector.values(layer, 'id', 'id2')
        self.assertEqual(res['id'], expected_id)
        self.assertEqual(res['id2'], expected_id2)
        # two fields by name and index
        res = vector.values(layer, 'id', 2)
        self.assertEqual(res['id'], expected_id)
        self.assertEqual(res[2], expected_id2)

    def testConvertNulls(self):
        """vector.convert_nulls() replaces QGIS NULL with the fill value."""
        self.assertEqual(vector.convert_nulls([]), [])
        self.assertEqual(vector.convert_nulls([], '_'), [])
        self.assertEqual(vector.convert_nulls([NULL]), [None])
        self.assertEqual(vector.convert_nulls([NULL], '_'), ['_'])
        self.assertEqual(vector.convert_nulls([NULL], -1), [-1])
        self.assertEqual(vector.convert_nulls([1, 2, 3]), [1, 2, 3])
        self.assertEqual(vector.convert_nulls([1, None, 3]), [1, None, 3])
        self.assertEqual(
            vector.convert_nulls([1, NULL, 3, NULL]), [1, None, 3, None])
        self.assertEqual(
            vector.convert_nulls([1, NULL, 3, NULL], '_'), [1, '_', 3, '_'])
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
nazo/ansible | lib/ansible/modules/commands/raw.py | 56 | 3569 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: raw
short_description: Executes a low-down and dirty SSH command
version_added: historical
options:
free_form:
description:
- the raw module takes a free form command to run. There is no parameter actually named 'free form'; see the examples!
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
- when using privilege escalation (C(become)), a default shell will be assigned if one is not provided
as privilege escalation requires a shell.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to C(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- "If using raw from a playbook, you may need to disable fact gathering
using C(gather_facts: no) if you're using C(raw) to bootstrap python
onto the machine."
- If you want to execute a command securely and predictably, it may be
better to use the M(command) or M(shell) modules instead.
- the C(environment) keyword does not work with raw normally, it requires a shell
which means it only works if C(executable) is set or using the module
with privilege escalation (C(become)).
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
- name: Bootstrap a legacy python 2.4 host
raw: yum -y install python-simplejson
- name: Bootstrap a host without python2 installed
raw: dnf install -y python2 python2-dnf libselinux-python
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
raw: cat < /tmp/*txt
args:
executable: /bin/bash
- name: safely use templated variables. Always use quote filter to avoid injection issues.
raw: "{{package_mgr|quote}} {{pkg_flags|quote}} install {{python_simplejson|quote}}"
'''
| gpl-3.0 |
arpitn30/open-event-orga-server | app/views/public/explore.py | 7 | 5070 | import json
import requests
from flask import Blueprint
from flask import render_template
from flask import request, redirect, url_for, jsonify
from flask.ext.restplus import abort
from flask_restplus import marshal
from requests import ConnectionError
from app.api.events import EVENT, EVENT_PAGINATED
from app.api.helpers.helpers import get_paginated_list, get_object_list
from app.helpers.data import DataGetter
from app.helpers.flask_ext.helpers import deslugify
from app.helpers.helpers import get_date_range
from app.helpers.static import EVENT_TOPICS
from app.models.event import Event
# Number of events shown per page of explore results.
RESULTS_PER_PAGE = 10
def get_paginated(**kwargs):
    """Return one page of Event results for the current request.

    The 1-based ``page`` query argument selects the page (a page below 1
    aborts with 404). Any failure while querying yields an empty result
    page instead of a 500, preserving the original best-effort behaviour.
    """
    current_page = request.args.get('page')
    if current_page:
        current_page = int(current_page) - 1
        if current_page < 0:
            abort(404)
    else:
        current_page = 0
    try:
        return get_paginated_list(Event, url=request.path, args={
            'start': (current_page * RESULTS_PER_PAGE) + 1,
            'limit': RESULTS_PER_PAGE,
        }, **kwargs)
    except Exception:
        # BUG FIX: the bare ``except:`` here also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps the deliberate
        # fall-back to an empty page without masking interpreter exits.
        return {
            'start': 0,
            'count': 0,
            'limit': RESULTS_PER_PAGE,
            'results': []
        }
def erase_from_dict(d, k):
    """Remove key *k* from *d* in place if *d* is a dict; no-op otherwise.

    Missing keys are ignored.
    """
    if isinstance(d, dict):
        # pop() with a default replaces the membership-test-then-pop pair
        # (single lookup, same observable behaviour).
        d.pop(k, None)
def clean_dict(d):
    """Return a plain-dict copy of mapping *d* with falsy values dropped."""
    # BUG FIX: dict.iteritems() only exists on Python 2; .items() behaves
    # the same here and works on both Python 2 and 3.
    return dict((k, v) for k, v in dict(d).items() if v)
def get_coordinates(location_name):
    """Geocode *location_name* via the Google Maps geocoding API.

    Returns a ``{'lat': ..., 'lng': ...}`` dict; falls back to
    (0.0, 0.0) when the lookup fails for any reason.
    """
    location = {
        'lat': 0.0,
        'lng': 0.0
    }
    url = 'https://maps.googleapis.com/maps/api/geocode/json'
    params = {'address': location_name}
    response = dict()
    try:
        response = requests.get(url, params).json()
    except (ConnectionError, ValueError):
        # BUG FIX: a non-JSON body made Response.json() raise an uncaught
        # ValueError; treat it like a connection failure.
        response['status'] = u'Error'
    # BUG FIX: .get() avoids a KeyError when the API reply lacks 'status'.
    if response.get('status') == u'OK':
        location = response['results'][0]['geometry']['location']
    return location
# Flask blueprint collecting all /explore/* routes registered below.
explore = Blueprint('explore', __name__, url_prefix='/explore')
@explore.route('/', methods=('GET', 'POST'))
def explore_base():
    """Redirect the bare /explore/ URL to the admin browse view."""
    return redirect(url_for('admin.browse_view'))
@explore.route('/autocomplete/locations.json', methods=('GET', 'POST'))
def locations_autocomplete():
    """JSON suggestions of known event locations for the search box."""
    suggestions = [
        {'value': place, 'type': 'location'}
        for place in DataGetter.get_locations_of_events()
    ]
    return jsonify(suggestions)
@explore.route('/autocomplete/categories.json', methods=('GET', 'POST'))
def categories_autocomplete():
    """JSON suggestions of event topic categories for the search box."""
    suggestions = [
        {'value': topic, 'type': 'category'}
        for topic in EVENT_TOPICS.keys()
    ]
    return jsonify(suggestions)
@explore.route('/autocomplete/events/<location_slug>.json', methods=('GET', 'POST'))
def events_autocomplete(location_slug):
    """JSON suggestions of event names within the slugged location."""
    place = deslugify(location_slug)
    events = marshal(
        get_object_list(Event, __event_search_location=place), EVENT)
    return jsonify(
        [{'value': event['name'], 'type': 'event_name'} for event in events])
@explore.route('/<location>/events/')
def explore_view(location):
    """Render the explore results page for *location*.

    Query-string parameters (query, type, period, category, sub-category)
    are translated into keyword filters for the paginated Event lookup.
    """
    placeholder_images = DataGetter.get_event_default_images()
    custom_placeholder = DataGetter.get_custom_placeholders()
    location = deslugify(location)
    query = request.args.get('query', '')
    # Only public, published events are ever listed.
    filtering = {'privacy': 'public', 'state': 'Published'}
    start, end = None, None
    word = request.args.get('query', None)
    event_type = request.args.get('type', None)
    day_filter = request.args.get('period', None)
    sub_category = request.args.get('sub-category', None)
    category = request.args.get('category', None)
    if day_filter:
        start, end = get_date_range(day_filter)
    # 'world' is the catch-all location and adds no location filter.
    if location and location != 'world':
        filtering['__event_search_location'] = location
    if word:
        filtering['__event_contains'] = word
    if category:
        filtering['topic'] = category
    if sub_category:
        filtering['sub_topic'] = sub_category
    if event_type:
        filtering['type'] = event_type
    if start:
        filtering['__event_start_time_gt'] = start
    if end:
        filtering['__event_end_time_lt'] = end
    results = marshal(get_paginated(**filtering), EVENT_PAGINATED)
    filters = clean_dict(request.args.items())
    # Serialize custom placeholders as a {name: thumbnail} mapping.
    custom_placeholder_serializable = {}
    for custom_placeholder_item in custom_placeholder:
        custom_placeholder_serializable[custom_placeholder_item.name] = custom_placeholder_item.thumbnail
    return render_template('gentelella/guest/explore/results.html',
                           results=json.dumps(results['results']),
                           location=location if location != 'world' else '',
                           position=json.dumps(get_coordinates(location)),
                           count=results['count'],
                           query_args=json.dumps(filters),
                           placeholder_images=json.dumps(placeholder_images),
                           custom_placeholder=json.dumps(custom_placeholder_serializable),
                           categories=EVENT_TOPICS,
                           results_per_page=RESULTS_PER_PAGE,
                           query=query)
| gpl-3.0 |
ASOdesk/selenium-pytest-fix | py/test/selenium/webdriver/common/click_scrolling_tests.py | 9 | 7393 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import (
ElementNotVisibleException,
MoveTargetOutOfBoundsException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def testClickingOnAnchorScrollsPage(driver, pages):
    """Clicking an in-page anchor link should scroll the page down."""
    # Cross-browser way to read the vertical scroll offset.
    scrollScript = """var pageY;
        if (typeof(window.pageYOffset) == 'number') {
            pageY = window.pageYOffset;
        } else {
            pageY = document.documentElement.scrollTop;
        }
        return pageY;"""
    pages.load("macbeth.html")
    driver.find_element(By.PARTIAL_LINK_TEXT, "last speech").click()
    yOffset = driver.execute_script(scrollScript)
    # Focusing on to click, but not actually following,
    # the link will scroll it in to view, which is a few pixels further than 0
    assert yOffset > 300
def testShouldScrollToClickOnAnElementHiddenByOverflow(driver, pages):
    """Clicking a link outside the viewport must scroll it into view."""
    pages.load("click_out_of_bounds_overflow.html")
    link = driver.find_element(By.ID, "link")
    try:
        link.click()
    except MoveTargetOutOfBoundsException as e:
        # BUG FIX: the AssertionError was previously constructed but never
        # raised, so this test could never fail.
        raise AssertionError("Should not be out of bounds: %s" % e.msg)
@pytest.mark.xfail_marionette(
    reason='https://github.com/w3c/webdriver/issues/408')
def testShouldBeAbleToClickOnAnElementHiddenByOverflow(driver, pages):
    """A click on an element hidden by overflow should still land."""
    pages.load("scroll.html")
    link = driver.find_element(By.ID, "line8")
    # This used to throw a MoveTargetOutOfBoundsException - we don't expect it to
    link.click()
    assert "line8" == driver.find_element(By.ID, "clicked").text


@pytest.mark.xfail_chrome(
    reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1536',
    raises=WebDriverException)
def testShouldBeAbleToClickOnAnElementHiddenByDoubleOverflow(driver, pages):
    """Clicking through two nested overflow:auto containers."""
    pages.load("scrolling_tests/page_with_double_overflow_auto.html")
    driver.find_element(By.ID, "link").click()
    WebDriverWait(driver, 3).until(EC.title_is("Clicked Successfully!"))


def testShouldBeAbleToClickOnAnElementHiddenByYOverflow(driver, pages):
    """Clicking an element hidden by vertical overflow scrolling."""
    pages.load("scrolling_tests/page_with_y_overflow_auto.html")
    driver.find_element(By.ID, "link").click()
    WebDriverWait(driver, 3).until(EC.title_is("Clicked Successfully!"))


def testShouldNotScrollOverflowElementsWhichAreVisible(driver, pages):
    """Already-visible items inside a scrollable list must not be scrolled."""
    pages.load("scroll2.html")
    # NOTE(review): 'list' shadows the builtin; kept unchanged here.
    list = driver.find_element(By.TAG_NAME, "ul")
    item = list.find_element(By.ID, "desired")
    item.click()
    yOffset = driver.execute_script("return arguments[0].scrollTop", list)
    assert 0 == yOffset, "Should not have scrolled"
@pytest.mark.xfail_chrome(
    reason='https://bugs.chromium.org/p/chromedriver/issues/detail?id=1542')
def testShouldNotScrollIfAlreadyScrolledAndElementIsInView(driver, pages):
    """Clicking an element already in view must not change the scroll."""
    pages.load("scroll3.html")
    driver.find_element(By.ID, "button1").click()
    scrollTop = getScrollTop(driver)
    driver.find_element(By.ID, "button2").click()
    assert scrollTop == getScrollTop(driver)


def testShouldBeAbleToClickRadioButtonScrolledIntoView(driver, pages):
    """A radio button scrolled into view is clickable."""
    pages.load("scroll4.html")
    driver.find_element(By.ID, "radio").click()
    # If we don't throw, we're good


@pytest.mark.xfail_marionette(
    reason='https://github.com/w3c/webdriver/issues/408',
    raises=ElementNotVisibleException)
def testShouldScrollOverflowElementsIfClickPointIsOutOfViewButElementIsInView(driver, pages):
    """Scroll an overflow container when the click point is off-screen."""
    pages.load("scroll5.html")
    driver.find_element(By.ID, "inner").click()
    assert "clicked" == driver.find_element(By.ID, "clicked").text
@pytest.mark.xfail_marionette(
    reason='https://github.com/w3c/webdriver/issues/408')
def testShouldBeAbleToClickElementInAFrameThatIsOutOfView(driver, pages):
    """Clicking inside a frame that itself is scrolled out of view."""
    pages.load("scrolling_tests/page_with_frame_out_of_view.html")
    driver.switch_to.frame(driver.find_element_by_name("frame"))
    element = driver.find_element(By.NAME, "checkbox")
    element.click()
    assert element.is_selected()


def testShouldBeAbleToClickElementThatIsOutOfViewInAFrame(driver, pages):
    """Clicking an off-screen element inside a scrollable frame."""
    pages.load("scrolling_tests/page_with_scrolling_frame.html")
    driver.switch_to.frame(driver.find_element_by_name("scrolling_frame"))
    element = driver.find_element(By.NAME, "scroll_checkbox")
    element.click()
    assert element.is_selected()


def testShouldNotBeAbleToClickElementThatIsOutOfViewInANonScrollableFrame(driver, pages):
    """Clicking an off-screen element in a frame that cannot scroll."""
    pages.load("scrolling_tests/page_with_non_scrolling_frame.html")
    driver.switch_to.frame("scrolling_frame")
    element = driver.find_element(By.NAME, "scroll_checkbox")
    element.click()
    # TODO we should assert that the click was unsuccessful


def testShouldBeAbleToClickElementThatIsOutOfViewInAFrameThatIsOutOfView(driver, pages):
    """Off-screen element inside a frame that is itself off-screen."""
    pages.load("scrolling_tests/page_with_scrolling_frame_out_of_view.html")
    driver.switch_to.frame(driver.find_element_by_name("scrolling_frame"))
    element = driver.find_element(By.NAME, "scroll_checkbox")
    element.click()
    assert element.is_selected()


def testShouldBeAbleToClickElementThatIsOutOfViewInANestedFrame(driver, pages):
    """Off-screen element nested two frames deep."""
    pages.load("scrolling_tests/page_with_nested_scrolling_frames.html")
    driver.switch_to.frame(driver.find_element_by_name("scrolling_frame"))
    driver.switch_to.frame(driver.find_element_by_name("nested_scrolling_frame"))
    element = driver.find_element(By.NAME, "scroll_checkbox")
    element.click()
    assert element.is_selected()


def testShouldBeAbleToClickElementThatIsOutOfViewInANestedFrameThatIsOutOfView(driver, pages):
    """Off-screen element in a nested frame that is also off-screen."""
    pages.load("scrolling_tests/page_with_nested_scrolling_frames_out_of_view.html")
    driver.switch_to.frame(driver.find_element_by_name("scrolling_frame"))
    driver.switch_to.frame(driver.find_element_by_name("nested_scrolling_frame"))
    element = driver.find_element(By.NAME, "scroll_checkbox")
    element.click()
    assert element.is_selected()


def testShouldNotScrollWhenGettingElementSize(driver, pages):
    """Reading an element's size must not change the scroll position."""
    pages.load("scroll3.html")
    scrollTop = getScrollTop(driver)
    driver.find_element(By.ID, "button1").size
    assert scrollTop == getScrollTop(driver)
def getScrollTop(driver):
    """Return the document body's current vertical scroll offset."""
    script = "return document.body.scrollTop"
    return driver.execute_script(script)
@pytest.mark.xfail_marionette(
    reason='https://github.com/w3c/webdriver/issues/408')
def testShouldBeAbleToClickElementInATallFrame(driver, pages):
    """Elements inside a very tall frame remain clickable."""
    pages.load("scrolling_tests/page_with_tall_frame.html")
    driver.switch_to.frame(driver.find_element_by_name("tall_frame"))
    element = driver.find_element(By.NAME, "checkbox")
    element.click()
    assert element.is_selected()
| apache-2.0 |
tykling/django-swingtime | swingtime/conf/swingtime_settings.py | 3 | 1436 | import datetime
# A "strftime" string for formatting start and end time selectors in forms
TIMESLOT_TIME_FORMAT = '%I:%M %p'
# Used for creating start and end time form selectors as well as time slot grids.
# Value should be datetime.timedelta value representing the incremental
# differences between temporal options
TIMESLOT_INTERVAL = datetime.timedelta(minutes=15)
# A datetime.time value indicting the starting time for time slot grids and form
# selectors
TIMESLOT_START_TIME = datetime.time(9)
# A datetime.timedelta value indicating the offset value from
# TIMESLOT_START_TIME for creating time slot grids and form selectors. The for
# using a time delta is that it possible to span dates. For instance, one could
# have a starting time of 3pm (15:00) and wish to indicate a ending value
# 1:30am (01:30), in which case a value of datetime.timedelta(hours=10.5)
# could be specified to indicate that the 1:30 represents the following date's
# time and not the current date.
TIMESLOT_END_TIME_DURATION = datetime.timedelta(hours=+8)
# Indicates a minimum value for the number grid columns to be shown in the time
# slot table.
TIMESLOT_MIN_COLUMNS = 4
# Indicate the default length in time for a new occurrence, specifed by using
# a datetime.timedelta object
DEFAULT_OCCURRENCE_DURATION = datetime.timedelta(hours=+1)
# If not None, passed to the calendar module's setfirstweekday function.
CALENDAR_FIRST_WEEKDAY = 6 | mit |
mahak/neutron | neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py | 2 | 57873 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import atexit
import copy
import datetime
import functools
import operator
import signal
import threading
import types
import uuid
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net
from neutron_lib.api.definitions import segment as segment_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.utils import runtime
from oslo_config import cfg
from oslo_db import exception as os_db_exc
from oslo_log import log
from oslo_utils import timeutils
from neutron._i18n import _
from neutron.common.ovn import acl as ovn_acl
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import extensions as ovn_extensions
from neutron.common.ovn import utils as ovn_utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db
from neutron.db import ovn_revision_numbers_db
from neutron.db import provisioning_blocks
from neutron.extensions import securitygroup as ext_sg
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2.drivers.ovn.agent import neutron_agent as n_agent
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import worker
from neutron.services.logapi.drivers.ovn import driver as log_driver
from neutron.services.qos.drivers.ovn import driver as qos_driver
from neutron.services.segments import db as segment_service_db
from neutron.services.trunk.drivers.ovn import trunk_driver
import neutron.wsgi
# Module-level logger for this driver.
LOG = log.getLogger(__name__)
class OVNPortUpdateError(n_exc.BadRequest):
    """Raised (as an HTTP 400 BadRequest) for invalid OVN port updates."""
    pass
class OVNMechanismDriver(api.MechanismDriver):
    """OVN ML2 mechanism driver

    A mechanism driver is called on the creation, update, and deletion
    of networks and ports. For every event, there are two methods that
    get called - one within the database transaction (method suffix of
    _precommit), one right afterwards (method suffix of _postcommit).

    Exceptions raised by methods called inside the transaction can
    rollback, but should not make any blocking calls (for example,
    REST requests to an outside controller). Methods called after
    transaction commits can make blocking external calls, though these
    will block the entire process. Exceptions raised in calls after
    the transaction commits may cause the associated resource to be
    deleted.

    Because rollback outside of the transaction is not done in the
    update network/port case, all data validation must be done within
    methods that are part of the database transaction.
    """

    # Namespace used to derive deterministic UUIDv5 resource-provider IDs.
    resource_provider_uuid5_namespace = uuid.UUID(
        '5533233b-800c-11eb-b1f4-000056b2f5b8')
    def initialize(self):
        """Perform driver initialization.

        Called after all drivers have been loaded and the database has
        been initialized. No abstract methods defined below will be
        called prior to this method being called.
        """
        LOG.info("Starting OVNMechanismDriver")
        self._nb_ovn = None
        self._sb_ovn = None
        self._plugin_property = None
        self._ovn_client_inst = None
        self._maintenance_thread = None
        self.node_uuid = None
        self.hash_ring_group = ovn_const.HASH_RING_ML2_GROUP
        self.sg_enabled = ovn_acl.is_sg_enabled()
        # NOTE(lucasagomes): _clean_hash_ring() must be called before
        # self.subscribe() to avoid processes racing when adding or
        # deleting nodes from the Hash Ring during service initialization
        self._clean_hash_ring()
        # Set by post_fork_initialize() once the OVN IDLs are ready;
        # _ovn_client waits on it.
        self._post_fork_event = threading.Event()
        if cfg.CONF.SECURITYGROUP.firewall_driver:
            LOG.warning('Firewall driver configuration is ignored')
        self._setup_vif_port_bindings()
        # Newer OVN schemas track agent liveness in Chassis_Private.
        if impl_idl_ovn.OvsdbSbOvnIdl.schema_has_table('Chassis_Private'):
            self.agent_chassis_table = 'Chassis_Private'
        else:
            self.agent_chassis_table = 'Chassis'
        self.subscribe()
        # Optional service drivers layered on top of this mechanism driver.
        self.qos_driver = qos_driver.OVNQosDriver.create(self)
        self.trunk_driver = trunk_driver.OVNTrunkDriver.create(self)
        self.log_driver = log_driver.register(self)
    @property
    def nb_schema_helper(self):
        # Northbound OVSDB schema helper (shared, class-level attribute).
        return impl_idl_ovn.OvsdbNbOvnIdl.schema_helper

    @property
    def sb_schema_helper(self):
        # Southbound OVSDB schema helper (shared, class-level attribute).
        return impl_idl_ovn.OvsdbSbOvnIdl.schema_helper

    @property
    def _plugin(self):
        # Lazily resolved core plugin, cached after the first lookup.
        if self._plugin_property is None:
            self._plugin_property = directory.get_plugin()
        return self._plugin_property

    @property
    def _ovn_client(self):
        # Lazily built OVNClient; may block until post-fork IDL setup is done.
        if self._ovn_client_inst is None:
            if not(self._nb_ovn and self._sb_ovn):
                # Wait until the post_fork_initialize method has finished and
                # IDLs have been correctly setup.
                self._post_fork_event.wait()
            self._ovn_client_inst = ovn_client.OVNClient(self._nb_ovn,
                                                         self._sb_ovn)
        return self._ovn_client_inst

    @property
    def nb_ovn(self):
        # NOTE (twilson): This and sb_ovn can be moved to instance variables
        # once all references to the private versions are changed
        return self._nb_ovn

    @property
    def sb_ovn(self):
        # Read-only accessor for the Southbound IDL connection.
        return self._sb_ovn
def check_vlan_transparency(self, context):
"""OVN driver vlan transparency support."""
vlan_transparency_network_types = [
const.TYPE_LOCAL,
const.TYPE_GENEVE,
const.TYPE_VXLAN,
const.TYPE_VLAN
]
return (context.current.get(provider_net.NETWORK_TYPE)
in vlan_transparency_network_types)
    def _setup_vif_port_bindings(self):
        """Declare which VNIC types and VIF details this driver can bind."""
        self.supported_vnic_types = [portbindings.VNIC_NORMAL,
                                     portbindings.VNIC_DIRECT,
                                     portbindings.VNIC_DIRECT_PHYSICAL,
                                     portbindings.VNIC_MACVTAP,
                                     portbindings.VNIC_VHOST_VDPA,
                                     ]
        self.vif_details = {
            portbindings.VIF_TYPE_OVS: {
                # Port filtering is only offered when security groups are on.
                portbindings.CAP_PORT_FILTER: self.sg_enabled
            },
            portbindings.VIF_TYPE_VHOST_USER: {
                portbindings.CAP_PORT_FILTER: False,
                portbindings.VHOST_USER_MODE:
                    portbindings.VHOST_USER_MODE_SERVER,
                portbindings.VHOST_USER_OVS_PLUG: True
            },
            portbindings.VIF_DETAILS_CONNECTIVITY:
                portbindings.CONNECTIVITY_L2,
        }
def supported_extensions(self, extensions):
return set(ovn_extensions.ML2_SUPPORTED_API_EXTENSIONS) & extensions
    def subscribe(self):
        """Register all callback-registry subscriptions this driver needs."""
        registry.subscribe(self.pre_fork_initialize,
                           resources.PROCESS,
                           events.BEFORE_SPAWN)
        registry.subscribe(self.post_fork_initialize,
                           resources.PROCESS,
                           events.AFTER_INIT)
        registry.subscribe(self._add_segment_host_mapping_for_segment,
                           resources.SEGMENT,
                           events.AFTER_CREATE)
        registry.subscribe(self.create_segment_provnet_port,
                           resources.SEGMENT,
                           events.AFTER_CREATE)
        registry.subscribe(self.delete_segment_provnet_port,
                           resources.SEGMENT,
                           events.AFTER_DELETE)

        # Handle security group/rule notifications
        if self.sg_enabled:
            registry.subscribe(self._create_security_group_precommit,
                               resources.SECURITY_GROUP,
                               events.PRECOMMIT_CREATE)
            registry.subscribe(self._update_security_group,
                               resources.SECURITY_GROUP,
                               events.AFTER_UPDATE)
            registry.subscribe(self._create_security_group,
                               resources.SECURITY_GROUP,
                               events.AFTER_CREATE)
            registry.subscribe(self._delete_security_group,
                               resources.SECURITY_GROUP,
                               events.AFTER_DELETE)
            registry.subscribe(self._create_sg_rule_precommit,
                               resources.SECURITY_GROUP_RULE,
                               events.PRECOMMIT_CREATE)
            registry.subscribe(self._process_sg_rule_notification,
                               resources.SECURITY_GROUP_RULE,
                               events.AFTER_CREATE)
            registry.subscribe(self._process_sg_rule_notification,
                               resources.SECURITY_GROUP_RULE,
                               events.BEFORE_DELETE)
    def _clean_hash_ring(self, *args, **kwargs):
        """Remove this host's nodes from the OVN Hash Ring.

        Extra positional/keyword args are accepted so this can double as a
        signal handler (see pre_fork_initialize).
        """
        admin_context = n_context.get_admin_context()
        ovn_hash_ring_db.remove_nodes_from_host(admin_context,
                                                self.hash_ring_group)

    def pre_fork_initialize(self, resource, event, trigger, payload=None):
        """Pre-initialize the ML2/OVN driver."""
        # Ensure hash-ring cleanup on normal interpreter exit and on SIGTERM.
        atexit.register(self._clean_hash_ring)
        signal.signal(signal.SIGTERM, self._clean_hash_ring)
        self._create_neutron_pg_drop()
    def _create_neutron_pg_drop(self):
        """Create neutron_pg_drop Port Group.

        The method creates a short living connection to the Northbound
        database. Because of multiple controllers can attempt to create the
        Port Group at the same time the transaction can fail and raise
        RuntimeError. In such case, we make sure the Port Group was created,
        otherwise the error is something else and it's raised to the caller.
        """
        idl = ovsdb_monitor.OvnInitPGNbIdl.from_server(
            ovn_conf.get_ovn_nb_connection(), self.nb_schema_helper, self)
        with ovsdb_monitor.short_living_ovsdb_api(
                impl_idl_ovn.OvsdbNbOvnIdl, idl) as pre_ovn_nb_api:
            try:
                create_default_drop_port_group(pre_ovn_nb_api)
            # NOTE(review): 'as re' shadows the module-level re import
            # within this handler's scope.
            except RuntimeError as re:
                # Another controller may have won the race; only swallow
                # the error when the Port Group actually exists now.
                if pre_ovn_nb_api.get_port_group(
                        ovn_const.OVN_DROP_PORT_GROUP_NAME):
                    LOG.debug(
                        "Port Group %(port_group)s already exists, "
                        "ignoring RuntimeError %(error)s", {
                            'port_group': ovn_const.OVN_DROP_PORT_GROUP_NAME,
                            'error': re})
                else:
                    raise

    @staticmethod
    def should_post_fork_initialize(worker_class):
        """Return True if *worker_class* needs OVN IDL connections."""
        # By default only API and maintenace workers need to initialize
        # the OVN IDL connections
        if worker_class in (neutron.wsgi.WorkerService,
                            worker.MaintenanceWorker):
            return True

        # Configuration may allow other worker types to use IDL connections.
        # Look for a match in additional_worker_classes_with_ovn_idl,
        # gracefully skipping unknown classes in the config list.
        for worker_type in ovn_conf.additional_worker_classes_with_ovn_idl():
            try:
                additional_class = runtime.load_class_by_alias_or_classname(
                    'neutron.worker_classes', worker_type)
                if worker_class == additional_class:
                    return True
            except ImportError:
                # ignore unknown additional worker class
                pass
        return False
def post_fork_initialize(self, resource, event, trigger, payload=None):
    """Initialize OVN IDL connections for API/maintenance workers."""
    worker_class = ovn_utils.get_method_class(trigger)
    if not self.should_post_fork_initialize(worker_class):
        return
    self._post_fork_event.clear()
    self._wait_for_pg_drop_event()
    self._ovn_client_inst = None
    if worker_class == neutron.wsgi.WorkerService:
        admin_context = n_context.get_admin_context()
        self.node_uuid = ovn_hash_ring_db.add_node(admin_context,
                                                   self.hash_ring_group)
    n_agent.AgentCache(self)  # Initialize singleton agent cache
    self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls(self, trigger)
    # Override agents API methods
    self.patch_plugin_merge("get_agents", get_agents)
    self.patch_plugin_choose("get_agent", get_agent)
    self.patch_plugin_choose("update_agent", update_agent)
    self.patch_plugin_choose("delete_agent", delete_agent)
    # Override availability zone methods
    self.patch_plugin_merge("get_availability_zones",
                            get_availability_zones)
    # Now IDL connections can be safely used.
    self._post_fork_event.set()
    if worker_class == worker.MaintenanceWorker:
        # Maintenance worker: run the synchronization tasks that bring
        # OVN NB/SB in line with the neutron DB (only touches
        # inconsistent state), then start the periodic maintenance
        # thread.
        self.nb_synchronizer = ovn_db_sync.OvnNbSynchronizer(
            self._plugin,
            self._nb_ovn,
            self._sb_ovn,
            ovn_conf.get_ovn_neutron_sync_mode(),
            self)
        self.nb_synchronizer.sync()
        self.sb_synchronizer = ovn_db_sync.OvnSbSynchronizer(
            self._plugin,
            self._sb_ovn,
            self)
        self.sb_synchronizer.sync()
        self._maintenance_thread = maintenance.MaintenanceThread()
        self._maintenance_thread.add_periodics(
            maintenance.DBInconsistenciesPeriodics(self._ovn_client))
        self._maintenance_thread.add_periodics(
            maintenance.HashRingHealthCheckPeriodics(
                self.hash_ring_group))
        self._maintenance_thread.start()
def _wait_for_pg_drop_event(self):
    """Block until the neutron_pg_drop Port Group is visible.

    Uses a short living Northbound connection and waits for the
    CREATE event of the Port Group, which fires either when another
    process creates it, or when it already exists and the DB copy
    becomes available to the IDL.
    """
    idl = ovsdb_monitor.OvnInitPGNbIdl.from_server(
        ovn_conf.get_ovn_nb_connection(), self.nb_schema_helper, self,
        pg_only=True)
    with ovsdb_monitor.short_living_ovsdb_api(
            impl_idl_ovn.OvsdbNbOvnIdl, idl) as ovn_nb_api:
        ovn_nb_api.idl.neutron_pg_drop_event.wait()
def _create_security_group_precommit(self, resource, event, trigger,
                                     payload):
    """Record the initial revision number for a new security group."""
    sg = payload.latest_state
    ovn_revision_numbers_db.create_initial_revision(
        payload.context, sg['id'], ovn_const.TYPE_SECURITY_GROUPS,
        std_attr_id=sg['standard_attr_id'])
def _create_security_group(self, resource, event, trigger, payload):
    """Mirror a newly created security group into OVN."""
    self._ovn_client.create_security_group(payload.context,
                                           payload.latest_state)
def _delete_security_group(self, resource, event, trigger, payload):
    """Remove a deleted security group from OVN."""
    self._ovn_client.delete_security_group(payload.context,
                                           payload.resource_id)
def _update_security_group(self, resource, event, trigger, payload):
    """Bump the revision number of an updated security group.

    OVN doesn't care about updates to security groups, only whether
    they exist; bumping the revision keeps the maintenance periodic
    task from flagging the group as inconsistent.
    """
    ovn_revision_numbers_db.bump_revision(
        payload.context, payload.latest_state,
        ovn_const.TYPE_SECURITY_GROUPS)
def _create_sg_rule_precommit(self, resource, event, trigger, payload):
    """Record the initial revision number for a new SG rule."""
    rule = payload.latest_state
    ovn_revision_numbers_db.create_initial_revision(
        payload.context, rule['id'],
        ovn_const.TYPE_SECURITY_GROUP_RULES,
        std_attr_id=rule['standard_attr_id'])
def _process_sg_rule_notification(
        self, resource, event, trigger, payload):
    """Create or delete the OVN counterpart of a security group rule."""
    context = payload.context
    if event == events.AFTER_CREATE:
        self._ovn_client.create_security_group_rule(
            context, payload.latest_state)
        return
    if event == events.BEFORE_DELETE:
        try:
            sg_rule = self._plugin.get_security_group_rule(
                context, payload.resource_id)
        except ext_sg.SecurityGroupRuleNotFound:
            return
        # Keep the OVN ACL when a sibling rule normalizes to the same
        # CIDR and otherwise matches this one.
        if (sg_rule.get('remote_ip_prefix') is not None and
                self._sg_has_rules_with_same_normalized_cidr(sg_rule)):
            return
        self._ovn_client.delete_security_group_rule(context, sg_rule)
def _sg_has_rules_with_same_normalized_cidr(self, sg_rule):
    """Return True if another rule in the group covers the same CIDR.

    Two rules "match" when they share ethertype, direction, protocol,
    port range and normalized CIDR.
    """
    compare_keys = ('ethertype', 'direction', 'protocol',
                    'port_range_min', 'port_range_max')
    group_rules = self._plugin.get_security_group_rules(
        n_context.get_admin_context(),
        {'security_group_id': [sg_rule['security_group_id']]})
    for rule in group_rules:
        if not rule.get('remote_ip_prefix') or rule['id'] == sg_rule['id']:
            continue
        if sg_rule.get('normalized_cidr') != rule.get('normalized_cidr'):
            continue
        if all(sg_rule.get(key) == rule.get(key) for key in compare_keys):
            return True
    return False
def _is_network_type_supported(self, network_type):
    """Return True for network types the OVN driver can handle."""
    return network_type in (const.TYPE_LOCAL,
                            const.TYPE_FLAT,
                            const.TYPE_GENEVE,
                            const.TYPE_VXLAN,
                            const.TYPE_VLAN)
def _get_max_tunid(self):
try:
return int(self._nb_ovn.nb_global.options.get('max_tunid'))
except (ValueError, TypeError):
# max_tunid may be absent in older OVN versions, return None
pass
def _validate_network_segments(self, network_segments):
    """Reject segments with unsupported types or too-large tunnel IDs.

    :raises n_exc.InvalidInput: for an unsupported network type or a
        segmentation ID greater than OVN's max_tunid.
    """
    max_tunid = self._get_max_tunid()
    for segment in network_segments:
        network_type = segment['network_type']
        segmentation_id = segment['segmentation_id']
        physical_network = segment['physical_network']
        LOG.debug('Validating network segment with '
                  'type %(network_type)s, '
                  'segmentation ID %(segmentation_id)s, '
                  'physical network %(physical_network)s',
                  {'network_type': network_type,
                   'segmentation_id': segmentation_id,
                   'physical_network': physical_network})
        if not self._is_network_type_supported(network_type):
            msg = _('Network type %s is not supported') % network_type
            raise n_exc.InvalidInput(error_message=msg)
        if segmentation_id and max_tunid and segmentation_id > max_tunid:
            msg = (_('Segmentation ID should be lower or equal to %d') %
                   max_tunid)
            raise n_exc.InvalidInput(error_message=msg)
def create_segment_provnet_port(self, resource, event, trigger,
                                payload=None):
    """Create a provider-net port for a segment that has a physnet."""
    segment = payload.latest_state
    if segment.get(segment_def.PHYSICAL_NETWORK):
        self._ovn_client.create_provnet_port(segment['network_id'],
                                             segment)
def delete_segment_provnet_port(self, resource, event, trigger,
                                payload):
    """Delete the provider-net port of a removed segment."""
    # NOTE(mjozefcz): Get the last state of segment resource.
    segment = payload.states[-1]
    if not segment.get(segment_def.PHYSICAL_NETWORK):
        return
    self._ovn_client.delete_provnet_port(segment['network_id'], segment)
def create_network_precommit(self, context):
    """Allocate resources for a new network inside the transaction.

    :param context: NetworkContext instance describing the new
        network.

    Called inside transaction context on session; must not block.
    Raising an exception results in a rollback of the current
    transaction.
    """
    self._validate_network_segments(context.network_segments)
    ovn_revision_numbers_db.create_initial_revision(
        context._plugin_context, context.current['id'],
        ovn_const.TYPE_NETWORKS,
        std_attr_id=context.current['standard_attr_id'])
def create_network_postcommit(self, context):
    """Create the network in OVN after the transaction commits.

    :param context: NetworkContext instance describing the new
        network.

    Call may block; raising an exception causes the deletion of the
    resource.
    """
    self._ovn_client.create_network(context._plugin_context,
                                    context.current)
def update_network_precommit(self, context):
    """Validate an updated network inside the transaction.

    :param context: NetworkContext with both new and original state.

    Called for all network changes; raising an exception results in a
    rollback of the transaction.
    """
    self._validate_network_segments(context.network_segments)
def update_network_postcommit(self, context):
    """Apply a network update to OVN after the transaction commits.

    :param context: NetworkContext with both new and original state.

    Called for all network changes; it is up to this driver to ignore
    changes it does not care about.
    """
    # FIXME(lucasagomes): We can delete this conditional after
    # https://bugs.launchpad.net/neutron/+bug/1739798 is fixed.
    if context._plugin_context.session.is_active:
        return
    self._ovn_client.update_network(
        context._plugin_context, context.current,
        original_network=context.original)
def delete_network_postcommit(self, context):
    """Delete the network from OVN after the transaction commits.

    :param context: NetworkContext describing the deleted network.

    Runtime errors are not expected and will not prevent the resource
    from being deleted.
    """
    self._ovn_client.delete_network(context._plugin_context,
                                    context.current['id'])
def create_subnet_precommit(self, context):
    """Record the initial revision number for a new subnet."""
    ovn_revision_numbers_db.create_initial_revision(
        context._plugin_context, context.current['id'],
        ovn_const.TYPE_SUBNETS,
        std_attr_id=context.current['standard_attr_id'])
def create_subnet_postcommit(self, context):
    """Create the subnet in OVN after the transaction commits."""
    self._ovn_client.create_subnet(
        context._plugin_context, context.current,
        context.network.current)
def update_subnet_postcommit(self, context):
    """Apply a subnet update to OVN after the transaction commits."""
    self._ovn_client.update_subnet(context._plugin_context,
                                   context.current,
                                   context.network.current)
def delete_subnet_postcommit(self, context):
    """Delete the subnet from OVN after the transaction commits."""
    self._ovn_client.delete_subnet(
        context._plugin_context, context.current['id'])
def _validate_port_extra_dhcp_opts(self, port):
    """Log any extra DHCP options on *port* that OVN cannot handle."""
    result = ovn_utils.validate_port_extra_dhcp_opts(port)
    if result.failed:
        LOG.info('The following extra DHCP options for port %(port_id)s '
                 'are not supported by OVN. IPv4: "%(ipv4_opts)s" and '
                 'IPv6: "%(ipv6_opts)s"',
                 {'port_id': port['id'],
                  'ipv4_opts': ', '.join(result.invalid_ipv4),
                  'ipv6_opts': ', '.join(result.invalid_ipv6)})
def create_port_precommit(self, context):
    """Allocate resources for a new port inside the transaction.

    :param context: PortContext instance describing the port.

    Called inside transaction context on session; must not block.
    Raising an exception results in a rollback of the current
    transaction.
    """
    port = context.current
    if ovn_utils.is_lsp_ignored(port):
        return
    ovn_utils.validate_and_get_data_from_binding_profile(port)
    self._validate_port_extra_dhcp_opts(port)
    if self._is_port_provisioning_required(port, context.host):
        self._insert_port_provisioning_block(context._plugin_context,
                                             port['id'])
    ovn_revision_numbers_db.create_initial_revision(
        context._plugin_context, port['id'], ovn_const.TYPE_PORTS,
        std_attr_id=port['standard_attr_id'])
    # Router ports also need the creation/update of their LRP OVN
    # objects tracked.
    if ovn_utils.is_lsp_router_port(port):
        ovn_revision_numbers_db.create_initial_revision(
            context._plugin_context, port['id'],
            ovn_const.TYPE_ROUTER_PORTS,
            std_attr_id=port['standard_attr_id'])
def _is_port_provisioning_required(self, port, host, original_host=None):
    """Return True when a provisioning block should gate this port."""
    vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
    if vnic_type not in self.supported_vnic_types:
        LOG.debug('No provisioning block for port %(port_id)s due to '
                  'unsupported vnic_type: %(vnic_type)s',
                  {'port_id': port['id'], 'vnic_type': vnic_type})
        return False
    if port['status'] == const.PORT_STATUS_ACTIVE:
        LOG.debug('No provisioning block for port %s since it is active',
                  port['id'])
        return False
    if not host:
        LOG.debug('No provisioning block for port %s since it does not '
                  'have a host', port['id'])
        return False
    if host == original_host:
        LOG.debug('No provisioning block for port %s since host unchanged',
                  port['id'])
        return False
    if not self._sb_ovn.chassis_exists(host):
        LOG.debug('No provisioning block for port %(port_id)s since no '
                  'OVN chassis for host: %(host)s',
                  {'port_id': port['id'], 'host': host})
        return False
    return True
def _insert_port_provisioning_block(self, context, port_id):
    """Block the port from going ACTIVE until OVN reports it up."""
    provisioning_blocks.add_provisioning_component(
        context, port_id, resources.PORT,
        provisioning_blocks.L2_AGENT_ENTITY)
def _notify_dhcp_updated(self, port_id):
    """Notifies Neutron that the DHCP has been updated for port."""
    admin_context = n_context.get_admin_context()
    if provisioning_blocks.is_object_blocked(
            admin_context, port_id, resources.PORT):
        provisioning_blocks.provisioning_complete(
            admin_context, port_id, resources.PORT,
            provisioning_blocks.DHCP_ENTITY)
def _validate_ignored_port(self, port, original_port):
    """Forbid updates that toggle a port's OVN-ignored status.

    :raises OVNPortUpdateError: when the update would switch the port
        between ignored and not-ignored.
    """
    now_ignored = ovn_utils.is_lsp_ignored(port)
    was_ignored = ovn_utils.is_lsp_ignored(original_port)
    if now_ignored and not was_ignored:
        # From not ignored port to ignored port
        msg = (_('Updating device_owner to %(device_owner)s for port '
                 '%(port_id)s is not supported') %
               {'device_owner': port['device_owner'],
                'port_id': port['id']})
        raise OVNPortUpdateError(resource='port', msg=msg)
    if was_ignored and not now_ignored:
        # From ignored port to not ignored port
        msg = (_('Updating device_owner for port %(port_id)s owned by '
                 '%(device_owner)s is not supported') %
               {'port_id': port['id'],
                'device_owner': original_port['device_owner']})
        raise OVNPortUpdateError(resource='port', msg=msg)
def create_port_postcommit(self, context):
    """Create the port in OVN after the transaction completes.

    :param context: PortContext instance describing the port.

    Call may block; raising an exception results in the deletion of
    the resource.
    """
    port = copy.deepcopy(context.current)
    port['network'] = context.network.current
    self._ovn_client.create_port(context._plugin_context, port)
    self._notify_dhcp_updated(port['id'])
def update_port_precommit(self, context):
    """Validate a port update inside the transaction.

    :param context: PortContext with both new and original state.

    Called for all port changes; raising an exception results in a
    rollback of the transaction.
    """
    port = context.current
    original_port = context.original
    self._validate_ignored_port(port, original_port)
    ovn_utils.validate_and_get_data_from_binding_profile(port)
    self._validate_port_extra_dhcp_opts(port)
    if self._is_port_provisioning_required(port, context.host,
                                           context.original_host):
        self._insert_port_provisioning_block(context._plugin_context,
                                             port['id'])
    # When an existing port is plugged into a logical router we start
    # tracking the creation/update of its LRP OVN object as well.
    if (ovn_utils.is_lsp_router_port(port) and
            not ovn_utils.is_lsp_router_port(original_port)):
        ovn_revision_numbers_db.create_initial_revision(
            context._plugin_context, port['id'],
            ovn_const.TYPE_ROUTER_PORTS, may_exist=True,
            std_attr_id=port['standard_attr_id'])
def update_port_postcommit(self, context):
    """Apply a port update to OVN after the transaction completes.

    :param context: PortContext with both new and original state.

    Called for all port changes; unknown changes are ignored.
    """
    port = copy.deepcopy(context.current)
    port['network'] = context.network.current
    original_port = copy.deepcopy(context.original)
    original_port['network'] = context.network.current
    # NOTE(mjozefcz): Check if port is in migration state. If so update
    # the port status from DOWN to UP in order to generate 'fake'
    # vif-interface-plugged event. This workaround is needed to
    # perform live-migration with live_migration_wait_for_vif_plug=True.
    if (port['status'] == const.PORT_STATUS_DOWN and
            ovn_const.MIGRATING_ATTR in port[portbindings.PROFILE] and
            port[portbindings.VIF_TYPE] in (
                portbindings.VIF_TYPE_OVS,
                portbindings.VIF_TYPE_VHOST_USER)):
        LOG.info("Setting port %s status from DOWN to UP in order "
                 "to emit vif-interface-plugged event.",
                 port['id'])
        self._plugin.update_port_status(context._plugin_context,
                                        port['id'],
                                        const.PORT_STATUS_ACTIVE)
        # The revision has changed; the port-update event already
        # refreshed the OVN configuration, so updating again here is
        # unnecessary (and would fail, as OVN holds a port with a
        # higher revision).
        return
    self._ovn_client.update_port(context._plugin_context, port,
                                 port_object=original_port)
    self._notify_dhcp_updated(port['id'])
def delete_port_postcommit(self, context):
    """Delete the port from OVN after the transaction completes.

    :param context: PortContext describing the deleted port.

    Runtime errors are not expected and will not prevent the resource
    from being deleted.
    """
    port = copy.deepcopy(context.current)
    port['network'] = context.network.current
    # FIXME(lucasagomes): PortContext does not have a session, therefore
    # we need to use the _plugin_context attribute.
    self._ovn_client.delete_port(context._plugin_context, port['id'],
                                 port_object=port)
def bind_port(self, context):
    """Attempt to bind a port to one of its network segments.

    :param context: PortContext instance describing the port.

    Called outside any transaction.  When a suitable segment is found,
    ``context.set_binding`` is invoked with OVS or vhost-user binding
    details; otherwise the port is left unbound.  Implementing this
    method declares the driver's intention to bind ports, which the
    QoS service inspects to identify available QoS rules.
    """
    port = context.current
    vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
    if vnic_type not in self.supported_vnic_types:
        LOG.debug('Refusing to bind port %(port_id)s due to unsupported '
                  'vnic_type: %(vnic_type)s',
                  {'port_id': port['id'], 'vnic_type': vnic_type})
        return
    if ovn_utils.is_port_external(port):
        LOG.debug("Refusing to bind port due to unsupported vnic_type: %s "
                  "with no switchdev capability", vnic_type)
        return
    # OVN chassis information is needed to ensure a valid port bind.
    # Collect port binding data and refuse binding if the OVN chassis
    # cannot be found.
    chassis_physnets = []
    try:
        datapath_type, iface_types, chassis_physnets = (
            self._sb_ovn.get_chassis_data_for_ml2_bind_port(context.host))
        iface_types = iface_types.split(',') if iface_types else []
    except RuntimeError:
        LOG.debug('Refusing to bind port %(port_id)s due to '
                  'no OVN chassis for host: %(host)s',
                  {'port_id': port['id'], 'host': context.host})
        return
    for segment_to_bind in context.segments_to_bind:
        network_type = segment_to_bind['network_type']
        segmentation_id = segment_to_bind['segmentation_id']
        physical_network = segment_to_bind['physical_network']
        LOG.debug('Attempting to bind port %(port_id)s on host %(host)s '
                  'for network segment with type %(network_type)s, '
                  'segmentation ID %(segmentation_id)s, '
                  'physical network %(physical_network)s',
                  {'port_id': port['id'],
                   'host': context.host,
                   'network_type': network_type,
                   'segmentation_id': segmentation_id,
                   'physical_network': physical_network})
        # TODO(rtheis): This scenario is only valid on an upgrade from
        # neutron ML2 OVS since invalid network types are prevented during
        # network creation and update. The upgrade should convert invalid
        # network types. Once bug/1621879 is fixed, refuse to bind
        # ports with unsupported network types.
        if not self._is_network_type_supported(network_type):
            LOG.info('Upgrade allowing bind port %(port_id)s with '
                     'unsupported network type: %(network_type)s',
                     {'port_id': port['id'],
                      'network_type': network_type})
        if (network_type in ['flat', 'vlan'] and
                physical_network not in chassis_physnets):
            # Skip this segment: the chassis bridge mappings do not
            # expose the segment's physical network.
            LOG.info('Refusing to bind port %(port_id)s on '
                     'host %(host)s due to the OVN chassis '
                     'bridge mapping physical networks '
                     '%(chassis_physnets)s not supporting '
                     'physical network: %(physical_network)s',
                     {'port_id': port['id'],
                      'host': context.host,
                      'chassis_physnets': chassis_physnets,
                      'physical_network': physical_network})
            continue
        # Chassis with a netdev (DPDK) datapath and dpdkvhostuser
        # interface support get vhost-user bindings; everything else
        # gets plain OVS bindings.
        if (datapath_type == ovn_const.CHASSIS_DATAPATH_NETDEV and
                ovn_const.CHASSIS_IFACE_DPDKVHOSTUSER in iface_types):
            vhost_user_socket = ovn_utils.ovn_vhu_sockpath(
                ovn_conf.get_ovn_vhost_sock_dir(), port['id'])
            vif_type = portbindings.VIF_TYPE_VHOST_USER
            port[portbindings.VIF_DETAILS].update({
                portbindings.VHOST_USER_SOCKET: vhost_user_socket})
            vif_details = dict(self.vif_details[vif_type])
            vif_details[portbindings.VHOST_USER_SOCKET] = (
                vhost_user_socket)
        else:
            vif_type = portbindings.VIF_TYPE_OVS
            vif_details = self.vif_details[vif_type]
        context.set_binding(segment_to_bind[api.ID], vif_type,
                            vif_details)
        break
def get_workers(self):
    """Get any worker instances that should have their own process.

    See doc/source/design/ovn_worker.rst for more details.
    """
    return [worker.MaintenanceWorker()]
def _update_dnat_entry_if_needed(self, port_id, up=True):
    """Sync the dnat_and_snat entry of *port_id* with the port state.

    With distributed floating IPs, ``external_mac`` is set while the
    port is up and cleared while it is down.
    """
    if not self._nb_ovn:
        self._nb_ovn = self._ovn_client._nb_idl
    nat = self._nb_ovn.db_find('NAT',
                               ('logical_port', '=', port_id),
                               ('type', '=', 'dnat_and_snat')).execute()
    if not nat:
        return
    # We take first entry as one port can only have one FIP
    nat = nat[0]
    # If the external_id doesn't exist, let's create at this point.
    # TODO(dalvarez): Remove this code in T cycle when we're sure that
    # all DNAT entries have the external_id.
    if not nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY):
        self._nb_ovn.db_set('NAT', nat['_uuid'],
                            ('external_ids',
                             {ovn_const.OVN_FIP_EXT_MAC_KEY:
                              nat['external_mac']})).execute()
    if up and ovn_conf.is_ovn_distributed_floating_ip():
        mac = nat['external_ids'][ovn_const.OVN_FIP_EXT_MAC_KEY]
        if nat['external_mac'] != mac:
            LOG.debug("Setting external_mac of port %s to %s",
                      port_id, mac)
            self._nb_ovn.db_set(
                'NAT', nat['_uuid'], ('external_mac', mac)).execute(
                check_error=True)
    elif nat['external_mac']:
        LOG.debug("Clearing up external_mac of port %s", port_id)
        self._nb_ovn.db_clear(
            'NAT', nat['_uuid'], 'external_mac').execute(
            check_error=True)
def _should_notify_nova(self, db_port):
    """Return True when Nova should hear about port status changes."""
    # NOTE(twilson) It is possible for a test to override a config option
    # after the plugin has been initialized so the nova_notifier attribute
    # is not set on the plugin
    return (cfg.CONF.notify_nova_on_port_status_changes and
            hasattr(self._plugin, 'nova_notifier') and
            db_port.device_owner.startswith(
                const.DEVICE_OWNER_COMPUTE_PREFIX))
def set_port_status_up(self, port_id):
    """Complete provisioning for *port_id* after OVN reports it up.

    Any provisioning block (possibly added during port creation or
    when OVN reported the port down) is removed here.
    """
    LOG.info("OVN reports status up for port: %s", port_id)
    self._update_dnat_entry_if_needed(port_id)
    admin_context = n_context.get_admin_context()
    provisioning_blocks.provisioning_complete(
        admin_context,
        port_id,
        resources.PORT,
        provisioning_blocks.L2_AGENT_ENTITY)
    try:
        # NOTE(lucasagomes): Router ports in OVN are never bound to a
        # host given their decentralized nature, so the
        # provisioning_complete() call above does not flip their
        # status; force it here.  Maybe it's something that we can
        # change in core Neutron in the future.
        db_port = ml2_db.get_port(admin_context, port_id)
        if not db_port:
            return
        if db_port.device_owner in (const.DEVICE_OWNER_ROUTER_INTF,
                                    const.DEVICE_OWNER_DVR_INTERFACE,
                                    const.DEVICE_OWNER_ROUTER_HA_INTF):
            self._plugin.update_port_status(admin_context, port_id,
                                            const.PORT_STATUS_ACTIVE)
        elif self._should_notify_nova(db_port):
            self._plugin.nova_notifier.notify_port_active_direct(db_port)
    except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
        LOG.debug('Port not found during OVN status up report: %s',
                  port_id)
def set_port_status_down(self, port_id):
    """Mark *port_id* DOWN after OVN reports the port down.

    The provisioning block is inserted before the status update so
    that no other entity can bypass the block with its own port
    status update.
    """
    LOG.info("OVN reports status down for port: %s", port_id)
    self._update_dnat_entry_if_needed(port_id, False)
    admin_context = n_context.get_admin_context()
    try:
        db_port = ml2_db.get_port(admin_context, port_id)
        if not db_port:
            return
        self._insert_port_provisioning_block(admin_context, port_id)
        self._plugin.update_port_status(admin_context, port_id,
                                        const.PORT_STATUS_DOWN)
        if self._should_notify_nova(db_port):
            self._plugin.nova_notifier.record_port_status_changed(
                db_port, const.PORT_STATUS_ACTIVE,
                const.PORT_STATUS_DOWN, None)
            self._plugin.nova_notifier.send_port_status(
                None, None, db_port)
    except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
        LOG.debug("Port not found during OVN status down report: %s",
                  port_id)
def delete_mac_binding_entries(self, external_ip):
    """Delete all MAC_Binding entries associated to this IP address"""
    rows = self._sb_ovn.db_find_rows(
        'MAC_Binding', ('ip', '=', external_ip)).execute() or []
    for row in rows:
        self._sb_ovn.db_destroy('MAC_Binding', row.uuid).execute()
def update_segment_host_mapping(self, host, phy_nets):
    """Update SegmentHostMapping in DB"""
    if not host:
        return
    admin_ctx = n_context.get_admin_context()
    segments = segment_service_db.get_segments_with_phys_nets(
        admin_ctx, phy_nets)
    # Only flat/vlan segments can be bound on a given host.
    available_seg_ids = {seg['id'] for seg in segments
                         if seg['network_type'] in ('flat', 'vlan')}
    segment_service_db.update_segment_host_mapping(
        admin_ctx, host, available_seg_ids)
def _add_segment_host_mapping_for_segment(self, resource, event, trigger,
                                          payload=None):
    """Map a newly created segment to hosts exposing its physnet."""
    segment = payload.latest_state
    phynet = segment.physical_network
    if not phynet:
        return
    host_phynets_map = self._sb_ovn.get_chassis_hostname_and_physnets()
    hosts = {host for host, phynets in host_phynets_map.items()
             if phynet in phynets}
    segment_service_db.map_segment_to_hosts(payload.context, segment.id,
                                            hosts)
def patch_plugin_merge(self, method_name, new_fn, op=operator.add):
    """Wrap plugin *method_name* so its result is merged with *new_fn*'s.

    The original plugin method runs first; *new_fn* is bound to the
    plugin (with this driver passed as ``_driver``) and both results
    are combined with *op* (list concatenation by default).
    """
    old_method = getattr(self._plugin, method_name)

    @functools.wraps(old_method)
    def fn(slf, *args, **kwargs):
        new_method = types.MethodType(new_fn, self._plugin)
        results = old_method(*args, **kwargs)
        return op(results, new_method(*args, _driver=self, **kwargs))

    setattr(self._plugin, method_name, types.MethodType(fn, self._plugin))
def patch_plugin_choose(self, method_name, new_fn):
    """Wrap plugin *method_name* to prefer *new_fn*, falling back on 404.

    *new_fn* is bound to the plugin (with this driver passed as
    ``_driver``) and tried first; if it raises NotFound, the original
    plugin method handles the call instead.
    """
    old_method = getattr(self._plugin, method_name)

    @functools.wraps(old_method)
    def fn(slf, *args, **kwargs):
        new_method = types.MethodType(new_fn, self._plugin)
        try:
            return new_method(*args, _driver=self, **kwargs)
        except n_exc.NotFound:
            return old_method(*args, **kwargs)

    setattr(self._plugin, method_name, types.MethodType(fn, self._plugin))
def ping_all_chassis(self):
    """Update NB_Global.nb_cfg so that Chassis.nb_cfg will increment.

    :returns: (bool) True if nb_cfg was updated. False if it was
        updated recently and this call didn't trigger any update.
    """
    last_ping = self._nb_ovn.nb_global.external_ids.get(
        ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY)
    if last_ping:
        # Rate-limit to half the agent_down_time interval.
        interval = max(cfg.CONF.agent_down_time // 2, 1)
        next_ping = (timeutils.parse_isotime(last_ping) +
                     datetime.timedelta(seconds=interval))
        if timeutils.utcnow(with_timezone=True) < next_ping:
            return False
    with self._nb_ovn.create_transaction(check_error=True,
                                         bump_nb_cfg=True) as txn:
        txn.add(self._nb_ovn.check_liveness())
    return True
def list_availability_zones(self, context, filters=None):
    """List all availability zones from gateway chassis."""
    azs = {}
    # TODO(lucasagomes): In the future, once the agents API in OVN
    # gets more stable we should consider getting the information from
    # the availability zones from the agents API itself. That would
    # allow us to do things like: Do not schedule router ports on
    # chassis that are offline (via the "alive" attribute for agents).
    for ch in self._sb_ovn.chassis_list().execute(check_error=True):
        # Only gateway chassis matter because that's where the router
        # ports are scheduled on.
        if not ovn_utils.is_gateway_chassis(ch):
            continue
        for azone in ovn_utils.get_chassis_availability_zones(ch):
            azs[azone] = {'name': azone, 'resource': 'router',
                          'state': 'available',
                          'tenant_id': context.project_id}
    return azs
def get_agents(self, context, filters=None, fields=None, _driver=None):
    """Return agent dicts from the OVN agent cache matching *filters*."""
    _driver.ping_all_chassis()
    filters = filters or {}
    matches = []
    for agent in n_agent.AgentCache():
        agent_dict = agent.as_dict()
        if all(agent_dict[key] in accepted
               for key, accepted in filters.items()):
            matches.append(agent_dict)
    return matches
def get_agent(self, context, id, fields=None, _driver=None):
    """Return the cached OVN agent *id* as a dict.

    :raises n_exc.agent.AgentNotFound: if the agent is not cached.
    """
    try:
        return n_agent.AgentCache()[id].as_dict()
    except KeyError:
        raise n_exc.agent.AgentNotFound(id=id)
def update_agent(self, context, id, agent, _driver=None):
    """Handle agent update; only the 'description' field can change.

    Raises BadRequest for any attempt to set admin_state_up=False,
    since OVN agent status cannot be administratively changed.
    """
    # Raises AgentNotFound if the id does not belong to an ml2/ovn agent.
    ovn_agent = get_agent(self, None, id, _driver=_driver)
    chassis_name = ovn_agent['configurations']['chassis_name']
    agent_type = ovn_agent['agent_type']
    agent = agent['agent']
    # neutron-client always passes admin_state_up, openstack client doesn't
    # and we can just fall through to raising in the case that admin_state_up
    # is being set to False, otherwise the end-state will be fine
    if not agent.get('admin_state_up', True):
        # Deliberate fall-through to the raise at the bottom.
        pass
    elif 'description' in agent:
        # Persist the new description on the Chassis row in the SB DB.
        _driver._sb_ovn.set_chassis_neutron_description(
            chassis_name, agent['description'],
            agent_type).execute(check_error=True)
        return agent
    else:
        # admin_state_up=True w/o description
        return agent
    raise n_exc.BadRequest(resource='agent',
                           msg='OVN agent status cannot be updated')
def delete_agent(self, context, id, _driver=None):
    """Delete the Chassis that backs an OVN agent and purge worker caches."""
    # raise AgentNotFound if this isn't an ml2/ovn-related agent
    agent = get_agent(self, None, id, _driver=_driver)
    # NOTE(twilson) According to the API docs, an agent must be disabled
    # before deletion. Otherwise, behavior seems to be undefined. We could
    # check that alive=False before allowing deletion, but depending on the
    # agent_down_time setting, that could take quite a while.
    # If ovn-controller is up, the Chassis will be recreated and so the agent
    # will still show as up. The recreated Chassis will cause all kinds of
    # events to fire. But again, undefined behavior.
    chassis_name = agent['configurations']['chassis_name']
    _driver._sb_ovn.chassis_del(chassis_name, if_exists=True).execute(
        check_error=True)
    # Send a specific event that all API workers can get to delete the agent
    # from their caches. Ideally we could send a single transaction that both
    # created and deleted the key, but alas python-ovs is too "smart"
    # (it would coalesce the set+remove into a no-op), so do it in two steps.
    _driver._sb_ovn.db_set(
        'SB_Global', '.', ('external_ids', {'delete_agent': str(id)})).execute(
        check_error=True)
    _driver._sb_ovn.db_remove(
        'SB_Global', '.', 'external_ids', delete_agent=str(id),
        if_exists=True).execute(check_error=True)
def create_default_drop_port_group(nb_idl):
    """Ensure the default drop Port Group exists with its drop-all ACLs."""
    group_name = ovn_const.OVN_DROP_PORT_GROUP_NAME
    if nb_idl.get_port_group(group_name):
        LOG.debug("Port Group %s already exists", group_name)
        return
    with nb_idl.transaction(check_error=True) as txn:
        # Create the Port Group (idempotently) ...
        txn.add(nb_idl.pg_add(group_name, acls=[], may_exist=True))
        # ... and attach the ACLs that drop all traffic.
        for acl in ovn_acl.add_acls_for_drop_port_group(group_name):
            txn.add(nb_idl.pg_acl_add(may_exist=True, **acl))
        # Collect every port already referenced by a security-group
        # Port Group and add it to the default drop group as well.
        member_ports = {port
                        for pg in nb_idl.get_sg_port_groups().values()
                        for port in pg['ports']}
        if member_ports:
            txn.add(nb_idl.pg_add_ports(group_name, list(member_ports)))
def get_availability_zones(cls, context, _driver, filters=None, fields=None,
                           sorts=None, limit=None, marker=None,
                           page_reverse=False):
    """Return the driver's availability zones as a flat list of dicts."""
    zones_by_name = _driver.list_availability_zones(context, filters)
    return list(zones_by_name.values())
| apache-2.0 |
hujiajie/chromium-crosswalk | tools/perf/page_sets/startup_pages.py | 18 | 2069 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class BrowserStartupSharedState(shared_page_state.SharedPageState):
  """Shared state that restarts the browser for every single story."""

  # NOTE: no __init__ override is needed -- the previous one only forwarded
  # (test, finder_options, story_set) to the superclass verbatim.

  def DidRunStory(self, results):
    """Stop the browser after each story so the next one gets a cold start."""
    super(BrowserStartupSharedState, self).DidRunStory(results)
    self._StopBrowser()
class StartedPage(page_module.Page):
  """A page that the browser opens itself at launch via startup_url."""

  def __init__(self, url, page_set):
    # startup_url=url is the point of this benchmark: the browser is
    # started with the page, instead of navigating to it afterwards.
    super(StartedPage, self).__init__(
        url=url, page_set=page_set, startup_url=url,
        shared_page_state_class=BrowserStartupSharedState)
    self.archive_data_file = 'data/startup_pages.json'

  def RunNavigateSteps(self, action_runner):
    # Do not call super.RunNavigateSteps() to avoid reloading the page that has
    # already been opened with startup_url.
    # TODO(gabadie): Get rid of this (crbug.com/555504)
    action_runner.Wait(10)

  def RunPageInteractions(self, action_runner):
    # The "interaction" is just waiting for the startup load to settle.
    self.RunNavigateSteps(action_runner)
class StartupPagesPageSet(story.StorySet):
  """Pages for testing starting Chrome with a URL.

  Note that this file can't be used with record_wpr, since record_wpr requires
  a true navigate step, which we do not want for startup testing. Instead use
  record_wpr startup_pages_record to record data for this test."""

  def __init__(self):
    super(StartupPagesPageSet, self).__init__(
        archive_data_file='data/startup_pages.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)
    # Trivial empty page.
    self.AddStory(StartedPage('about:blank', self))
    # Typical page.
    self.AddStory(StartedPage('http://bbc.co.uk', self))
    # Horribly complex page - stress test!
    self.AddStory(StartedPage('http://kapook.com', self))
| bsd-3-clause |
antworteffekt/EDeN | graphprot/prepare_graphprot_seqs.py | 1 | 10394 | #!/usr/bin/env python
# draft implementation
# * TODO:
# * centering should be optional
# * viewpoint should be optional
# * check for nonunique ids and warn
# * check for bedtools version
# * write bed files for sequence coordinates
# * set rnd init for shuffling to have reproducible results
# * use my own temporary sequence files, properly clean up afterwards
# * check if seq length and core length arguments match or handle properly
# * handle input/output error gracefully
# * check if input bed coordinates are stranded
from __future__ import print_function
import argparse
from csv import reader
from itertools import izip
import logging
from eden.util import configure_logging
from pybedtools.featurefuncs import midpoint
from pybedtools.helpers import get_chromsizes_from_ucsc
from pybedtools import BedTool
# parse command line arguments
# positional arguments
parser = argparse.ArgumentParser(
description="Create coordinates and fasta sequences for use with GraphProt.")
parser.add_argument(
"bsites_fn", help="Path to binding site coordiantes in bed format")
parser.add_argument(
"genome_id", help="Genome UCSC id")
parser.add_argument(
"genome_fa_fn", help="Genome fasta sequences")
# optional arguments
parser.add_argument(
"--seq_length",
type=int,
default=150,
help="Length of sequences to create")
parser.add_argument(
"--core_length",
type=int,
default=48,
help="Length of viewpoint region at center of sequence")
parser.add_argument(
"--output_file_prefix",
default="",
help="Prefix to use for output filenames")
parser.add_argument(
"--chromosome_limits",
help="Path to file containing chromosome limites as required by bedtools. Use this parameter disables automatic lookup via the genome id.")
parser.add_argument(
"--negative_site_candidate_regions_fn",
help="Path to regions considered for placement of negatives in bed format")
parser.add_argument(
"-v", "--verbosity",
action="count",
help="Increase output verbosity")
args = parser.parse_args()

logger = logging.getLogger()
configure_logging(logger, verbosity=args.verbosity)

# fixed global variables
npeek = 2

# check chromsizes retreival
if (args.chromosome_limits is None):
    # check if genome_id can be found,
    chromsizes = get_chromsizes_from_ucsc(args.genome_id)
    logging.debug("Number of chromosomes: {}.".format(len(chromsizes)))
    # otherwise request manual definition of chromosome limits
    if (len(chromsizes) == 0):
        logging.error("Error: retrieving chromosome sizes from UCSC failed. Please specify manually using parameter --chromosome_limits")
        exit(1)

# output file arguments
pos_core_bed_fn = args.output_file_prefix + ".positives_core.bed"
neg_core_bed_fn = args.output_file_prefix + ".negatives_core.bed"
# TODO: use
pos_seq_bed_fn = args.output_file_prefix + ".positives_seq.bed"
# TODO: use
neg_seq_bed_fn = args.output_file_prefix + ".negatives_seq.bed"
pos_seq_fa_fn = args.output_file_prefix + ".positives.fa"
neg_seq_fa_fn = args.output_file_prefix + ".negatives.fa"

# calculate flank lengths: sequence = upstream flank + core + downstream flank
flank_length = args.seq_length - args.core_length
flank_upstream_length = int(flank_length / 2)
# the downstream flank absorbs the extra base when flank_length is odd
flank_downstream_length = int(flank_length / 2) + (flank_length % 2)
# sanity check: the three parts must reconstruct the requested length exactly
if (args.core_length + flank_upstream_length + flank_downstream_length != args.seq_length):
    raise Exception("Error: bad length calculation.")
def dbg_head(sites, description="", n=npeek, run=None):
    """Print the first few bed entries.

    :param sites: sliceable collection of bed entries
    :param description: label logged before the entries
    :param n: number of entries to log
    :param run: whether to log at all; defaults to the global args.debug
                flag, treated as False when absent
    """
    if run is None:
        # BUG FIX: argparse above never defines --debug, so evaluating
        # args.debug eagerly (as the old default did) raised
        # AttributeError at import time; resolve it lazily and safely.
        run = getattr(args, "debug", False)
    if run:
        logging.debug(description)
        for entry in sites[0:n]:
            logging.debug(entry)
def prefix_neg(feature, prefix="negative_from_"):
    """Prepend *prefix* to a BedTool feature's name, in place, and return it."""
    feature.name = "%s%s" % (prefix, feature.name)
    return feature
def offset_zero_by_one(feature):
    """Sets the start coordinate to 1 if it is actually 0.

    Required for the flanking to work properly in those cases.
    """
    if feature.start == 0:
        feature.start = 1
    return feature
def get_flanks(cores,
               flank_upstream_length=flank_upstream_length,
               flank_downstream_length=flank_downstream_length):
    """Calculate flanking regions of a core region.

    :param cores: BedTool of core intervals
    :returns: tuple (flanks_upstream, flanks_downstream), entry-aligned
              with *cores*
    :raises Exception: if the flank counts don't match the core count
    """
    # bedtools needs chromosome sizes either from an explicit limits file
    # or from a UCSC genome id; build the keyword argument once instead of
    # duplicating both flank calls (the old code repeated them verbatim).
    if args.chromosome_limits is not None:
        logging.debug("using chromosome_limits " + args.chromosome_limits)
        genome_kwargs = {"g": args.chromosome_limits}
    else:
        genome_kwargs = {"genome": args.genome_id}
    # get upstream flanks
    flanks_upstream = cores.flank(
        s=True,
        l=flank_upstream_length,
        r=0,
        **genome_kwargs).saveas()
    # get downstream flanks
    flanks_downstream = cores.flank(
        s=True,
        r=flank_downstream_length,
        l=0,
        **genome_kwargs).saveas()
    # check if sites and flanks have the same number of entries
    if cores.count() == flanks_upstream.count() == flanks_downstream.count():
        return flanks_upstream, flanks_downstream
    # Counts disagree: persist intermediates to help debugging.
    # BUG FIX: argparse never defines --debug, so args.debug raised
    # AttributeError right when the error report was needed most.
    if getattr(args, "debug", False):
        cores.saveas("debug_cores.bed")
        flanks_upstream.saveas("debug_upstream.bed")
        flanks_downstream.saveas("debug_downstream.bed")
    else:
        cores.saveas()
        flanks_upstream.saveas()
        flanks_downstream.saveas()
    raise Exception("Error: numbers of cores and flanks don't match: got " + str(cores.count()) + " cores, " + str(
        flanks_upstream.count()) + " upstream flanks and " + str(flanks_downstream.count()) + " downstream flanks.")
def get_seqs(cores,
             flanks_upstream,
             flanks_downstream,
             viewpointfa_fn,
             genome_fa_fn=args.genome_fa_fn):
    """Prepare sequences and write them to disk.

    Extracts the genomic sequence of each core and of its two flanks and
    writes one fasta record per core to *viewpointfa_fn*: lowercase flanks
    surrounding an uppercase core (GraphProt "viewpoint" notation).
    """
    # get sequences
    genome_fa = BedTool(genome_fa_fn)
    # tab=True produces "name<TAB>sequence" lines, which are easy to zip
    # back together entry by entry below.
    cores = cores.sequence(
        fi=genome_fa,
        s=True,
        tab=True, name=True).save_seqs(cores.fn + ".tabseq")
    flanks_upstream = flanks_upstream.sequence(
        fi=genome_fa,
        s=True,
        tab=True,
        name=True).save_seqs(flanks_upstream.fn + ".tabseq")
    flanks_downstream = flanks_downstream.sequence(
        fi=genome_fa,
        s=True,
        tab=True,
        name=True).save_seqs(flanks_downstream.fn + ".tabseq")

    # write sequences to disk
    fup_seq_fn = flanks_upstream.seqfn
    cores_seq_fn = cores.seqfn
    fdown_seq_fn = flanks_downstream.seqfn
    viewpointfa = open(viewpointfa_fn, "wb")
    with open(fup_seq_fn, "rb") as fup_tabseq, open(cores_seq_fn, "rb") as core_tabseq, open(fdown_seq_fn, "rb") as fdown_tabseq:
        fup_reader = reader(fup_tabseq, delimiter="\t")
        core_reader = reader(core_tabseq, delimiter="\t")
        fdown_reader = reader(fdown_tabseq, delimiter="\t")
        # The three tabseq files are entry-aligned (checked in get_flanks),
        # so iterating them in lockstep pairs each core with its flanks.
        for fup, core, fdown in izip(fup_reader, core_reader, fdown_reader):
            assert fup[0] == core[0] == fdown[0], "Error: sequence ids of cores and flanks don't match."
            # setup fasta headers and sequences
            fa_header = ">" + core[0]
            seq_viewpoint = fup[1].lower() + core[1].upper() + fdown[1].lower()
            # seq_normal = fup[1].upper() + core[1].upper() + fdown[1].upper()

            viewpointfa.write(fa_header + "\n")
            viewpointfa.write(seq_viewpoint + "\n")
    viewpointfa.close()
# prepare input coordinates
bsites = BedTool(args.bsites_fn).sort().saveas()
# collapse each binding site to its single center nucleotide
centers = bsites.each(midpoint).saveas()

# prepare positive instances
logging.info("preparing positive instances")
if (args.chromosome_limits):
    logging.debug("using chromosome_limits " + args.chromosome_limits)
    cores = centers.slop(s=True,
                         l=int(args.core_length / 2),
                         # -1 to account for the center nucleotide!
                         r=int(args.core_length / 2) +
                         (args.core_length % 2) - 1,
                         g=args.chromosome_limits).each(offset_zero_by_one).saveas(pos_core_bed_fn)
else:
    cores = centers.slop(s=True,
                         l=int(args.core_length / 2),
                         # -1 to account for the center nucleotide!
                         r=int(args.core_length / 2) +
                         (args.core_length % 2) - 1,
                         genome=args.genome_id).each(offset_zero_by_one).saveas(pos_core_bed_fn)
flanks_upstream, flanks_downstream = get_flanks(cores)
get_seqs(cores, flanks_upstream, flanks_downstream, pos_seq_fa_fn)

# prepare negative sites if requested
if args.negative_site_candidate_regions_fn:
    # get negative candidate regions
    negative_site_candidate_regions = BedTool(
        args.negative_site_candidate_regions_fn)
    # remove input binding sites from negative candidate regions
    processed_negative_site_candidate_regions = negative_site_candidate_regions.subtract(
        bsites,
        s=True).saveas()
    # create negative core sites by placing within candidate regions
    logging.info("preparing negative instances")
    logging.info("starting from " + str(cores.count()) + " positive cores")
    if args.chromosome_limits:
        logging.debug("using chromosome_limits " + args.chromosome_limits)
        # shuffle each positive core into the allowed candidate regions,
        # keeping it on the same chromosome and avoiding overlaps
        neg_cores = cores.shuffle(
            g=args.chromosome_limits,
            chrom=True,
            incl=processed_negative_site_candidate_regions.fn,
            noOverlapping=True).each(prefix_neg).saveas(neg_core_bed_fn)
        logging.info("derived negative cores: " + str(neg_cores.count()))
        neg_fup, neg_fdown = get_flanks(neg_cores)
        get_seqs(neg_cores, neg_fup, neg_fdown, neg_seq_fa_fn)
    else:
        neg_cores = cores.shuffle(
            genome=args.genome_id,
            chrom=True,
            incl=processed_negative_site_candidate_regions.fn,
            noOverlapping=True).each(prefix_neg).saveas(neg_core_bed_fn)
        logging.info("derived negative cores: " + str(neg_cores.count()))
        neg_fup, neg_fdown = get_flanks(neg_cores)
        get_seqs(neg_cores, neg_fup, neg_fdown, neg_seq_fa_fn)
| mit |
fnp/pylucene | samples/LuceneInAction/lia/analysis/stopanalyzer/StopAnalyzer2.py | 2 | 1203 | # ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from lucene import \
LetterTokenizer, LowerCaseFilter, StopAnalyzer, StopFilter
#
# An Analyzer extension
#
class StopAnalyzer2(object):
    """Analyzer that tokenizes on letters, lowercases, then removes stop words."""

    def __init__(self, stopWords=None):
        # Fall back to Lucene's built-in English stop word set.
        if stopWords is None:
            self.stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET
        else:
            self.stopWords = stopWords

    def tokenStream(self, fieldName, reader):
        # Letter tokenization -> lowercase -> stop word removal.
        # NOTE(review): the leading True presumably enables position
        # increments in StopFilter -- confirm against the Lucene version used.
        return StopFilter(True, LowerCaseFilter(LetterTokenizer(reader)),
                          self.stopWords)
| apache-2.0 |
wshallum/ansible | lib/ansible/modules/windows/win_file.py | 25 | 2790 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories.
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
Unlike M(file), does not modify ownership, permissions or manipulate links.
notes:
- See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
requirements: [ ]
author: "Jon Hawkesworth (@jhawkesworth)"
options:
path:
description:
- 'path to the file being managed. Aliases: I(dest), I(name)'
required: true
default: []
aliases: ['dest', 'name']
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
required: false
default: file
choices: [ file, directory, touch, absent ]
'''
# NOTE: raw string required -- these examples contain Windows paths such as
# C:\temp, and in a non-raw triple-quoted string "\t" becomes a literal TAB
# character, silently corrupting the rendered module documentation.
EXAMPLES = r'''
- name: Create a file
  win_file:
    path: C:\temp\foo.conf
    state: file

- name: Touch a file (creates if not present, updates modification time if present)
  win_file:
    path: C:\temp\foo.conf
    state: touch

- name: Remove a file, if present
  win_file:
    path: C:\temp\foo.conf
    state: absent

- name: Create directory structure
  win_file:
    path: C:\temp\folder\subfolder
    state: directory

- name: Remove directory structure
  win_file:
    path: C:\temp
    state: absent
'''
| gpl-3.0 |
vnaydionov/card-proxy | tests/secvault_tests.py | 1 | 3752 | # -*- coding: utf-8 -*-
import os
import sys
import unittest
import datetime as dt
from utils import generate_random_string, generate_random_number
from secvault_api import SecureVaultApi
import logger
log = logger.get_logger('/tmp/secvault_tests-%s.log' % os.environ['USER'])
SERVER_URI = 'http://localhost:17113/'
def log_func_context(func):
    """Decorator that logs entry and exit of the wrapped function."""
    import functools

    @functools.wraps(func)  # preserve name/docstring of the wrapped function
    def inner(*args, **kwargs):
        # func.__name__ replaces the Python-2-only func.func_name
        log.debug('---- Start [%s] ----', func.__name__)
        result = func(*args, **kwargs)
        # BUG FIX: the exit message previously said "Start" as well.
        log.debug('---- End [%s] ----', func.__name__)
        return result
    return inner
class TestSecureVaultApi(unittest.TestCase):
    '''
    tokenize_card, detokenize_card, remove_card, etc.
    '''

    def __init__(self, *args, **kwargs):
        super(TestSecureVaultApi, self).__init__(*args, **kwargs)
        # Target server used by every request in this test case.
        self.server_uri = SERVER_URI

    def _create_new_token(self, plain_text, params=None):
        # Tokenize plain_text using the tokenize-only role; entries in
        # *params* override the defaults (used to exercise auth failures).
        p = SecureVaultApi(self.server_uri,
                           user='tokenize_only', passwd='qwerty', domain='mydomain')
        new_params = {
            'plain_text': plain_text,
            # far-future expiry so tests never race token expiration
            'expire_ts': (dt.datetime.now() +
                          dt.timedelta(days=1500)).isoformat()[:19],
            'dedup': 'false',
            'notify_email': 'someone@somedomain',
        }
        new_params.update(params or {})
        status, resp, f_time = p.tokenize(new_params)
        return resp

    def _unwrap_token(self, token, params=None):
        # Detokenize using the detokenize-only role.
        p = SecureVaultApi(self.server_uri,
                           user='detokenize_only', passwd='qwerty', domain='mydomain')
        new_params = {
            'token': token,
        }
        new_params.update(params or {})
        status, resp, f_time = p.detokenize(new_params)
        return resp

    @log_func_context
    def test_tokenize(self):
        # Tokenizing an arbitrary secret must succeed and return a token.
        secret = generate_random_string(1000)
        resp = self._create_new_token(secret)
        self.assertEqual('success', resp.findtext('status'))
        self.assertTrue(len(resp.findtext('token')) > 10)

    @log_func_context
    def test_detokenize(self):
        secret = generate_random_string(101)
        resp = self._create_new_token(secret)
        token = resp.findtext('token')
        # A corrupted token must not resolve.
        resp = self._unwrap_token(token + 'xxx')
        self.assertEqual('error', resp.findtext('status'))
        self.assertEqual('token_not_found', resp.findtext('status_code'))
        # The genuine token must round-trip the secret and its metadata.
        resp = self._unwrap_token(token)
        self.assertEqual('success', resp.findtext('status'))
        self.assertEqual(secret, resp.findtext('plain_text'))
        # NOTE(review): the next three assertTrue calls pass a second
        # positional value, which unittest treats as the failure *message*,
        # so they always pass -- probably assertEqual was intended; confirm.
        self.assertTrue('mydomain', resp.findtext('domain'))
        self.assertTrue(19, len(resp.findtext('expire_ts')))
        self.assertTrue('someone@somedomain', resp.findtext('notify_email'))

    @log_func_context
    def test_no_auth(self):
        # Wrong user, wrong role, wrong password and wrong domain must
        # all be rejected with an auth_error.
        secret = generate_random_string(333)
        resp = self._create_new_token(secret, {'user': 'vasya'})
        self.assertEqual('error', resp.findtext('status'))
        self.assertEqual('auth_error', resp.findtext('status_code'))
        resp = self._create_new_token(secret, {'user': 'detokenize_only'})
        self.assertEqual('error', resp.findtext('status'))
        self.assertEqual('auth_error', resp.findtext('status_code'))
        resp = self._create_new_token(secret, {'passwd': 'asdfg'})
        self.assertEqual('error', resp.findtext('status'))
        self.assertEqual('auth_error', resp.findtext('status_code'))
        resp = self._create_new_token(secret, {'domain': 'oebs'})
        self.assertEqual('error', resp.findtext('status'))
        self.assertEqual('auth_error', resp.findtext('status_code'))
if __name__ == '__main__':
import sys
sys.argv.append('-v')
unittest.main()
# vim:ts=4:sts=4:sw=4:tw=85:et:
| mit |
rwightman/tensorflow-litterbox | litterbox/models/sdc/model_sdc.py | 1 | 8350 | # Copyright (C) 2016 Ross Wightman. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
"""Model wrapper for Google's tensorflow/model/slim models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fabric
import tensorflow as tf
from .build_inception_resnet_sdc import *
from .build_resnet_sdc import *
from .build_nvidia_sdc import *
slim = tf.contrib.slim
# Default model configuration; user-supplied params are merged over these.
sdc_default_params = {
    'outputs': {'steer': 1, 'xyz': 2},
    'network': 'inception_resnet_v2',  # or one of other options in network_map
    'regression_loss': 'mse',  # or huber
    'version': 2,
    'bayesian': False,
    'lock_root': False,
}

# Maps network name -> tower builder function.
network_map = {
    'inception_resnet_v2': build_inception_resnet_sdc_regression,
    'resnet_v1_50': build_resnet_v1_50_sdc,
    'resnet_v1_101': build_resnet_v1_101_sdc,
    'resnet_v1_152': build_resnet_v1_152_sdc,
    'nvidia_sdc': build_nvidia_sdc,
}

# Maps network name -> slim arg_scope factory used while building the tower.
arg_scope_map = {
    'inception_resnet_v2': inception_resnet_v2_arg_scope,
    'resnet_v1_50': resnet_arg_scope,
    'resnet_v1_101': resnet_arg_scope,
    'resnet_v1_152': resnet_arg_scope,
    'nvidia_sdc': nvidia_style_arg_scope,
}
class ModelSdc(fabric.model.Model):
    """Self-driving-car model wrapper around several slim network builders.

    Builds a tower (optionally with an auxiliary head), attaches regression
    and/or classification losses for the configured outputs, and exposes
    helpers for checkpoint-variable remapping and evaluation.
    """

    def __init__(self, params={}):
        # NOTE(review): mutable default argument; appears safe here because
        # merge_params returns a new dict rather than mutating params.
        super(ModelSdc, self).__init__()
        params = fabric.model.merge_params(sdc_default_params, params)
        print("ModelSdc params", params)

        # e.g. {'steer': 1, 'xyz': 2} -- output name to dimensionality.
        self.output_cfg = params['outputs']
        # model variable scope needs to match google net for pretrained weight compat
        if (params['network'] == 'resnet_v1_152' or
                params['network'] == 'resnet_v1_101' or
                params['network'] == 'resnet_v1_50'):
            self.network = params['network']
            self.model_variable_scope = params['network']
        elif params['network'] == 'inception_resnet_v2':
            self.network = 'inception_resnet_v2'
            self.model_variable_scope = "InceptionResnetV2"
        else:
            assert params['network'] == 'nvidia_sdc'
            self.network = 'nvidia_sdc'
            self.model_variable_scope = "NvidiaSdc"

        self.version = params['version']
        self.bayesian = params['bayesian']
        self.lock_root = params['lock_root']

        # Select the regression loss used for both steer and xyz heads.
        if params['regression_loss'] == 'huber':
            self.regression_loss = fabric.loss.loss_huber_with_aux
        else:
            self.regression_loss = fabric.loss.loss_mse_with_aux

        self.disable_summaries = False

    def build_tower(self, inputs, is_training=False, summaries=True, scope=None):
        """Build one network tower for *inputs* and register it; returns outputs."""
        with slim.arg_scope(arg_scope_map[self.network]()):
            output, endpoints = network_map[self.network](
                inputs,
                output_cfg=self.output_cfg,
                version=self.version,
                bayesian=self.bayesian,
                lock_root=self.lock_root,
                is_training=is_training)

        # Not every network variant provides an auxiliary head.
        aux_output = None
        if 'AuxOutput' in endpoints:
            aux_output = endpoints['AuxOutput']

        self.add_tower(
            scope,
            endpoints=endpoints,
            outputs=output,
            aux_outputs=aux_output,
        )

        # Add summaries for viewing model statistics on TensorBoard.
        if summaries:
            self.activation_summaries()

        return output

    def add_tower_loss(self, targets, scope=None):
        """Attach losses for the configured outputs of the tower in *scope*.

        targets[0] is the steer target, targets[1] the xyz target.
        """
        tower = self.tower(scope)
        assert 'xyz' in self.output_cfg or 'steer' in self.output_cfg

        if 'xyz' in self.output_cfg:
            target_xyz = targets[1]
            aux_output_xyz = None
            if tower.aux_outputs:
                aux_output_xyz = tower.aux_outputs['xyz']
            self.regression_loss(
                tower.outputs['xyz'], target_xyz, aux_predictions=aux_output_xyz)

        if 'steer' in self.output_cfg:
            target_steer = targets[0]
            aux_output_steer = None
            if tower.aux_outputs:
                aux_output_steer = tower.aux_outputs['steer']
            if self.output_cfg['steer'] > 1:
                # steer is integer target, one hot output, use softmax
                fabric.loss_softmax_cross_entropy_with_aux(
                    tower.outputs['steer'], target_steer, aux_logits=aux_output_steer)
            else:
                assert self.output_cfg['steer'] == 1
                # steer is float target/output, use regression /w huber loss
                self.regression_loss(
                    tower.outputs['steer'], target_steer, aux_predictions=aux_output_steer)

    def get_predictions(self, outputs, processor=None):
        """Optionally decode raw network outputs back to target space."""
        if processor:
            for k, v in outputs.items():
                outputs[k] = processor.decode_output(v, key=k)
        return outputs

    def _remap_variable_names(self, variables, checkpoint_variable_set, prefix_scope):
        """Strip *prefix_scope* from variable names for checkpoint restore."""

        def _strip_name(prefix, name):
            name = name[len(prefix):] if name.startswith(prefix) else name
            return name

        if prefix_scope:
            # strip our network prefix scope and remap accordingly
            prefix_scope += '/'
            restore_variables = {_strip_name(prefix_scope, v.op.name): v for v in variables}
            return restore_variables
        else:
            return variables

    def output_scopes(self, prefix_scope=''):
        """Return absolute variable-scope names of the output (head) layers."""
        rel_scopes = ['logits', 'Logits', 'Output', 'Output/OutputXYZ', 'Output/OutputSteer', 'Output/Fc1',
                      'AuxLogits/OutputXYZ', 'AuxLogits/OutputSteer', 'AuxLogits/Fc1']
        prefix = prefix_scope + '/' if prefix_scope else ''
        prefix += self.model_variable_scope + '/'
        abs_scopes = [prefix + x for x in rel_scopes]
        return abs_scopes

    @staticmethod
    def eval_ops(predictions, labels, processor=None):
        """Generate a simple (non tower based) loss op for use in evaluation.
        """
        ops = {}
        if 'steer' in predictions:
            steer_label = labels[0]
            steer_prediction = predictions['steer']

            if steer_prediction.get_shape()[-1].value > 1:
                # one hot steering loss (non reduced)
                steer_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    steer_prediction, steer_label, name='steer_xentropy_eval')
                # decode non-linear mapping before mse
                steer_prediction = tf.cast(tf.argmax(steer_prediction, dimension=1), tf.int32)
                if processor:
                    steer_prediction = processor.decode_output(steer_prediction, key='steer')
                    steer_label = processor.decode_output(steer_label, key='steer')
            else:
                # linear regression steering loss
                assert steer_prediction.get_shape()[-1].value == 1
                steer_loss = fabric.loss.metric_huber(steer_prediction, steer_label)
                if processor:
                    steer_prediction = processor.decode_output(steer_prediction, key='steer')
                    steer_label = processor.decode_output(steer_label, key='steer')

            steer_mse = tf.squared_difference(
                steer_prediction, steer_label, name='steer_mse_eval')

            ops['steer_loss'] = steer_loss
            ops['steer_mse'] = steer_mse
            #ops['steer_prediction'] = steer_prediction
            #ops['steer_label'] = steer_label

        if 'xyz' in predictions:
            xyz_labels = labels[1]
            xyz_predictions = predictions['xyz']
            if processor:
                xyz_labels = processor.decode_output(xyz_labels, key='xyz')
                xyz_predictions = processor.decode_output(xyz_predictions, key='xyz')
            xyz_loss = fabric.loss.metric_huber(xyz_predictions, xyz_labels)
            xyz_mse = tf.squared_difference(xyz_predictions, xyz_labels, name='xyz_mse_eval')
            ops['xyz_loss'] = xyz_loss
            ops['xyz_mse'] = xyz_mse
            ops['xyz_prediction'] = xyz_predictions
            ops['xyz_label'] = xyz_labels

        return ops
| apache-2.0 |
chrismeyersfsu/ansible | lib/ansible/modules/network/panos/panos_commit.py | 32 | 3639 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_commit
short_description: commit firewall's candidate configuration
description:
- PanOS module that will commit firewall's candidate configuration on
- the device. The new configuration will become active immediately.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
interval:
description:
- interval for checking commit job
required: false
default: 0.5
timeout:
description:
- timeout for commit job
required: false
default: None
sync:
description:
- if commit should be synchronous
required: false
default: true
'''
EXAMPLES = '''
# Commit candidate config on 192.168.1.1 in sync mode
- panos_commit:
ip_address: "192.168.1.1"
username: "admin"
password: "admin"
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
    """Entry point: commit the PAN-OS candidate configuration via the XML API."""
    argument_spec = dict(
        ip_address=dict(),
        password=dict(no_log=True),
        username=dict(default='admin'),
        interval=dict(default=0.5),
        timeout=dict(),
        sync=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python required for this module')

    # Required connection parameters are validated manually because the
    # argument_spec above does not mark them required.
    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    password = module.params["password"]
    if not password:
        module.fail_json(msg="password is required")
    username = module.params['username']
    interval = module.params['interval']
    timeout = module.params['timeout']
    sync = module.params['sync']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    # Synchronous commits block until the job completes, polling every
    # `interval` seconds, up to `timeout` (None means wait indefinitely).
    xapi.commit(
        cmd="<commit></commit>",
        sync=sync,
        interval=interval,
        timeout=timeout
    )

    module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
jcsp/manila | manila/api/v1/share_types.py | 2 | 4036 | # Copyright (c) 2014 NetApp, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The share type & share types extra specs extension."""
from oslo_utils import strutils
import six
from webob import exc
from manila.api.openstack import wsgi
from manila.api.views import types as views_types
from manila import exception
from manila.i18n import _
from manila import policy
from manila.share import share_types
RESOURCE_NAME = 'share_type'
class ShareTypesController(wsgi.Controller):
    """WSGI controller exposing share types through the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    def index(self, req):
        """List the share types visible to the requesting context."""
        ctxt = req.environ['manila.context']
        policy.check_policy(ctxt, RESOURCE_NAME, 'index')
        visible_types = self._get_share_types(req)
        req.cache_db_share_types(visible_types)
        return self._view_builder.index(req, visible_types)

    def show(self, req, id):
        """Return details for the share type identified by *id*."""
        ctxt = req.environ['manila.context']
        policy.check_policy(ctxt, RESOURCE_NAME, 'show')
        try:
            stype = share_types.get_share_type(ctxt, id)
        except exception.NotFound:
            raise exc.HTTPNotFound(explanation=_("Share type not found."))
        # Views expect the id as a string, not an integer.
        stype['id'] = six.text_type(stype['id'])
        req.cache_db_share_type(stype)
        return self._view_builder.show(req, stype)

    def default(self, req):
        """Return the configured default share type."""
        ctxt = req.environ['manila.context']
        policy.check_policy(ctxt, RESOURCE_NAME, 'default')
        try:
            stype = share_types.get_default_share_type(ctxt)
        except exception.NotFound:
            raise exc.HTTPNotFound(explanation=_("Share type not found"))
        if not stype:
            raise exc.HTTPNotFound(
                explanation=_("Default share type not found"))
        stype['id'] = six.text_type(stype['id'])
        return self._view_builder.show(req, stype)

    def _get_share_types(self, req):
        """Fetch share types matching the caller's visibility filters."""
        ctxt = req.environ['manila.context']
        if ctxt.is_admin:
            # Only admin may ask for private or "all" share types.
            is_public = self._parse_is_public(req.params.get('is_public'))
        else:
            is_public = True
        filters = {'is_public': is_public}
        matching = share_types.get_all_types(
            ctxt, search_opts=filters).values()
        return list(matching)

    @staticmethod
    def _parse_is_public(is_public):
        """Parse is_public into something usable.

        * True: API should list public share types only
        * False: API should list private share types only
        * None: API should list both public and private share types
        """
        if is_public is None:
            # Preserve the default of showing only public types.
            return True
        if six.text_type(is_public).lower() == "all":
            return None
        try:
            return strutils.bool_from_string(is_public, strict=True)
        except ValueError:
            raise exc.HTTPBadRequest(
                explanation=_('Invalid is_public filter [%s]') % is_public)
def create_resource():
    """Build the WSGI resource wrapping the share-types controller."""
    controller = ShareTypesController()
    return wsgi.Resource(controller)
| apache-2.0 |
gitprouser/appengine-bottle-skeleton | lib/cryptography/hazmat/primitives/ciphers/modes.py | 37 | 5517 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
@six.add_metaclass(abc.ABCMeta)
class Mode(object):
    # Abstract interface every cipher mode of operation must implement.
    @abc.abstractproperty
    def name(self):
        """
        A string naming this mode (e.g. "ECB", "CBC").
        """

    @abc.abstractmethod
    def validate_for_algorithm(self, algorithm):
        """
        Checks that all the necessary invariants of this (mode, algorithm)
        combination are met.
        """
@six.add_metaclass(abc.ABCMeta)
class ModeWithInitializationVector(object):
    # Marker interface for modes parameterized by an initialization vector.
    @abc.abstractproperty
    def initialization_vector(self):
        """
        The value of the initialization vector for this mode as bytes.
        """
@six.add_metaclass(abc.ABCMeta)
class ModeWithNonce(object):
    # Marker interface for modes parameterized by a nonce (e.g. CTR).
    @abc.abstractproperty
    def nonce(self):
        """
        The value of the nonce for this mode as bytes.
        """
@six.add_metaclass(abc.ABCMeta)
class ModeWithAuthenticationTag(object):
    # Marker interface for AEAD modes carrying an authentication tag.
    @abc.abstractproperty
    def tag(self):
        """
        The value of the tag supplied to the constructor of this mode.
        """
def _check_iv_length(self, algorithm):
    # Shared implementation assigned as ``validate_for_algorithm`` on the
    # IV-based mode classes below: the IV must be exactly one cipher block.
    if len(self.initialization_vector) * 8 != algorithm.block_size:
        raise ValueError("Invalid IV size ({0}) for {1}.".format(
            len(self.initialization_vector), self.name
        ))
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CBC(object):
    # Cipher Block Chaining; requires an IV of exactly one block.
    name = "CBC"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    # Read-only accessor over the private attribute set in __init__.
    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_length
@utils.register_interface(Mode)
class ECB(object):
    # Electronic Codebook: no IV and no per-algorithm constraints, so
    # validation is a no-op.
    name = "ECB"

    def validate_for_algorithm(self, algorithm):
        pass
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class OFB(object):
    # Output Feedback; requires an IV of exactly one block.
    name = "OFB"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB(object):
    # Cipher Feedback; requires an IV of exactly one block.
    name = "CFB"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
class CFB8(object):
    # Cipher Feedback with 8-bit shift; same one-block IV requirement.
    name = "CFB8"

    def __init__(self, initialization_vector):
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        self._initialization_vector = initialization_vector

    initialization_vector = utils.read_only_property("_initialization_vector")
    validate_for_algorithm = _check_iv_length
@utils.register_interface(Mode)
@utils.register_interface(ModeWithNonce)
class CTR(object):
    # Counter mode; the nonce/initial counter block must be one block long.
    name = "CTR"

    def __init__(self, nonce):
        if not isinstance(nonce, bytes):
            raise TypeError("nonce must be bytes")
        self._nonce = nonce

    nonce = utils.read_only_property("_nonce")

    def validate_for_algorithm(self, algorithm):
        # Same size rule as _check_iv_length, but phrased for a nonce.
        if len(self.nonce) * 8 != algorithm.block_size:
            raise ValueError("Invalid nonce size ({0}) for {1}.".format(
                len(self.nonce), self.name
            ))
@utils.register_interface(Mode)
@utils.register_interface(ModeWithInitializationVector)
@utils.register_interface(ModeWithAuthenticationTag)
class GCM(object):
    # Galois/Counter Mode: authenticated encryption with associated data.
    name = "GCM"
    # Per-message limits; presumably from NIST SP 800-38D — confirm.
    _MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8
    _MAX_AAD_BYTES = (2 ** 64) // 8

    def __init__(self, initialization_vector, tag=None, min_tag_length=16):
        # len(initialization_vector) must be in [1, 2 ** 64), but it's
        # impossible to actually construct a bytes object that large, so we
        # don't check for it
        if min_tag_length < 4:
            raise ValueError("min_tag_length must be >= 4")
        # `tag` is only supplied when decrypting; enforce the caller's
        # minimum acceptable tag length against truncated-tag attacks.
        if tag is not None and len(tag) < min_tag_length:
            raise ValueError(
                "Authentication tag must be {0} bytes or longer.".format(
                    min_tag_length)
            )
        if not isinstance(initialization_vector, bytes):
            raise TypeError("initialization_vector must be bytes")
        if tag is not None and not isinstance(tag, bytes):
            raise TypeError("tag must be bytes or None")
        self._initialization_vector = initialization_vector
        self._tag = tag

    tag = utils.read_only_property("_tag")
    initialization_vector = utils.read_only_property("_initialization_vector")

    def validate_for_algorithm(self, algorithm):
        # GCM has no block-size constraint on the IV, so nothing to check.
        pass
| apache-2.0 |
carmark/vbox | src/libs/libxml2-2.6.31/python/setup.py | 19 | 6680 | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 when *file* is not readable (or absent), else 0."""
    readable = os.access(file, os.R_OK)
    return 0 if readable else 1
# HOME is only used below as a fallback header-search directory; default
# to C: where the variable is unset (typically Windows).
try:
    HOME = os.environ['HOME']
except:
    HOME="C:"

if WITHDLLS:
    # libxml dlls (expected in ROOT/bin)
    dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
    dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)

    # create __init__.py for the libxmlmods package
    if not os.path.exists("libxmlmods"):
        os.mkdir("libxmlmods")
        open("libxmlmods/__init__.py","w").close()

    def altImport(s):
        # Rewrite generated import lines so the C extension modules are
        # found inside the private libxmlmods package shipping the DLLs.
        s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
        s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
        return s

# Library naming differs by platform: Windows uses the 'lib' prefix and
# needs no extra system libraries; elsewhere link against m and z.
if sys.platform.startswith('win'):
    libraryPrefix = 'lib'
    platformLibs = []
else:
    libraryPrefix = ''
    platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
    "/usr/include",
    "/usr/local/include",
    "/opt/include",
    os.path.join(ROOT,'include'),
    HOME
];

# Locate the libxml2 headers; abort the build if they cannot be found.
xml_includes=""
for dir in includes_dir:
    if not missing(dir + "/libxml2/libxml/tree.h"):
        xml_includes=dir + "/libxml2"
        break;

if xml_includes == "":
    print "failed to find headers for libxml2: update includes_dir"
    sys.exit(1)

# Locate the iconv headers the same way.
iconv_includes=""
for dir in includes_dir:
    if not missing(dir + "/iconv.h"):
        iconv_includes=dir
        break;

if iconv_includes == "":
    print "failed to find headers for libiconv: update includes_dir"
    sys.exit(1)

# those are added in the linker search path for libraries
libdirs = [
    os.path.join(ROOT,'lib'),
]
# Source/data files recorded in the generated MANIFEST further below.
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
             "libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
             "xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]

xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
              "libxslt.c", "libxsl.py", "libxslt_wrap.h",
              "xsltgenerator.py"]

# (Re)generate the libxml2 binding stubs when absent; importing the
# generator module runs the generation as a side effect.
if missing("libxml2-py.c") or missing("libxml2.py"):
    try:
        try:
            import xmlgenerator
        except:
            import generator
    except:
        print "failed to find and generate stubs for libxml2, aborting ..."
        print sys.exc_type, sys.exc_value
        sys.exit(1)

    # Assemble libxml2.py from the hand-written part (libxml.py) plus the
    # generated class definitions (libxml2class.py).
    head = open("libxml.py", "r")
    generated = open("libxml2class.py", "r")
    result = open("libxml2.py", "w")

    for line in head.readlines():
        if WITHDLLS:
            result.write(altImport(line))
        else:
            result.write(line)
    for line in generated.readlines():
        result.write(line)
    head.close()
    generated.close()
    result.close()
# libxslt is optional: build its binding only when its generator, API
# description and headers are all available.
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
    if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
        print "libxslt stub generator not found, libxslt not built"
    else:
        try:
            import xsltgenerator
        except:
            print "failed to generate stubs for libxslt, aborting ..."
            print sys.exc_type, sys.exc_value
        else:
            # Assemble libxslt.py just like libxml2.py above.
            head = open("libxsl.py", "r")
            generated = open("libxsltclass.py", "r")
            result = open("libxslt.py", "w")

            for line in head.readlines():
                if WITHDLLS:
                    result.write(altImport(line))
                else:
                    result.write(line)
            for line in generated.readlines():
                result.write(line)
            head.close()
            generated.close()
            result.close()
            with_xslt=1
else:
    with_xslt=1

# Even with stubs present, disable libxslt if its headers are missing.
if with_xslt == 1:
    xslt_includes=""
    for dir in includes_dir:
        if not missing(dir + "/libxslt/xsltconfig.h"):
            xslt_includes=dir + "/libxslt"
            break;
    if xslt_includes == "":
        print "failed to find headers for libxslt: update includes_dir"
        with_xslt = 0
# Final distutils configuration: module list, extension sources, macros.
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
    modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
    macros.append(('_REENTRANT','1'))
if with_xslt == 1:
    descr = "libxml2 and libxslt package"
    if not sys.platform.startswith('win'):
        #
        # We are gonna build 2 identical shared libs with merge initializing
        # both libxml2mod and libxsltmod
        #
        c_files = c_files + ['libxslt-py.c', 'libxslt.c']
        xslt_c_files = c_files
        macros.append(('MERGED_MODULES', '1'))
    else:
        #
        # On windows the MERGED_MODULE option is not needed
        # (and does not work)
        #
        xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
        libs.insert(0, libraryPrefix + 'exslt')
        libs.insert(0, libraryPrefix + 'xslt')
    includes.append(xslt_includes)
    modules.append('libxslt')

extens=[Extension('libxml2mod', c_files, include_dirs=includes,
                  library_dirs=libdirs,
                  libraries=libs, define_macros=macros)]
if with_xslt == 1:
    extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
                            library_dirs=libdirs,
                            libraries=libs, define_macros=macros))

# Generate a MANIFEST listing everything needed for a source distribution.
if missing("MANIFEST"):
    manifest = open("MANIFEST", "w")
    manifest.write("setup.py\n")
    for file in xml_files:
        manifest.write(file + "\n")
    if with_xslt == 1:
        for file in xslt_files:
            manifest.write(file + "\n")
    manifest.close()

# When bundling DLLs, install them as package data next to the modules.
if WITHDLLS:
    ext_package = "libxmlmods"
    if sys.version >= "2.2":
        base = "lib/site-packages/"
    else:
        base = ""
    data_files = [(base+"libxmlmods",dlls)]
else:
    ext_package = None
    data_files = []

setup (name = "libxml2-python",
       # On *nix, the version number is created from setup.py.in
       # On windows, it is set by configure.js
       version = "2.6.31",
       description = descr,
       author = "Daniel Veillard",
       author_email = "veillard@redhat.com",
       url = "http://xmlsoft.org/python.html",
       licence="MIT Licence",
       py_modules=modules,
       ext_modules=extens,
       ext_package=ext_package,
       data_files=data_files,
       )
sys.exit(0)
| gpl-2.0 |
OpusVL/odoo | addons/hw_proxy/__openerp__.py | 8 | 1608 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo module manifest for the hardware proxy framework.
# Fix: the user-facing description misspelled the module name as
# "Hardware Poxy" and read "This modules"; both corrected (the RST
# underline is re-sized to match the new title length).
{
    'name': 'Hardware Proxy',
    'version': '1.0',
    'category': 'Point Of Sale',
    'sequence': 6,
    'summary': 'Connect the Web Client to Hardware Peripherals',
    'website': 'https://www.odoo.com/page/point-of-sale',
    'description': """
Hardware Proxy
==============

This module allows you to remotely use peripherals connected to this server.

This module only contains the enabling framework. The actual devices drivers
are found in other modules that must be installed separately.

""",
    'author': 'OpenERP SA',
    # No dependencies: driver modules depend on this one, not vice versa.
    'depends': [],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Detection model for Bulgarian encoded as ISO-8859-5.
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    # Fraction of observed sequences covered by the first 512 entries
    # (see the statistics comment above the table).
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}

# Same language model keyed to the Windows-1251 byte layout.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}

# flake8: noqa
| gpl-2.0 |
bittner/django-allauth | allauth/socialaccount/providers/xing/tests.py | 7 | 1942 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from allauth.socialaccount.tests import OAuthTestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import XingProvider
class XingTests(OAuthTestsMixin, TestCase):
    # Exercises the XING OAuth provider flow against a canned API payload.
    provider_id = XingProvider.id

    def get_mocked_response(self):
        """Return the stubbed XING /users response used instead of a live
        API call by the OAuthTestsMixin machinery."""
        return [MockedResponse(200, """
{"users":[{"id":"20493333_1cd028","active_email":"raymond.penners@example.com",
"badges":[],"birth_date":{"year":null,"month":null,"day":null},
"business_address":{"street":null,"zip_code":null,"city":null,"province":null,
"country":"NL","email":null,"fax":null,"phone":null,"mobile_phone":null},
"display_name":"Raymond Penners","educational_background":
{"primary_school_id":null,"schools":[],"qualifications":[]},
"employment_status":"EMPLOYEE","first_name":"Raymond","gender":"m",
"haves":null,"instant_messaging_accounts":{},"interests":null,"languages":
{"nl":null},"last_name":"Penners","organisation_member":null,
"page_name":"Raymond_Penners",
"permalink":"https://www.xing.com/profile/Raymond_Penners",
"photo_urls":{"thumb":"https://www.xing.com/img/n/nobody_m.30x40.jpg",
"large":"https://www.xing.com/img/n/nobody_m.140x185.jpg","mini_thumb":
"https://www.xing.com/img/n/nobody_m.18x24.jpg","maxi_thumb":
"https://www.xing.com/img/n/nobody_m.70x93.jpg","medium_thumb":
"https://www.xing.com/img/n/nobody_m.57x75.jpg"},"premium_services":[],
"private_address":{"street":null,"zip_code":null,"city":null,"province":null,
"country":null,"email":"raymond.penners@example.com","fax":null,
"phone":null,"mobile_phone":null},"professional_experience":
{"primary_company":{"name":null,"url":null,"tag":null,"title":null,
"begin_date":null,"end_date":null,"description":null,"industry":"OTHERS",
"company_size":null,"career_level":null},"non_primary_companies":[],
"awards":[]},"time_zone":{"utc_offset":2.0,"name":"Europe/Berlin"},
"wants":null,"web_profiles":{}}]}
""")]
| mit |
apanju/GMIO_Odoo | addons/stock_dropshipping/stock_dropshipping.py | 160 | 2228 | # coding: utf-8
from openerp import models, api, _
from openerp.exceptions import Warning
class sale_order_line(models.Model):
    _inherit = 'sale.order.line'

    @api.multi
    def _check_routing(self, product, warehouse):
        """Tell the caller whether stock verification can be skipped.

        A route whose pull rule moves goods straight from a supplier
        location to a customer location never passes through our stock,
        so there is nothing to verify for availability.
        """
        skip = super(sale_order_line, self)._check_routing(product, warehouse)
        if skip:
            return skip
        for line in self:
            for rule in line.route_id.pull_ids:
                picking_type = rule.picking_type_id
                if (picking_type.default_location_src_id.usage == 'supplier'
                        and picking_type.default_location_dest_id.usage ==
                        'customer'):
                    return True
        return skip
class purchase_order(models.Model):
    _inherit = 'purchase.order'

    @api.one
    def _check_invoice_policy(self):
        """Reject incompatible invoicing policies on a dropship PO."""
        # Only relevant for POs invoiced on reception that ship straight
        # to a customer location (the dropship case).
        if self.invoice_method != 'picking' or self.location_id.usage != 'customer':
            return
        for proc in self.order_line.mapped('procurement_ids'):
            if proc.sale_line_id.order_id.order_policy == 'picking':
                raise Warning(_('In the case of a dropship route, it is not possible to have an invoicing control set on "Based on incoming shipments" and a sale order with an invoice creation on "On Delivery Order"'))

    @api.multi
    def wkf_confirm_order(self):
        """Raise a warning to forbid to have both purchase and sale invoices
        policies at delivery in dropshipping. As it is not implemented.
        This check can be disabled setting 'no_invoice_policy_check' in context
        """
        if not self.env.context.get('no_invoice_policy_check'):
            self._check_invoice_policy()
        super(purchase_order, self).wkf_confirm_order()
class procurement_order(models.Model):
    _inherit = 'procurement.order'

    @api.model
    def update_origin_po(self, po, proc):
        """Append the procurement origin to the PO origin if not present."""
        super(procurement_order, self).update_origin_po(po, proc)
        if proc.sale_line_id and proc.origin not in po.origin:
            combined = po.origin + ', ' + proc.origin
            po.sudo().write({'origin': combined})
| agpl-3.0 |
franbull/didthattoday | didthattoday/didthattoday/tests/test_step_views.py | 1 | 2921 | import simplejson
from base import IntegrationTestBase
from didthattoday.models import Step
from didthattoday.models import Habit
class TestStepViews(IntegrationTestBase):

    def create_habit(self):
        """Create a single habit through the API and return its id."""
        assert self.session.query(Habit).count() == 0
        payload = {'name': 'woo', 'description': 'hoo'}
        created = self.app.post('/habits', simplejson.dumps(payload))
        self.assertEqual(created.status_int, 200)
        listing = self.app.get('/habits')
        self.assertEqual(listing.status_int, 200)
        habits = listing.json['habits']
        assert 1 == len(habits)
        habit = habits[0]
        assert habit['name'] == 'woo'
        return habit['id']
def test_steps(self):
assert(self.session.query(Step).count() == 0)
habit_id = self.create_habit()
step = {'comment': 'woo', 'happened_at': '2014-03-09T04:58:25.513Z',
'habit_id': habit_id}
res = self.app.post('/steps', simplejson.dumps(step))
self.assertEqual(res.status_int, 200)
res = self.app.get('/steps')
self.assertEqual(res.status_int, 200)
rd = res.json
assert(1 == len(rd['steps']))
step = rd['steps'][0]
assert(step['comment'] == 'woo')
print Step.Session.query(Step).count()
a = Step.Session.query(Step).first()
print a.happened_at
import pdb; pdb.set_trace()
#def test_get_habit(self):
#assert(self.session.query(Habit).count() == 0)
#habit = {'name': 'woo', 'description': 'hoo'}
#res = self.app.post('/habits', simplejson.dumps(habit))
#self.assertEqual(res.status_int, 200)
#res = self.app.get('/habits')
#self.assertEqual(res.status_int, 200)
#rd = res.json
#assert(1 == len(rd['habits']))
#habit = rd['habits'][0]
#assert(habit['name'] == 'woo')
#id = habit['id']
#res = self.app.get('/habit/%s' % id)
#self.assertEqual(res.status_int, 200)
#rd = res.json
#assert(rd['id'] == id)
#assert(rd['name'] == 'woo')
#def test_update_habit(self):
#assert(self.session.query(Habit).count() == 0)
#habit = {'name': 'woo', 'description': 'hoo'}
#res = self.app.post('/habits', simplejson.dumps(habit))
#self.assertEqual(res.status_int, 200)
#res = self.app.get('/habits')
#self.assertEqual(res.status_int, 200)
#rd = res.json
#assert(1 == len(rd['habits']))
#habit = rd['habits'][0]
#assert(habit['name'] == 'woo')
#id = habit['id']
#habit['name'] = 'coo'
#res = self.app.put('/habit/%s' % id, simplejson.dumps(habit))
#self.assertEqual(res.status_int, 200)
#res = self.app.get('/habit/%s' % id)
#self.assertEqual(res.status_int, 200)
#rd = res.json
#assert(rd['id'] == id)
#assert(rd['name'] == 'coo')
| apache-2.0 |
smallyear/linuxLearn | salt/salt/states/npm.py | 3 | 9384 | # -*- coding: utf-8 -*-
'''
Installation of NPM Packages
============================
These states manage the installed packages for node.js using the Node Package
Manager (npm). Note that npm must be installed for these states to be
available, so npm states should include a requisite to a pkg.installed state
for the package which provides npm (simply ``npm`` in most cases). Example:
.. code-block:: yaml
npm:
pkg.installed
yaml:
npm.installed:
- require:
- pkg: npm
'''
# Import salt libs
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
import salt.ext.six as six
def __virtual__():
    '''
    Only load if the npm module is available in __salt__
    '''
    # Salt's loader unpacks a 2-tuple return as (virtual, error_reason) and
    # only reports the reason when the first element is falsey, so this one
    # expression covers both the available case ('npm') and the missing
    # binary case (False, reason).
    return 'npm' if 'npm.list' in __salt__ else False, '\'npm\' binary not found on system'
def installed(name,
              pkgs=None,
              dir=None,
              user=None,
              force_reinstall=False,
              registry=None,
              env=None):
    '''
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        coffee-script:
          npm.installed:
            - user: someuser

        coffee-script@1.0.1:
          npm.installed: []

    name
        The package to install

        .. versionchanged:: 2014.7.2
            This parameter is no longer lowercased by salt so that
            case-sensitive NPM package names will work.

    pkgs
        A list of packages to install with a single npm invocation; specifying
        this argument will ignore the ``name`` argument

        .. versionadded:: 2014.7.0

    dir
        The target directory in which to install the package, or None for
        global installation

    user
        The user to run NPM with

        .. versionadded:: 0.17.0

    registry
        The NPM registry from which to install the package

        .. versionadded:: 2014.7.0

    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.

        .. versionadded:: 2014.7.0

    force_reinstall
        Install the package even if it is already installed
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    pkg_list = pkgs if pkgs is not None else [name]

    try:
        installed_pkgs = __salt__['npm.list'](dir=dir, runas=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        return ret
    else:
        # Normalize to a plain dict (npm.list may return another mapping
        # type); the previous per-item rebuild was equivalent but noisier.
        installed_pkgs = dict(six.iteritems(installed_pkgs))

    pkgs_satisfied = []
    pkgs_to_install = []

    def _pkg_is_installed(pkg, installed_pkgs):
        '''
        Helper function to determine if a package is installed

        This performs more complex comparison than just checking
        keys, such as examining source repos to see if the package
        was installed by a different name from the same repo

        :pkg str: The package to compare
        :installed_pkgs: A dictionary produced by npm list --json
        '''
        # Fix: derive the bare package name from this function's own
        # argument.  The old code referenced the enclosing loop's
        # ``pkg_name`` variable, silently ignoring the ``pkg`` parameter.
        pkg_name = pkg.partition('@')[0].strip()
        if (pkg_name in installed_pkgs and
                'version' in installed_pkgs[pkg_name]):
            return True
        # Check to see if we are trying to install from a URI
        elif '://' in pkg_name:  # TODO Better way?
            for pkg_details in installed_pkgs.values():
                try:
                    pkg_from = pkg_details.get('from', '').split('://')[1]
                    if pkg_name.split('://')[1] == pkg_from:
                        return True
                except IndexError:
                    pass
        return False

    for pkg in pkg_list:
        # Split off an explicit "name@version" request if given.
        pkg_name, _, pkg_ver = pkg.partition('@')
        pkg_name = pkg_name.strip()

        if force_reinstall is True:
            pkgs_to_install.append(pkg)
            continue
        if not _pkg_is_installed(pkg, installed_pkgs):
            pkgs_to_install.append(pkg)
            continue

        installed_name_ver = '{0}@{1}'.format(pkg_name,
                                              installed_pkgs[pkg_name]['version'])

        # If given an explicit version check the installed version matches.
        if pkg_ver:
            if installed_pkgs[pkg_name].get('version') != pkg_ver:
                pkgs_to_install.append(pkg)
            else:
                pkgs_satisfied.append(installed_name_ver)
                continue
        else:
            pkgs_satisfied.append(installed_name_ver)
            continue

    if __opts__['test']:
        ret['result'] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append('NPM package(s) {0!r} are set to be installed'
                               .format(', '.join(pkgs_to_install)))
            ret['changes'] = {'old': [], 'new': pkgs_to_install}
        if pkgs_satisfied:
            comment_msg.append('Package(s) {0!r} satisfied by {1}'
                               .format(', '.join(pkg_list), ', '.join(pkgs_satisfied)))
            ret['result'] = True
        ret['comment'] = '. '.join(comment_msg)
        return ret

    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Package(s) {0!r} satisfied by {1}'
                          .format(', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        return ret

    try:
        cmd_args = {
            'dir': dir,
            'runas': user,
            'registry': registry,
            'env': env,
            'pkgs': pkg_list,
        }
        call = __salt__['npm.install'](**cmd_args)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing {0!r}: {1}'.format(
            ', '.join(pkg_list), err)
        return ret

    # npm.install returns parsed JSON (list/dict) on success, a bare string
    # when the output could not be parsed.
    if call and (isinstance(call, list) or isinstance(call, dict)):
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Package(s) {0!r} successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package(s) {0!r}'.format(
            ', '.join(pkg_list))

    return ret
def removed(name,
            dir=None,
            user=None):
    '''
    Verify that the given package is not installed.

    dir
        The target directory in which to install the package, or None for
        global installation

    user
        The user to run NPM with

        .. versionadded:: 0.17.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    try:
        # NOTE(review): unlike installed(), this listing does not pass
        # runas=user or env -- confirm whether the lookup should also run
        # as `user` when a per-user prefix is involved.
        installed_pkgs = __salt__['npm.list'](dir=dir)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error uninstalling {0!r}: {1}'.format(name, err)
        return ret

    # Nothing to do if the package is already absent.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package {0!r} is not installed'.format(name)
        return ret

    # Dry-run mode: report the pending removal without executing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Package {0!r} is set to be removed'.format(name)
        return ret

    if __salt__['npm.uninstall'](pkg=name, dir=dir, runas=user):
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Package {0!r} was successfully removed'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Error removing package {0!r}'.format(name)

    return ret
def bootstrap(name,
              user=None,
              silent=True):
    '''
    Bootstraps a node.js application.

    Will execute 'npm install --json' on the specified directory.

    user
        The user to run NPM with

        .. versionadded:: 0.17.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    if __opts__['test']:
        try:
            # dry_run=True only reports what npm would install.
            call = __salt__['npm.install'](dir=name, runas=user, pkg=None, silent=silent, dry_run=True)
        except (CommandNotFoundError, CommandExecutionError) as err:
            ret['result'] = False
            ret['comment'] = 'Error Bootstrapping {0!r}: {1}'.format(name, err)
            return ret
        ret['result'] = None
        ret['changes'] = {'old': [], 'new': call}
        ret['comment'] = '{0} is set to be bootstrapped'.format(name)
        return ret

    try:
        call = __salt__['npm.install'](dir=name, runas=user, pkg=None, silent=silent)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error Bootstrapping {0!r}: {1}'.format(name, err)
        return ret

    # A falsey result means npm had nothing to install.
    if not call:
        ret['result'] = True
        ret['comment'] = 'Directory is already bootstrapped'
        return ret

    # npm.install will return a string if it can't parse a JSON result
    if isinstance(call, str):
        ret['result'] = False
        ret['changes'] = call
        ret['comment'] = 'Could not bootstrap directory'
    else:
        ret['result'] = True
        ret['changes'] = {name: 'Bootstrapped'}
        ret['comment'] = 'Directory was successfully bootstrapped'

    return ret
| apache-2.0 |
Flexget/Flexget | flexget/plugins/filter/content_filter.py | 3 | 4772 | from fnmatch import fnmatch
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
logger = logger.bind(name='content_filter')
class FilterContentFilter:
    """
    Rejects entries based on the filenames in the content. Torrent files only right now.

    Example::

      content_filter:
        require:
          - '*.avi'
          - '*.mkv'
    """

    schema = {
        'type': 'object',
        'properties': {
            # These two properties allow a string or list of strings
            'require': one_or_more({'type': 'string'}),
            'require_all': one_or_more({'type': 'string'}),
            'reject': one_or_more({'type': 'string'}),
            'require_mainfile': {'type': 'boolean', 'default': False},
            'strict': {'type': 'boolean', 'default': False},
        },
        'additionalProperties': False,
    }

    def prepare_config(self, config):
        # Normalize the one-or-more options so later code can assume lists.
        for key in ['require', 'require_all', 'reject']:
            if key in config:
                if isinstance(config[key], str):
                    config[key] = [config[key]]
        return config

    def process_entry(self, task, entry, config):
        """
        Process an entry and reject it if it doesn't pass filter.

        :param task: Task entry belongs to.
        :param entry: Entry to process
        :return: True, if entry was rejected.
        """
        # Entries without parsed content files are handled (optionally) by
        # the 'strict' branch in on_task_modify, not here.
        if 'content_files' in entry:
            files = entry['content_files']
            logger.debug('{} files: {}', entry['title'], files)

            def matching_mask(files, masks):
                """Returns matching mask if any files match any of the masks, false otherwise"""
                for file in files:
                    for mask in masks:
                        if fnmatch(file, mask):
                            return mask
                return False

            # Avoid confusion by printing a reject message to info log, as
            # download plugin has already printed a downloading message.
            if config.get('require'):
                if not matching_mask(files, config['require']):
                    logger.info(
                        'Entry {} does not have any of the required filetypes, rejecting',
                        entry['title'],
                    )
                    entry.reject('does not have any of the required filetypes', remember=True)
                    return True
            if config.get('require_all'):
                # Make sure each mask matches at least one of the contained files
                if not all(
                    any(fnmatch(file, mask) for file in files) for mask in config['require_all']
                ):
                    logger.info(
                        'Entry {} does not have all of the required filetypes, rejecting',
                        entry['title'],
                    )
                    entry.reject('does not have all of the required filetypes', remember=True)
                    return True
            if config.get('reject'):
                mask = matching_mask(files, config['reject'])
                if mask:
                    logger.info('Entry {} has banned file {}, rejecting', entry['title'], mask)
                    entry.reject('has banned file %s' % mask, remember=True)
                    return True
            if config.get('require_mainfile') and len(files) > 1:
                # The largest file must account for at least 90% of the
                # torrent size to count as a "main file".
                best = None
                for f in entry['torrent'].get_filelist():
                    if not best or f['size'] > best:
                        best = f['size']
                if (100 * float(best) / float(entry['torrent'].size)) < 90:
                    logger.info('Entry {} does not have a main file, rejecting', entry['title'])
                    entry.reject('does not have a main file', remember=True)
                    return True

    @plugin.priority(150)
    def on_task_modify(self, task, config):
        # In test/learn mode content file info may be missing; the plugin
        # still runs (the early `return` is deliberately commented out).
        if task.options.test or task.options.learn:
            logger.info(
                'Plugin is partially disabled with --test and --learn '
                'because content filename information may not be available'
            )
            # return

        config = self.prepare_config(config)
        for entry in task.accepted:
            if self.process_entry(task, entry, config):
                task.rerun(plugin='content_filter')
            elif 'content_files' not in entry and config.get('strict'):
                entry.reject('no content files parsed for entry', remember=True)
                task.rerun(plugin='content_filter')
@event('plugin.register')
def register_plugin():
    # Register the filter under the 'content_filter' keyword; api_ver=2
    # selects the (task, config) plugin API.
    plugin.register(FilterContentFilter, 'content_filter', api_ver=2)
| mit |
wcmitchell/insights-core | insights/combiners/tests/test_virt_who_conf.py | 1 | 4088 | from insights.combiners.virt_who_conf import AllVirtWhoConf
from insights.parsers.virt_who_conf import VirtWhoConf
from insights.parsers.sysconfig import VirtWhoSysconfig
from insights.tests import context_wrap
# --- Test fixtures -------------------------------------------------------
# Per-hypervisor snippets as they would appear in /etc/virt-who.d/.
VWHO_D_CONF_ESX = """
## This is a template for virt-who configuration files. Please see
## virt-who-config(5) manual page for detailed information.
##
## virt-who checks all files in /etc/virt-who.d/ if they're valid ini-like
## files and uses them as configuration. Each file might contain more configs.
##
## You can uncomment and fill following template or create new file with
## similar content.
## For complete list of options, see virt-who-config(5) manual page.
## Terse version of the config template:
[esx_1]
type=esx
server=10.72.32.219
#encrypted_password=
owner=Satellite
env=Satellite
"""

VWHO_D_CONF_HYPER = """
## This is a template for virt-who configuration files. Please see
## virt-who-config(5) manual page for detailed information.
##
## virt-who checks all files in /etc/virt-who.d/ if they're valid ini-like
## files and uses them as configuration. Each file might contain more configs.
##
## You can uncomment and fill following template or create new file with
## similar content.
## For complete list of options, see virt-who-config(5) manual page.
## Terse version of the config template:
[hyperv_1]
type=hyperv
server=10.72.32.209
#encrypted_password=
owner=Satellite
env=Satellite
"""

# Global /etc/virt-who.conf with [global] and [defaults] sections.
VWHO_CONF = """
## This is a template for virt-who global configuration files. Please see
## virt-who-config(5) manual page for detailed information.
##
## virt-who checks /etc/virt-who.conf for sections 'global' and 'defaults'.
## The sections and their values are explained below.
## NOTE: These sections retain their special meaning and function only when present in /etc/virt-who.conf
##
## You can uncomment and fill following template or create new file with
## similar content.
#Terse version of the general config template:
[global]
interval=3600
debug=False
oneshot=False
#log_per_config=False
#log_dir=
#log_file=
#configs=
[defaults]
owner=Satellite
env=Satellite
"""

# /etc/sysconfig/virt-who variants covering the supported subscription
# manager types (Satellite 6, legacy Satellite 5, SAM, Candlepin).
SYS_VIRTWHO = """
VIRTWHO_BACKGROUND=0
VIRTWHO_ONE_SHOT=1
VIRTWHO_INTERVAL=1000
VIRTWHO_SATELLITE6=1
""".strip()

SYS_VIRTWHO_SAT_LEGACY = """
VIRTWHO_BACKGROUND=1
VIRTWHO_SATELLITE=1
""".strip()

SYS_VIRTWHO_SAM = """
VIRTWHO_SAM=1
""".strip()

SYS_VIRTWHO_CP = """
VIRTWHO_SAM=0
""".strip()
def test_virt_who_conf_1():
    """Sysconfig plus global conf plus two virt-who.d snippets combine."""
    vw_sysconf = VirtWhoSysconfig(context_wrap(SYS_VIRTWHO))
    vwho_conf = VirtWhoConf(context_wrap(VWHO_CONF))
    vwhod_conf1 = VirtWhoConf(context_wrap(VWHO_D_CONF_HYPER))
    vwhod_conf2 = VirtWhoConf(context_wrap(VWHO_D_CONF_ESX))
    result = AllVirtWhoConf(vw_sysconf, [vwho_conf, vwhod_conf1, vwhod_conf2])
    # Sysconfig values take precedence over the [global] section values.
    assert result.background is False
    assert result.oneshot is True
    assert result.interval == 1000
    assert result.sm_type == 'sat6'
    assert sorted(result.hypervisor_types) == sorted(['esx', 'hyperv'])
    # NOTE(review): sorting a list of dicts relies on Python-2 ordering
    # semantics -- confirm hypervisors are comparable on Python 3.
    assert sorted(result.hypervisors) == sorted([
        {'name': 'esx_1', 'server': '10.72.32.219', 'env': 'Satellite',
         'owner': 'Satellite', 'type': 'esx'},
        {'name': 'hyperv_1', 'server': '10.72.32.209', 'env': 'Satellite',
         'owner': 'Satellite', 'type': 'hyperv'}])
def test_virt_who_conf_2():
    """Legacy Satellite 5 sysconfig: [global] interval is used as-is."""
    vw_sysconf = VirtWhoSysconfig(context_wrap(SYS_VIRTWHO_SAT_LEGACY))
    vwho_conf = VirtWhoConf(context_wrap(VWHO_CONF))
    result = AllVirtWhoConf(vw_sysconf, [vwho_conf])
    assert result.background is True
    assert result.oneshot is False
    assert result.interval == 3600
    assert result.sm_type == 'sat5'
def test_virt_who_conf_3():
    """VIRTWHO_SAM=1 selects the SAM subscription manager type."""
    vw_sysconf = VirtWhoSysconfig(context_wrap(SYS_VIRTWHO_SAM))
    vwho_conf = VirtWhoConf(context_wrap(VWHO_CONF))
    result = AllVirtWhoConf(vw_sysconf, [vwho_conf])
    assert result.sm_type == 'sam'
def test_virt_who_conf_4():
    """VIRTWHO_SAM=0 falls back to the Candlepin ('cp') type."""
    vw_sysconf = VirtWhoSysconfig(context_wrap(SYS_VIRTWHO_CP))
    vwho_conf = VirtWhoConf(context_wrap(VWHO_CONF))
    result = AllVirtWhoConf(vw_sysconf, [vwho_conf])
    assert result.sm_type == 'cp'
| apache-2.0 |
EnerNOC/smallfoot-sleekxmpp | sleekxmpp/xmlstream/handler/callback.py | 2 | 2874 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream.handler.base import BaseHandler
class Callback(BaseHandler):

    """
    A stanza handler that invokes a callback function for every
    matched stanza.

    By default the callback fires in the main event loop; creating the
    handler with ``instream=True`` makes it fire immediately during
    stream processing instead.  All callbacks share a single thread, so
    long-running work should be signalled onward (e.g. via the SleekXMPP
    object's event() method) rather than performed here.

    Methods:
        prerun -- Overrides BaseHandler.prerun
        run    -- Overrides BaseHandler.run
    """

    def __init__(self, name, matcher, pointer, thread=False,
                 once=False, instream=False, stream=None):
        """
        Create a new callback handler.

        Arguments:
            name     -- The name of the handler.
            matcher  -- A matcher object used to select stanzas.
            pointer  -- The function invoked with each matched stanza.
            thread   -- DEPRECATED. Kept only for backwards compatibility.
            once     -- If True, discard the handler after one use.
                        Defaults to False.
            instream -- If True, execute the callback during stream
                        processing instead of in the main event loop.
            stream   -- The XMLStream instance this handler monitors.
        """
        BaseHandler.__init__(self, name, matcher, stream)
        self._pointer = pointer
        self._once = once
        self._instream = instream

    def prerun(self, payload):
        """
        Execute the callback during stream processing when the handler
        was created with ``instream=True``.

        Overrides BaseHandler.prerun

        Arguments:
            payload -- The matched stanza object.
        """
        BaseHandler.prerun(self, payload)
        if self._instream:
            self.run(payload, True)

    def run(self, payload, instream=False):
        """
        Execute the callback function with the matched stanza payload.

        Overrides BaseHandler.run

        Arguments:
            payload  -- The matched stanza object.
            instream -- Force execution during stream processing;
                        used only by prerun. Defaults to False.
        """
        # An instream-only handler must not fire from the event loop.
        if self._instream and not instream:
            return
        BaseHandler.run(self, payload)
        self._pointer(payload)
        if self._once:
            self._destroy = True
| mit |
grevutiu-gabriel/sympy | sympy/core/tests/test_diff.py | 115 | 2793 | from sympy import Symbol, Rational, cos, sin, tan, cot, exp, log, Function, \
Derivative, Expr, symbols, pi, I, S
from sympy.utilities.pytest import raises
def test_diff():
    """Basic Expr.diff behavior: constants, powers, products, arg checks."""
    x, y = symbols('x, y')
    assert Rational(1, 3).diff(x) is S.Zero
    assert I.diff(x) is S.Zero
    assert pi.diff(x) is S.Zero
    # Zeroth derivative is the expression itself.
    assert x.diff(x, 0) == x
    assert (x**2).diff(x, 2, x) == 0
    assert (x**2).diff(x, y, 0) == 2*x
    assert (x**2).diff(x, y) == 0
    # The differentiation variable must be a symbol, not a number.
    raises(ValueError, lambda: x.diff(1, x))
    a = Symbol("a")
    b = Symbol("b")
    c = Symbol("c")
    p = Rational(5)
    e = a*b + b**p
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4
    assert e.diff(b).diff(a) == Rational(1)
    e = a*(b + c)
    assert e.diff(a) == b + c
    assert e.diff(b) == a
    assert e.diff(b).diff(a) == Rational(1)
    e = c**p
    assert e.diff(c, 6) == Rational(0)
    assert e.diff(c, 5) == Rational(120)
    e = c**Rational(2)
    assert e.diff(c) == 2*c
    e = a*b*c
    assert e.diff(c) == a*b
def test_diff2():
    """Derivatives of mixed trig/polynomial and exponential expressions."""
    n3 = Rational(3)
    n2 = Rational(2)
    n6 = Rational(6)
    x, c = map(Symbol, 'xc')
    e = n3*(-n2 + x**n2)*cos(x) + x*(-n6 + x**n2)*sin(x)
    assert e == 3*(-2 + x**2)*cos(x) + x*(-6 + x**2)*sin(x)
    assert e.diff(x).expand() == x**3*cos(x)
    e = (x + 1)**3
    assert e.diff(x) == 3*(x + 1)**2
    e = x*(x + 1)**3
    assert e.diff(x) == (x + 1)**3 + 3*x*(x + 1)**2
    e = 2*exp(x*x)*x
    assert e.diff(x) == 2*exp(x**2) + 4*x**2*exp(x**2)
def test_diff3():
    """Chain rule through sin/tan/log and powers with symbolic bases."""
    a, b, c = map(Symbol, 'abc')
    p = Rational(5)
    e = a*b + sin(b**p)
    assert e == a*b + sin(b**5)
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4*cos(b**5)
    e = tan(c)
    assert e == tan(c)
    # Any of the equivalent forms of d/dc tan(c) is acceptable.
    assert e.diff(c) in [cos(c)**(-2), 1 + sin(c)**2/cos(c)**2, 1 + tan(c)**2]
    e = c*log(c) - c
    assert e == -c + c*log(c)
    assert e.diff(c) == log(c)
    e = log(sin(c))
    assert e == log(sin(c))
    assert e.diff(c) in [sin(c)**(-1)*cos(c), cot(c)]
    e = (Rational(2)**a/log(Rational(2)))
    assert e == 2**a*log(Rational(2))**(-1)
    assert e.diff(a) == 2**a
def test_diff_no_eval_derivative():
    """Expr subclasses without _eval_derivative stay unevaluated."""
    class My(Expr):
        def __new__(cls, x):
            return Expr.__new__(cls, x)

    x, y = symbols('x y')
    # My doesn't have its own _eval_derivative method
    assert My(x).diff(x).func is Derivative
    # it doesn't have y so it shouldn't need a method for this case
    assert My(x).diff(y) == 0
def test_speed():
    """High-order derivative of a symbol must short-circuit to zero."""
    # this should return in 0.0s. If it takes forever, it's wrong.
    x = Symbol("x")
    assert x.diff(x, 10**8) == 0
def test_deriv_noncommutative():
    """Differentiation preserves the order of noncommutative factors."""
    A = Symbol("A", commutative=False)
    f = Function("f")
    x = Symbol("x")
    assert A*f(x)*A == f(x)*A**2
    assert A*f(x).diff(x)*A == f(x).diff(x) * A**2
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/network/nxos/nxos_pim.py | 18 | 6260 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of a PIM instance.
description:
- Manages configuration of a Protocol Independent Multicast (PIM) instance.
author: Gabriele Gerbino (@GGabriele)
options:
bfd:
description:
- Enables BFD on all PIM interfaces.
- "Dependency: 'feature bfd'"
version_added: "2.9"
type: str
choices: ['enable', 'disable']
ssm_range:
description:
- Configure group ranges for Source Specific Multicast (SSM).
Valid values are multicast addresses or the keyword C(none)
or keyword C(default). C(none) removes all SSM group ranges.
C(default) will set ssm_range to the default multicast address.
If you set multicast address, please ensure that it is not the
same as the C(default), otherwise use the C(default) option.
required: true
'''
EXAMPLES = '''
- name: Configure ssm_range, enable bfd
nxos_pim:
bfd: enable
ssm_range: "224.0.0.0/8"
- name: Set to default
nxos_pim:
ssm_range: default
- name: Remove all ssm group ranges
nxos_pim:
ssm_range: none
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample:
- ip pim bfd
- ip pim ssm range 224.0.0.0/8
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Maps module argument names to the NX-OS command fragments they drive.
PARAM_TO_COMMAND_KEYMAP = {
    'bfd': 'ip pim bfd',
    'ssm_range': 'ip pim ssm range',
}
def get_existing(module, args):
    """Extract the current PIM settings named in *args* from the device's
    running configuration and return them as a dict."""
    existing = {}
    config = str(get_config(module))

    for arg in args:
        if 'ssm_range' in arg:
            # <value> may be 'n.n.n.n/s', 'none', or 'default'
            m = re.search(r'ssm range (?P<value>(?:[\s\d.\/]+|none|default))?$', config, re.M)
            if m:
                # Remove rsvd SSM range
                # NOTE(review): assumes the matched line always carries a
                # value; a bare 'ip pim ssm range' line would make
                # group('value') None and this .replace() would raise --
                # confirm against real device output.
                value = m.group('value').replace('232.0.0.0/8', '')
                existing[arg] = value.split()
        elif 'bfd' in arg and 'ip pim bfd' in config:
            existing[arg] = 'enable'

    return existing
def apply_key_map(key_map, table):
    """Return *table* re-keyed through *key_map*, dropping None values.

    Keys absent from *key_map* are mapped to None (dict.get semantics),
    matching the original implementation.
    """
    return dict(
        (key_map.get(key), value)
        for key, value in table.items()
        if value is not None
    )
def get_commands(module, existing, proposed, candidate):
    """Translate *proposed* changes into NX-OS commands and add them to
    *candidate* (a CustomNetworkConfig).

    NOTE(review): *module* and *existing* are currently unused here; they
    appear to be kept for signature symmetry with similar nxos modules.
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)

    for key, value in proposed_commands.items():
        command = ''
        if key == 'ip pim ssm range':
            if value == 'default':
                # no cmd needs a value but the actual value does not matter
                command = 'no ip pim ssm range none'
            elif value == 'none':
                command = 'ip pim ssm range none'
            elif value:
                command = 'ip pim ssm range {0}'.format(value)
        elif key == 'ip pim bfd':
            no_cmd = 'no ' if value == 'disable' else ''
            command = no_cmd + key
        if command:
            commands.append(command)

    if commands:
        candidate.add(commands, parents=[])
def main():
    """Module entry point: diff proposed vs existing PIM state and apply."""
    argument_spec = dict(
        bfd=dict(required=False, type='str', choices=['enable', 'disable']),
        ssm_range=dict(required=False, type='list', default=[]),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    result = {'changed': False, 'commands': [], 'warnings': warnings}

    params = module.params
    args = [k for k in PARAM_TO_COMMAND_KEYMAP.keys() if params[k] is not None]

    # SSM syntax check
    if 'ssm_range' in args:
        for item in params['ssm_range']:
            if re.search('none|default', item):
                break
            # Crude dotted-quad check: keywords aside, values must look
            # like a.b.c.d(/s) multicast addresses.
            if len(item.split('.')) != 4:
                module.fail_json(msg="Valid ssm_range values are multicast addresses "
                                     "or the keyword 'none' or the keyword 'default'.")

    existing = get_existing(module, args)
    proposed_args = dict((k, v) for k, v in params.items() if k in args)

    proposed = {}
    for key, value in proposed_args.items():
        if key == 'ssm_range':
            if value and value[0] == 'default':
                # Only emit a change if something is currently configured.
                if existing.get(key):
                    proposed[key] = 'default'
            else:
                # Compare as sorted, de-duplicated string sets so list
                # ordering differences do not trigger spurious changes.
                v = sorted(set([str(i) for i in value]))
                ex = sorted(set([str(i) for i in existing.get(key, [])]))
                if v != ex:
                    proposed[key] = ' '.join(str(s) for s in v)
        elif key == 'bfd':
            if value != existing.get('bfd', 'disable'):
                proposed[key] = value
        elif value != existing.get(key):
            proposed[key] = value

    candidate = CustomNetworkConfig(indent=3)
    get_commands(module, existing, proposed, candidate)

    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        result['changed'] = True
        load_config(module, candidate)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
dbcls/dbcls-galaxy | tools/metag_tools/short_reads_figure_score.py | 2 | 9229 | #! /usr/bin/python
"""
boxplot:
- box: first quartile and third quartile
- line inside the box: median
- outlier: 1.5 IQR higher than the third quartile or 1.5 IQR lower than the first quartile
IQR = third quartile - first quartile
- The smallest/largest value that is not an outlier is connected to the box by with a horizontal line.
"""
import os, sys, math, tempfile, zipfile, re
from rpy import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
    """Report *msg* on stderr and abort the script with a failure status."""
    sys.stderr.write( "%s\n" % msg )
    # Exit non-zero so shells and wrappers see this as a failure;
    # the previous bare sys.exit() reported success (status 0).
    sys.exit( 1 )
def unzip( filename ):
    """Concatenate every member of the zip archive *filename*, in
    namelist order, into a single temporary file.

    Returns the path of the temporary file.
    """
    zip_file = zipfile.ZipFile( filename, 'r' )
    tmpfilename = tempfile.NamedTemporaryFile().name
    # Open the output once in binary mode.  The old code used the
    # Python-2-only file() builtin and re-opened (and never closed) the
    # output for every archive member.
    tmpfile = open( tmpfilename, 'wb' )
    for name in zip_file.namelist():
        tmpfile.write( zip_file.read( name ) )
    tmpfile.close()
    zip_file.close()
    return tmpfilename
def merge_to_20_datapoints( score ):
    """Downsample a per-base quality list to exactly 20 points.

    *score* is a list of numeric strings; the caller prepends a dummy
    element, so score[0] is always skipped.  Values are averaged in bins
    of ``step = floor((len(score) - 1) / 20)`` samples; any extra trailing
    bins are merged into the final point.

    Fix: use range() instead of the Python-2-only xrange() (the lists here
    are small, so this is also fine on Python 2); drop a dead
    ``score_points = 0`` initialization.

    Note: the sample sitting exactly on each bin boundary is dropped, not
    averaged -- preserved from the original implementation.

    Callers only invoke this for reads longer than 100 bases, which
    guarantees step >= 5 and at least 20 bins.
    """
    number_of_points = 20
    read_length = len( score )
    step = int( math.floor( ( read_length - 1 ) * 1.0 / number_of_points ) )
    scores = []
    point = 1
    point_sum = 0
    step_average = 0
    for i in range( 1, read_length ):
        if i < ( point * step ):
            point_sum += int( score[i] )
            step_average += 1
        else:
            # Close the current bin (the boundary sample itself is dropped).
            point_avg = point_sum * 1.0 / step_average
            scores.append( point_avg )
            point += 1
            point_sum = 0
            step_average = 0
    if step_average > 0:
        point_avg = point_sum * 1.0 / step_average
        scores.append( point_avg )
    # Collapse any excess trailing bins into one averaged last point.
    if len( scores ) > number_of_points:
        last_avg = 0
        for j in range( number_of_points - 1, len( scores ) ):
            last_avg += scores[j]
        last_avg = last_avg / ( len(scores) - number_of_points + 1 )
    else:
        last_avg = scores[-1]
    score_points = scores[:number_of_points - 1]
    score_points.append( last_avg )
    return score_points
def __main__():
invalid_lines = 0
infile_score_name = sys.argv[1].strip()
outfile_R_name = sys.argv[2].strip()
infile_is_zipped = False
if zipfile.is_zipfile( infile_score_name ):
infile_is_zipped = True
infile_name = unzip( infile_score_name )
else:
infile_name = infile_score_name
# Determine tabular or fasta format within the first 100 lines
seq_method = None
data_type = None
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip( '\r\n' )
if not line or line.startswith( '#' ):
continue
if data_type == None:
if line.startswith( '>' ):
data_type = 'fasta'
continue
elif len( line.split( '\t' ) ) > 0:
fields = line.split()
for score in fields:
try:
int( score )
data_type = 'tabular'
seq_method = 'solexa'
break
except:
break
elif data_type == 'fasta':
fields = line.split()
for score in fields:
try:
int( score )
seq_method = '454'
break
except:
break
if i == 100:
break
if data_type is None:
stop_err( 'This tool can only use fasta data or tabular data.' )
if seq_method is None:
stop_err( 'Invalid data for fasta format.')
# Determine fixed length or variable length within the first 100 lines
read_length = 0
variable_length = False
if seq_method == 'solexa':
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip( '\r\n' )
if not line or line.startswith( '#' ):
continue
scores = line.split('\t')
if read_length == 0:
read_length = len( scores )
if read_length != len( scores ):
variable_length = True
break
if i == 100:
break
elif seq_method == '454':
score = ''
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip( '\r\n' )
if not line or line.startswith( '#' ):
continue
if line.startswith( '>' ):
if len( score ) > 0:
score = score.split()
if read_length == 0:
read_length = len( score )
if read_length != len( score ):
variable_length = True
break
score = ''
else:
score = score + ' ' + line
if i == 100:
break
if variable_length:
number_of_points = 20
else:
number_of_points = read_length
read_length_threshold = 100 # minimal read length for 454 file
score_points = []
score_matrix = []
invalid_scores = 0
if seq_method == 'solexa':
for i, line in enumerate( open( infile_name ) ):
line = line.rstrip( '\r\n' )
if not line or line.startswith( '#' ):
continue
tmp_array = []
scores = line.split( '\t' )
for bases in scores:
nuc_errors = bases.split()
try:
nuc_errors[0] = int( nuc_errors[0] )
nuc_errors[1] = int( nuc_errors[1] )
nuc_errors[2] = int( nuc_errors[2] )
nuc_errors[3] = int( nuc_errors[3] )
big = max( nuc_errors )
except:
#print 'Invalid numbers in the file. Skipped.'
invalid_scores += 1
big = 0
tmp_array.append( big )
score_points.append( tmp_array )
elif seq_method == '454':
# skip the last fasta sequence
score = ''
for i, line in enumerate( open( infile_name ) ):
line = line.rstrip( '\r\n' )
if not line or line.startswith( '#' ):
continue
if line.startswith( '>' ):
if len( score ) > 0:
score = ['0'] + score.split()
read_length = len( score )
tmp_array = []
if not variable_length:
score.pop(0)
score_points.append( score )
tmp_array = score
elif read_length > read_length_threshold:
score_points_tmp = merge_to_20_datapoints( score )
score_points.append( score_points_tmp )
tmp_array = score_points_tmp
score = ''
else:
score = "%s %s" % ( score, line )
if len( score ) > 0:
score = ['0'] + score.split()
read_length = len( score )
if not variable_length:
score.pop(0)
score_points.append( score )
elif read_length > read_length_threshold:
score_points_tmp = merge_to_20_datapoints( score )
score_points.append( score_points_tmp )
tmp_array = score_points_tmp
# reverse the matrix, for R
for i in range( number_of_points - 1 ):
tmp_array = []
for j in range( len( score_points ) ):
try:
tmp_array.append( int( score_points[j][i] ) )
except:
invalid_lines += 1
score_matrix.append( tmp_array )
# generate pdf figures
#outfile_R_pdf = outfile_R_name
#r.pdf( outfile_R_pdf )
outfile_R_png = outfile_R_name
r.bitmap( outfile_R_png )
title = "boxplot of quality scores"
empty_score_matrix_columns = 0
for i, subset in enumerate( score_matrix ):
if not subset:
empty_score_matrix_columns += 1
score_matrix[i] = [0]
if not variable_length:
r.boxplot( score_matrix, xlab="location in read length", main=title )
else:
r.boxplot( score_matrix, xlab="position within read (% of total length)", xaxt="n", main=title )
x_old_range = []
x_new_range = []
step = read_length_threshold / number_of_points
for i in xrange( 0, read_length_threshold, step ):
x_old_range.append( ( i / step ) )
x_new_range.append( i )
r.axis( 1, x_old_range, x_new_range )
r.dev_off()
if infile_is_zipped and os.path.exists( infile_name ):
# Need to delete temporary file created when we unzipped the infile archive
os.remove( infile_name )
if invalid_scores > 0:
print 'Skipped %d invalid scores. ' % invalid_scores
if invalid_lines > 0:
print 'Skipped %d invalid lines. ' % invalid_lines
if empty_score_matrix_columns > 0:
print '%d missing scores in score_matrix. ' % empty_score_matrix_columns
r.quit(save = "no")
# Standard script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    __main__()
| mit |
andris210296/andris-projeto | backend/venv/lib/python2.7/site-packages/unidecode/x08f.py | 252 | 4651 | data = (
'Er ', # 0x00
'Qiong ', # 0x01
'Ju ', # 0x02
'Jiao ', # 0x03
'Guang ', # 0x04
'Lu ', # 0x05
'Kai ', # 0x06
'Quan ', # 0x07
'Zhou ', # 0x08
'Zai ', # 0x09
'Zhi ', # 0x0a
'She ', # 0x0b
'Liang ', # 0x0c
'Yu ', # 0x0d
'Shao ', # 0x0e
'You ', # 0x0f
'Huan ', # 0x10
'Yun ', # 0x11
'Zhe ', # 0x12
'Wan ', # 0x13
'Fu ', # 0x14
'Qing ', # 0x15
'Zhou ', # 0x16
'Ni ', # 0x17
'Ling ', # 0x18
'Zhe ', # 0x19
'Zhan ', # 0x1a
'Liang ', # 0x1b
'Zi ', # 0x1c
'Hui ', # 0x1d
'Wang ', # 0x1e
'Chuo ', # 0x1f
'Guo ', # 0x20
'Kan ', # 0x21
'Yi ', # 0x22
'Peng ', # 0x23
'Qian ', # 0x24
'Gun ', # 0x25
'Nian ', # 0x26
'Pian ', # 0x27
'Guan ', # 0x28
'Bei ', # 0x29
'Lun ', # 0x2a
'Pai ', # 0x2b
'Liang ', # 0x2c
'Ruan ', # 0x2d
'Rou ', # 0x2e
'Ji ', # 0x2f
'Yang ', # 0x30
'Xian ', # 0x31
'Chuan ', # 0x32
'Cou ', # 0x33
'Qun ', # 0x34
'Ge ', # 0x35
'You ', # 0x36
'Hong ', # 0x37
'Shu ', # 0x38
'Fu ', # 0x39
'Zi ', # 0x3a
'Fu ', # 0x3b
'Wen ', # 0x3c
'Ben ', # 0x3d
'Zhan ', # 0x3e
'Yu ', # 0x3f
'Wen ', # 0x40
'Tao ', # 0x41
'Gu ', # 0x42
'Zhen ', # 0x43
'Xia ', # 0x44
'Yuan ', # 0x45
'Lu ', # 0x46
'Jiu ', # 0x47
'Chao ', # 0x48
'Zhuan ', # 0x49
'Wei ', # 0x4a
'Hun ', # 0x4b
'Sori ', # 0x4c
'Che ', # 0x4d
'Jiao ', # 0x4e
'Zhan ', # 0x4f
'Pu ', # 0x50
'Lao ', # 0x51
'Fen ', # 0x52
'Fan ', # 0x53
'Lin ', # 0x54
'Ge ', # 0x55
'Se ', # 0x56
'Kan ', # 0x57
'Huan ', # 0x58
'Yi ', # 0x59
'Ji ', # 0x5a
'Dui ', # 0x5b
'Er ', # 0x5c
'Yu ', # 0x5d
'Xian ', # 0x5e
'Hong ', # 0x5f
'Lei ', # 0x60
'Pei ', # 0x61
'Li ', # 0x62
'Li ', # 0x63
'Lu ', # 0x64
'Lin ', # 0x65
'Che ', # 0x66
'Ya ', # 0x67
'Gui ', # 0x68
'Xuan ', # 0x69
'Di ', # 0x6a
'Ren ', # 0x6b
'Zhuan ', # 0x6c
'E ', # 0x6d
'Lun ', # 0x6e
'Ruan ', # 0x6f
'Hong ', # 0x70
'Ku ', # 0x71
'Ke ', # 0x72
'Lu ', # 0x73
'Zhou ', # 0x74
'Zhi ', # 0x75
'Yi ', # 0x76
'Hu ', # 0x77
'Zhen ', # 0x78
'Li ', # 0x79
'Yao ', # 0x7a
'Qing ', # 0x7b
'Shi ', # 0x7c
'Zai ', # 0x7d
'Zhi ', # 0x7e
'Jiao ', # 0x7f
'Zhou ', # 0x80
'Quan ', # 0x81
'Lu ', # 0x82
'Jiao ', # 0x83
'Zhe ', # 0x84
'Fu ', # 0x85
'Liang ', # 0x86
'Nian ', # 0x87
'Bei ', # 0x88
'Hui ', # 0x89
'Gun ', # 0x8a
'Wang ', # 0x8b
'Liang ', # 0x8c
'Chuo ', # 0x8d
'Zi ', # 0x8e
'Cou ', # 0x8f
'Fu ', # 0x90
'Ji ', # 0x91
'Wen ', # 0x92
'Shu ', # 0x93
'Pei ', # 0x94
'Yuan ', # 0x95
'Xia ', # 0x96
'Zhan ', # 0x97
'Lu ', # 0x98
'Che ', # 0x99
'Lin ', # 0x9a
'Xin ', # 0x9b
'Gu ', # 0x9c
'Ci ', # 0x9d
'Ci ', # 0x9e
'Pi ', # 0x9f
'Zui ', # 0xa0
'Bian ', # 0xa1
'La ', # 0xa2
'La ', # 0xa3
'Ci ', # 0xa4
'Xue ', # 0xa5
'Ban ', # 0xa6
'Bian ', # 0xa7
'Bian ', # 0xa8
'Bian ', # 0xa9
'[?] ', # 0xaa
'Bian ', # 0xab
'Ban ', # 0xac
'Ci ', # 0xad
'Bian ', # 0xae
'Bian ', # 0xaf
'Chen ', # 0xb0
'Ru ', # 0xb1
'Nong ', # 0xb2
'Nong ', # 0xb3
'Zhen ', # 0xb4
'Chuo ', # 0xb5
'Chuo ', # 0xb6
'Suberu ', # 0xb7
'Reng ', # 0xb8
'Bian ', # 0xb9
'Bian ', # 0xba
'Sip ', # 0xbb
'Ip ', # 0xbc
'Liao ', # 0xbd
'Da ', # 0xbe
'Chan ', # 0xbf
'Gan ', # 0xc0
'Qian ', # 0xc1
'Yu ', # 0xc2
'Yu ', # 0xc3
'Qi ', # 0xc4
'Xun ', # 0xc5
'Yi ', # 0xc6
'Guo ', # 0xc7
'Mai ', # 0xc8
'Qi ', # 0xc9
'Za ', # 0xca
'Wang ', # 0xcb
'Jia ', # 0xcc
'Zhun ', # 0xcd
'Ying ', # 0xce
'Ti ', # 0xcf
'Yun ', # 0xd0
'Jin ', # 0xd1
'Hang ', # 0xd2
'Ya ', # 0xd3
'Fan ', # 0xd4
'Wu ', # 0xd5
'Da ', # 0xd6
'E ', # 0xd7
'Huan ', # 0xd8
'Zhe ', # 0xd9
'Totemo ', # 0xda
'Jin ', # 0xdb
'Yuan ', # 0xdc
'Wei ', # 0xdd
'Lian ', # 0xde
'Chi ', # 0xdf
'Che ', # 0xe0
'Ni ', # 0xe1
'Tiao ', # 0xe2
'Zhi ', # 0xe3
'Yi ', # 0xe4
'Jiong ', # 0xe5
'Jia ', # 0xe6
'Chen ', # 0xe7
'Dai ', # 0xe8
'Er ', # 0xe9
'Di ', # 0xea
'Po ', # 0xeb
'Wang ', # 0xec
'Die ', # 0xed
'Ze ', # 0xee
'Tao ', # 0xef
'Shu ', # 0xf0
'Tuo ', # 0xf1
'Kep ', # 0xf2
'Jing ', # 0xf3
'Hui ', # 0xf4
'Tong ', # 0xf5
'You ', # 0xf6
'Mi ', # 0xf7
'Beng ', # 0xf8
'Ji ', # 0xf9
'Nai ', # 0xfa
'Yi ', # 0xfb
'Jie ', # 0xfc
'Zhui ', # 0xfd
'Lie ', # 0xfe
'Xun ', # 0xff
)
| mit |
jrshust/spark | python/setup.py | 25 | 9659 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
# Refuse to build/install on unsupported interpreters before doing any work.
if sys.version_info < (2, 7):
    print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
          file=sys.stderr)
    exit(-1)
# Pull __version__ out of pyspark/version.py by exec'ing it, so we never have
# to import the (not-yet-installed) pyspark package itself.
try:
    exec(open('pyspark/version.py').read())
except IOError:
    print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # name is injected by the exec above
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
    # Exactly one assembly build: use it.
    JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
    # Release mode puts the jars in a jars directory
    JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
    # Ambiguous: assemblies exist for more than one Scala version.
    print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
        JARS_PATH), file=sys.stderr)
    sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
    # No jars and no pre-built symlink farm: the user skipped the Spark build.
    print(incorrect_invocation_message, file=sys.stderr)
    sys.exit(-1)
# Source locations inside SPARK_HOME ...
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
# ... and their mirror targets inside the temporary "deps" farm.
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
            (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
    # Construct links for setup
    try:
        os.mkdir(TEMP_PATH)
    except:
        # NOTE(review): bare except — also catches KeyboardInterrupt/SystemExit;
        # OSError would be the narrower, intended exception here.
        print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
              file=sys.stderr)
        exit(-1)
try:
    # We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
    # find it where expected. The rest of the files aren't copied because they are accessed
    # using Python imports instead which will be resolved correctly.
    try:
        os.makedirs("pyspark/python/pyspark")
    except OSError:
        # Don't worry if the directory already exists.
        pass
    copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
    if (in_spark):
        # Construct the symlink farm - this is necessary since we can't refer to the path above the
        # package root and we need to copy the jars and scripts which are up above the python root.
        if _supports_symlinks():
            os.symlink(JARS_PATH, JARS_TARGET)
            os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
            os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
            os.symlink(DATA_PATH, DATA_TARGET)
            os.symlink(LICENSES_PATH, LICENSES_TARGET)
        else:
            # For windows fall back to the slower copytree
            copytree(JARS_PATH, JARS_TARGET)
            copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
            copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
            copytree(DATA_PATH, DATA_TARGET)
            copytree(LICENSES_PATH, LICENSES_TARGET)
    else:
        # If we are not inside of SPARK_HOME verify we have the required symlink farm
        if not os.path.exists(JARS_TARGET):
            print("To build packaging must be in the python directory under the SPARK_HOME.",
                  file=sys.stderr)
    # Regardless of in_spark, bin/ must be present to enumerate launcher scripts.
    if not os.path.isdir(SCRIPTS_TARGET):
        print(incorrect_invocation_message, file=sys.stderr)
        exit(-1)
    # Scripts directive requires a list of each script path and does not take wild cards.
    script_names = os.listdir(SCRIPTS_TARGET)
    scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
    # We add find_spark_home.py to the bin directory we install so that pip installed PySpark
    # will search for SPARK_HOME with Python.
    scripts.append("pyspark/find_spark_home.py")
    # Parse the README markdown file into rst for PyPI
    long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
    try:
        import pypandoc
        long_description = pypandoc.convert('README.md', 'rst')
    except ImportError:
        # Non-fatal on purpose: local installs work; only PyPI uploads need pandoc.
        print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
    setup(
        name='pyspark',
        version=VERSION,
        description='Apache Spark Python API',
        long_description=long_description,
        author='Spark Developers',
        author_email='dev@spark.apache.org',
        url='https://github.com/apache/spark/tree/master/python',
        packages=['pyspark',
                  'pyspark.mllib',
                  'pyspark.mllib.linalg',
                  'pyspark.mllib.stat',
                  'pyspark.ml',
                  'pyspark.ml.linalg',
                  'pyspark.ml.param',
                  'pyspark.sql',
                  'pyspark.streaming',
                  'pyspark.bin',
                  'pyspark.jars',
                  'pyspark.python.pyspark',
                  'pyspark.python.lib',
                  'pyspark.data',
                  'pyspark.licenses',
                  'pyspark.examples.src.main.python'],
        include_package_data=True,
        package_dir={
            'pyspark.jars': 'deps/jars',
            'pyspark.bin': 'deps/bin',
            'pyspark.python.lib': 'lib',
            'pyspark.data': 'deps/data',
            'pyspark.licenses': 'deps/licenses',
            'pyspark.examples.src.main.python': 'deps/examples',
        },
        package_data={
            'pyspark.jars': ['*.jar'],
            'pyspark.bin': ['*'],
            'pyspark.python.lib': ['*.zip'],
            'pyspark.data': ['*.txt', '*.data'],
            'pyspark.licenses': ['*.txt'],
            'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
        scripts=scripts,
        license='http://www.apache.org/licenses/LICENSE-2.0',
        install_requires=['py4j==0.10.4'],
        setup_requires=['pypandoc'],
        extras_require={
            'ml': ['numpy>=1.7'],
            'mllib': ['numpy>=1.7'],
            'sql': ['pandas']
        },
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy']
    )
finally:
    # We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
    # packaging.
    if (in_spark):
        # Depending on cleaning up the symlink farm or copied version
        if _supports_symlinks():
            os.remove(os.path.join(TEMP_PATH, "jars"))
            os.remove(os.path.join(TEMP_PATH, "bin"))
            os.remove(os.path.join(TEMP_PATH, "examples"))
            os.remove(os.path.join(TEMP_PATH, "data"))
            os.remove(os.path.join(TEMP_PATH, "licenses"))
        else:
            rmtree(os.path.join(TEMP_PATH, "jars"))
            rmtree(os.path.join(TEMP_PATH, "bin"))
            rmtree(os.path.join(TEMP_PATH, "examples"))
            rmtree(os.path.join(TEMP_PATH, "data"))
            rmtree(os.path.join(TEMP_PATH, "licenses"))
    os.rmdir(TEMP_PATH)
| apache-2.0 |
cbrewster/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 26 | 4156 | import json, os, urllib, urlparse
def redirect(url, response):
    """Emit a raw 301 redirect to *url*, bypassing the server's default headers."""
    writer = response.writer
    # Disable automatic headers so only the ones written below are sent.
    response.add_required_headers = False
    writer.write_status(301)
    for name, value in (("access-control-allow-origin", "*"),
                        ("location", url)):
        writer.write_header(name, value)
    writer.end_headers()
    writer.write("")
def create_redirect_url(request, swap_scheme = False):
    """Rebuild the request's own URL as a redirect target, dropping the
    "redirection" query parameter.  With swap_scheme=True the scheme is
    flipped between http and https and the netloc's port is replaced by the
    server's configured port for the other scheme.

    Python 2 code: uses the `urlparse` module and `urllib.urlencode`.
    """
    parsed = urlparse.urlsplit(request.url)
    destination_netloc = parsed.netloc
    scheme = parsed.scheme
    if swap_scheme:
        scheme = "http" if parsed.scheme == "https" else "https"
        hostname = parsed.netloc.split(':')[0]
        # First configured port for the target scheme — assumes the wptserve
        # config always lists at least one port per scheme (TODO confirm).
        port = request.server.config["ports"][scheme][0]
        destination_netloc = ":".join([hostname, str(port)])
    # Remove "redirection" from query to avoid redirect loops.
    parsed_query = dict(urlparse.parse_qsl(parsed.query))
    assert "redirection" in parsed_query
    del parsed_query["redirection"]
    destination_url = urlparse.urlunsplit(urlparse.SplitResult(
        scheme = scheme,
        netloc = destination_netloc,
        path = parsed.path,
        query = urllib.urlencode(parsed_query),
        fragment = None))
    return destination_url
def main(request, response):
    """wptserve handler: either redirect back to itself (per the "redirection"
    query parameter) or perform a stash "put"/"purge"/"take" action and write
    the result as a raw HTTP response.
    """
    if request.GET:
        # Redirection branch: re-issue the request without "redirection".
        pass
    if "redirection" in request.GET:
        redirection = request.GET["redirection"]
        if redirection == "no-redirect":
            pass
        elif redirection == "keep-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=False), response)
            return
        elif redirection == "swap-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=True), response)
            return
        else:
            raise ValueError ("Invalid redirect type: %s" % redirection)
    content_type = "text/plain"
    response_data = ""
    if "action" in request.GET:
        action = request.GET["action"]
        if "content_type" in request.GET:
            content_type = request.GET["content_type"]
        key = request.GET["key"]
        stash = request.server.stash
        # NOTE(review): when "path" IS supplied, the trailing [0] takes the
        # first *character* of that value; the default list makes [0] yield
        # the URL before '?'. Looks like a latent bug — verify intent.
        path = request.GET.get("path", request.url.split('?'))[0]
        if action == "put":
            # Record that the resource was requested.
            value = request.GET["value"]
            stash.take(key=key, path=path)
            stash.put(key=key, value=value, path=path)
            response_data = json.dumps({"status": "success", "result": key})
        elif action == "purge":
            # Clear the stash entry and serve a real payload of the requested
            # content type.  NOTE(review): file handles from open(...) are
            # never explicitly closed (relies on CPython refcounting).
            value = stash.take(key=key, path=path)
            if content_type == "image/png":
                response_data = open(os.path.join(request.doc_root,
                                                  "images",
                                                  "smiley.png"), "rb").read()
            elif content_type == "audio/wav":
                response_data = open(os.path.join(request.doc_root,
                    "webaudio", "resources", "sin_440Hz_-6dBFS_1s.wav"), "rb").read()
            elif content_type == "video/ogg":
                response_data = open(os.path.join(request.doc_root,
                                                  "media",
                                                  "movie_5.ogv"), "rb").read()
            elif content_type == "application/javascript":
                response_data = open(os.path.join(request.doc_root,
                                                  "mixed-content",
                                                  "generic",
                                                  "worker.js"), "rb").read()
            else:
                response_data = "/* purged */"
        elif action == "take":
            # A remaining stash entry means the earlier "put" never happened,
            # i.e. the request was blocked by the mixed-content checker.
            value = stash.take(key=key, path=path)
            if value is None:
                status = "allowed"
            else:
                status = "blocked"
            response_data = json.dumps({"status": status, "result": value})
    # Write the response manually so no default headers are added.
    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", "no-cache; must-revalidate")
    response.writer.end_headers()
    response.writer.write(response_data)
| mpl-2.0 |
hkariti/ansible | lib/ansible/module_utils/network/fortios/fortios.py | 89 | 8000 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Benjamin Jolivot <bjolivot@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import env_fallback
# check for pyFG lib
try:
from pyFG import FortiOS, FortiConfig
from pyFG.exceptions import FailedCommit
HAS_PYFG = True
except ImportError:
HAS_PYFG = False
# Common AnsibleModule argument spec shared by all fortios_* modules.
fortios_argument_spec = dict(
    file_mode=dict(type='bool', default=False),
    config_file=dict(type='path'),
    host=dict(),
    username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    password=dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    timeout=dict(type='int', default=60),
    vdom=dict(type='str'),
    backup=dict(type='bool', default=False),
    backup_path=dict(type='path'),
    backup_filename=dict(type='str'),
)
# Conditional requirements: network credentials in live mode, a config file in
# file mode, and a backup path whenever backup is requested.
fortios_required_if = [
    ['file_mode', False, ['host', 'username', 'password']],
    ['file_mode', True, ['config_file']],
    ['backup', True, ['backup_path']],
]
# file_mode (config_file) and live mode (host/username/password) are exclusive.
fortios_mutually_exclusive = [
    ['config_file', 'host'],
    ['config_file', 'username'],
    ['config_file', 'password']
]
# Known FortiOS CLI error codes, used to annotate commit failures.
fortios_error_codes = {
    '-3': "Object not found",
    '-61': "Command error"
}
def backup(module, running_config):
    """Write *running_config* to a file under module.params['backup_path'].

    The file is named module.params['backup_filename'] when one is given,
    otherwise "<host>_config.<timestamp>".  Any filesystem failure is
    reported through module.fail_json() (which raises/exits in Ansible).

    Fixes over the previous version: `backup_filename` may be None (its
    argspec default), which used to crash on len(None); the two bare
    `except:` clauses are narrowed to filesystem errors; the backup file
    handle is now closed deterministically via `with`.
    """
    backup_path = module.params['backup_path']
    backup_filename = module.params['backup_filename']
    if not os.path.exists(backup_path):
        try:
            os.mkdir(backup_path)
        except OSError:
            module.fail_json(msg="Can't create directory {0} Permission denied ?".format(backup_path))
    tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
    # Truthiness covers both None (param not supplied) and the empty string.
    if backup_filename:
        filename = '%s/%s' % (backup_path, backup_filename)
    else:
        filename = '%s/%s_config.%s' % (backup_path, module.params['host'], tstamp)
    try:
        # `with` guarantees the handle is closed even if write() fails part-way.
        with open(filename, 'w') as backup_file:
            backup_file.write(running_config)
    except (IOError, OSError):
        module.fail_json(msg="Can't create backup file {0} Permission denied ?".format(filename))
class AnsibleFortios(object):
    """Thin wrapper around pyFG's FortiOS driver for Ansible fortios modules.

    Supports two modes selected by module.params['file_mode']: editing a local
    configuration file, or connecting to a live device over the network.
    Results are accumulated in self.result and emitted via exit_json().
    """
    def __init__(self, module):
        # Fail early when the optional pyFG dependency is missing.
        if not HAS_PYFG:
            module.fail_json(msg='Could not import the python library pyFG required by this module')
        self.result = {
            'changed': False,
        }
        self.module = module
    def _connect(self):
        """Create self.forti_device; open a network session unless in file_mode."""
        if self.module.params['file_mode']:
            # Offline: an empty hostname yields a device object used purely
            # for config parsing, no connection is opened.
            self.forti_device = FortiOS('')
        else:
            host = self.module.params['host']
            username = self.module.params['username']
            password = self.module.params['password']
            timeout = self.module.params['timeout']
            vdom = self.module.params['vdom']
            self.forti_device = FortiOS(host, username=username, password=password, timeout=timeout, vdom=vdom)
            try:
                self.forti_device.open()
            except Exception as e:
                self.module.fail_json(msg='Error connecting device. %s' % to_native(e),
                                      exception=traceback.format_exc())
    def load_config(self, path):
        """Load the running config for *path* (from file or device), stash it
        in self.result, and optionally write a backup."""
        self.path = path
        self._connect()
        # load in file_mode
        if self.module.params['file_mode']:
            try:
                f = open(self.module.params['config_file'], 'r')
                running = f.read()
                f.close()
            except IOError as e:
                self.module.fail_json(msg='Error reading configuration file. %s' % to_native(e),
                                      exception=traceback.format_exc())
            self.forti_device.load_config(config_text=running, path=path)
        else:
            # get config
            try:
                self.forti_device.load_config(path=path)
            except Exception as e:
                # Close the session before reporting, to avoid leaking it.
                self.forti_device.close()
                self.module.fail_json(msg='Error reading running config. %s' % to_native(e),
                                      exception=traceback.format_exc())
        # set configs in object
        self.result['running_config'] = self.forti_device.running_config.to_text()
        self.candidate_config = self.forti_device.candidate_config
        # backup if needed
        if self.module.params['backup']:
            backup(self.module, self.forti_device.running_config.to_text())
    def apply_changes(self):
        """Diff candidate vs running config; commit (or write the file) unless
        in check mode, then exit the module with the accumulated result."""
        change_string = self.forti_device.compare_config()
        if change_string:
            self.result['change_string'] = change_string
            self.result['changed'] = True
        # Commit if not check mode
        if change_string and not self.module.check_mode:
            if self.module.params['file_mode']:
                try:
                    f = open(self.module.params['config_file'], 'w')
                    f.write(self.candidate_config.to_text())
                    f.close()
                except IOError as e:
                    self.module.fail_json(msg='Error writing configuration file. %s' %
                                          to_native(e), exception=traceback.format_exc())
            else:
                try:
                    self.forti_device.commit()
                except FailedCommit as e:
                    # Something's wrong (rollback is automatic)
                    self.forti_device.close()
                    error_list = self.get_error_infos(e)
                    # NOTE(review): e.message is Python 2 only; str(e) would be portable.
                    self.module.fail_json(msg_error_list=error_list, msg="Unable to commit change, check your args, the error was %s" % e.message)
            self.forti_device.close()
        self.module.exit_json(**self.result)
    def del_block(self, block_id):
        """Remove config block *block_id* from the candidate config."""
        self.forti_device.candidate_config[self.path].del_block(block_id)
    def add_block(self, block_id, block):
        """Insert/replace config block *block_id* in the candidate config."""
        self.forti_device.candidate_config[self.path][block_id] = block
    def get_error_infos(self, cli_errors):
        """Flatten a FailedCommit's (code, message) pairs into dicts annotated
        with a human-readable type from fortios_error_codes."""
        error_list = []
        for errors in cli_errors.args:
            for error in errors:
                error_code = error[0]
                error_string = error[1]
                error_type = fortios_error_codes.get(error_code, "unknown")
                error_list.append(dict(error_code=error_code, error_type=error_type, error_string=error_string))
        return error_list
    def get_empty_configuration_block(self, block_name, block_type):
        """Return a fresh, empty pyFG FortiConfig block."""
        return FortiConfig(block_name, block_type)
| gpl-3.0 |
rhelmer/socorro | socorro/external/postgresql/bugs.py | 9 | 2591 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Deprecated by socorro/external/postgresql/bugs_service.py"""
import logging
from socorro.external import MissingArgumentError, BadArgumentError
from socorro.external.postgresql.base import PostgreSQLBase
from socorro.lib import external_common
logger = logging.getLogger("webapi")
class Bugs(PostgreSQLBase):
    """Implement the /bugs service with PostgreSQL. """
    # Accepted parameters: each entry is (name, default, type spec) as
    # understood by external_common.parse_arguments.
    filters = [
        ("signatures", None, ["list", "str"]),
        ("bug_ids", None, ["list", "str"]),
    ]
    def get(self, **kwargs):
        """Deprecated GET entry point — delegates to post()."""
        import warnings
        warnings.warn("You should use the POST method to access bugs")
        return self.post(**kwargs)
    def post(self, **kwargs):
        """Return a list of signatures-to-bug_ids or bug_ids-to-signatures
        associations. """
        # Exactly one of `signatures` / `bug_ids` must be provided.
        params = external_common.parse_arguments(self.filters, kwargs)
        if not params['signatures'] and not params['bug_ids']:
            raise MissingArgumentError('specify one of signatures or bug_ids')
        elif params['signatures'] and params['bug_ids']:
            raise BadArgumentError('specify only one of signatures or bug_ids')
        sql_params = []
        if params['signatures']:
            # Look up every bug associated with any of the given signatures.
            # NOTE(review): params.signatures (attribute access) — presumably
            # parse_arguments returns a dot-accessible dict; verify.
            sql_params.append(tuple(params.signatures))
            sql = """/* socorro.external.postgresql.bugs.Bugs.get */
                SELECT ba.signature, bugs.id
                FROM bugs
                JOIN bug_associations AS ba ON bugs.id = ba.bug_id
                WHERE EXISTS(
                    SELECT 1 FROM bug_associations
                    WHERE bug_associations.bug_id = bugs.id
                    AND signature IN %s
                )
            """
        elif params['bug_ids']:
            # Look up every signature associated with the given bug ids.
            sql_params.append(tuple(params.bug_ids))
            sql = """/* socorro.external.postgresql.bugs.Bugs.get */
                SELECT ba.signature, bugs.id
                FROM bugs
                JOIN bug_associations AS ba ON bugs.id = ba.bug_id
                WHERE bugs.id IN %s
            """
        error_message = "Failed to retrieve bug associations from PostgreSQL"
        results = self.query(sql, sql_params, error_message=error_message)
        bugs = []
        for row in results:
            bug = dict(zip(("signature", "id"), row))
            bugs.append(bug)
        return {
            "hits": bugs,
            "total": len(bugs)
        }
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.